summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-15 19:12:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-15 19:12:14 +0000
commit4b8a0f3f3dcf60dac2ce308ea08d413a535af29f (patch)
tree0f09c0ad2a4d0f535d89040a63dc3a866a6606e6
parentInitial commit. (diff)
downloadreprepro-upstream.tar.xz
reprepro-upstream.zip
Adding upstream version 5.4.4.upstream/5.4.4upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.gitignore21
-rw-r--r--AUTHORS1
-rw-r--r--COPYING340
-rw-r--r--ChangeLog2615
-rw-r--r--HACKING45
-rw-r--r--INSTALL192
-rw-r--r--Makefile.am47
-rw-r--r--NEWS742
-rw-r--r--README120
-rw-r--r--TODO23
-rw-r--r--acinclude.m447
-rw-r--r--aptmethod.c1216
-rw-r--r--aptmethod.h27
-rw-r--r--ar.c299
-rw-r--r--ar.h22
-rw-r--r--archallflood.c714
-rw-r--r--archallflood.h6
-rw-r--r--atoms.c398
-rw-r--r--atoms.h75
-rwxr-xr-xautogen.sh27
-rw-r--r--binaries.c784
-rw-r--r--binaries.h57
-rw-r--r--byhandhook.c241
-rw-r--r--byhandhook.h26
-rw-r--r--changes.c335
-rw-r--r--changes.h24
-rw-r--r--checkin.c1657
-rw-r--r--checkin.h26
-rw-r--r--checkindeb.c444
-rw-r--r--checkindeb.h28
-rw-r--r--checkindsc.c435
-rw-r--r--checkindsc.h31
-rw-r--r--checks.c382
-rw-r--r--checks.h16
-rw-r--r--checksums.c1503
-rw-r--r--checksums.h145
-rw-r--r--chunkedit.c452
-rw-r--r--chunkedit.h58
-rw-r--r--chunks.c798
-rw-r--r--chunks.h61
-rw-r--r--configparser.c1532
-rw-r--r--configparser.h305
-rw-r--r--configure.ac170
-rw-r--r--contents.c408
-rw-r--r--contents.h33
-rw-r--r--copypackages.c1038
-rw-r--r--copypackages.h28
-rw-r--r--database.c2709
-rw-r--r--database.h59
-rw-r--r--database_p.h14
-rw-r--r--debfile.c229
-rw-r--r--debfile.h15
-rw-r--r--debfilecontents.c221
-rw-r--r--descriptions.c202
-rw-r--r--descriptions.h11
-rw-r--r--diffindex.c227
-rw-r--r--diffindex.h19
-rw-r--r--dirs.c230
-rw-r--r--dirs.h29
-rw-r--r--distribution.c1233
-rw-r--r--distribution.h162
-rw-r--r--docs/FAQ219
-rw-r--r--docs/Makefile.am4
-rwxr-xr-xdocs/bzip.example35
-rwxr-xr-xdocs/changelogs.example246
-rw-r--r--docs/changestool.1172
-rwxr-xr-xdocs/copybyhand.example28
-rw-r--r--docs/di.example/DI-filter.sh40
-rw-r--r--docs/di.example/README13
-rw-r--r--docs/di.example/distributions23
-rw-r--r--docs/di.example/updates5
-rwxr-xr-xdocs/mail-changes.example69
-rw-r--r--docs/manual.html1497
-rwxr-xr-xdocs/outsftphook.py589
-rwxr-xr-xdocs/outstore.py237
-rwxr-xr-xdocs/pdiff.example255
-rw-r--r--docs/recovery67
-rw-r--r--docs/reprepro.12847
-rw-r--r--docs/reprepro.bash_completion742
-rw-r--r--docs/reprepro.zsh_completion554
-rw-r--r--docs/rredtool.190
-rwxr-xr-xdocs/sftp.py886
-rw-r--r--docs/short-howto209
-rw-r--r--docs/xz.example30
-rw-r--r--donefile.c242
-rw-r--r--donefile.h24
-rw-r--r--downloadcache.c315
-rw-r--r--downloadcache.h49
-rw-r--r--dpkgversions.c150
-rw-r--r--dpkgversions.h13
-rw-r--r--error.h56
-rw-r--r--exports.c548
-rw-r--r--exports.h26
-rw-r--r--extractcontrol.c458
-rw-r--r--filecntl.c89
-rw-r--r--filecntl.h13
-rw-r--r--filelist.c735
-rw-r--r--filelist.h33
-rw-r--r--files.c817
-rw-r--r--files.h86
-rw-r--r--filterlist.c599
-rw-r--r--filterlist.h41
-rw-r--r--freespace.c243
-rw-r--r--freespace.h20
-rw-r--r--globals.h111
-rw-r--r--globmatch.c187
-rw-r--r--globmatch.h7
-rw-r--r--guesscomponent.c115
-rw-r--r--guesscomponent.h14
-rw-r--r--hooks.c62
-rw-r--r--hooks.h16
-rw-r--r--ignore.c101
-rw-r--r--ignore.h67
-rw-r--r--incoming.c2643
-rw-r--r--incoming.h10
-rw-r--r--indexfile.c304
-rw-r--r--indexfile.h19
-rw-r--r--log.c1138
-rw-r--r--log.h30
-rw-r--r--main.c5355
-rw-r--r--md5.c251
-rw-r--r--md5.h41
-rw-r--r--mprintf.c70
-rw-r--r--mprintf.h12
-rw-r--r--names.c150
-rw-r--r--names.h38
-rw-r--r--needbuild.c303
-rw-r--r--needbuild.h16
-rw-r--r--optionsfile.c128
-rw-r--r--optionsfile.h13
-rw-r--r--outhook.c196
-rw-r--r--outhook.h13
-rw-r--r--override.c410
-rw-r--r--override.h35
-rw-r--r--package.h87
-rw-r--r--pool.c869
-rw-r--r--pool.h33
-rw-r--r--printlistformat.c232
-rw-r--r--printlistformat.h7
-rw-r--r--pull.c1114
-rw-r--r--pull.h31
-rw-r--r--readtextfile.c140
-rw-r--r--readtextfile.h16
-rw-r--r--reference.c231
-rw-r--r--reference.h50
-rw-r--r--release.c1900
-rw-r--r--release.h68
-rw-r--r--remoterepository.c2103
-rw-r--r--remoterepository.h68
-rw-r--r--rredpatch.c772
-rw-r--r--rredpatch.h19
-rw-r--r--rredtool.c1459
-rw-r--r--sha1.c201
-rw-r--r--sha1.h15
-rw-r--r--sha256.c274
-rw-r--r--sha256.h20
-rw-r--r--sha512.c309
-rw-r--r--sha512.h20
-rw-r--r--signature.c570
-rw-r--r--signature.h66
-rw-r--r--signature_check.c924
-rw-r--r--signature_p.h18
-rw-r--r--signedfile.c502
-rw-r--r--sizes.c256
-rw-r--r--sizes.h6
-rw-r--r--sourcecheck.c482
-rw-r--r--sourcecheck.h8
-rw-r--r--sourceextraction.c716
-rw-r--r--sourceextraction.h21
-rw-r--r--sources.c733
-rw-r--r--sources.h48
-rw-r--r--strlist.c283
-rw-r--r--strlist.h50
-rw-r--r--target.c1245
-rw-r--r--target.h126
-rw-r--r--termdecide.c302
-rw-r--r--termdecide.h19
-rw-r--r--terms.c387
-rw-r--r--terms.h65
-rw-r--r--tests/Makefile.am62
-rw-r--r--tests/atoms.test180
-rwxr-xr-xtests/basic.sh425
-rwxr-xr-xtests/brokenuncompressor.sh13
-rw-r--r--tests/buildinfo.test656
-rw-r--r--tests/buildneeding.test631
-rw-r--r--tests/check.test225
-rw-r--r--tests/copy.test210
-rw-r--r--tests/descriptions.test143
-rw-r--r--tests/diffgeneration.test271
-rw-r--r--tests/easyupdate.test142
-rw-r--r--tests/evil.key18
-rw-r--r--tests/expired.key19
-rw-r--r--tests/expiredwithsubkey-working.key52
-rw-r--r--tests/expiredwithsubkey.key52
-rw-r--r--tests/export.test79
-rw-r--r--tests/exporthooks.test79
-rw-r--r--tests/flat.test518
-rw-r--r--tests/flood.test744
-rwxr-xr-xtests/genpackage.sh106
-rw-r--r--tests/good.key18
-rw-r--r--tests/includeasc.test221
-rw-r--r--tests/includeextra.test857
-rw-r--r--tests/layeredupdate.test684
-rw-r--r--tests/layeredupdate2.test683
-rw-r--r--tests/listcodenames.test41
-rw-r--r--tests/morgue.test276
-rwxr-xr-xtests/multiversion.sh353
-rw-r--r--tests/old-database/conf/distributions5
-rw-r--r--tests/old-database/db/checksums.dbbin0 -> 16384 bytes
-rw-r--r--tests/old-database/db/contents.cache.dbbin0 -> 16384 bytes
-rw-r--r--tests/old-database/db/packages.dbbin0 -> 57344 bytes
-rw-r--r--tests/old-database/db/references.dbbin0 -> 16384 bytes
-rw-r--r--tests/old-database/db/release.caches.dbbin0 -> 20480 bytes
-rw-r--r--tests/old-database/db/version4
-rw-r--r--tests/onlysmalldeletes.test142
-rw-r--r--tests/override.test172
-rw-r--r--tests/packagediff.test287
-rw-r--r--tests/revoked.keybin0 -> 624 bytes
-rw-r--r--tests/revoked.pkeybin0 -> 690 bytes
-rw-r--r--tests/shunit2-helper-functions.sh68
-rw-r--r--tests/signatures.test286
-rw-r--r--tests/signed.test68
-rw-r--r--tests/snapshotcopyrestore.test597
-rw-r--r--tests/srcfilterlist.test221
-rw-r--r--tests/subcomponents.test502
-rw-r--r--tests/template.test4
-rw-r--r--tests/test.inc237
-rwxr-xr-xtests/test.sh271
-rw-r--r--tests/trackingcorruption.test79
-rw-r--r--tests/uncompress.test514
-rw-r--r--tests/updatecorners.test176
-rw-r--r--tests/updatepullreject.test555
-rw-r--r--tests/uploaders.test253
-rw-r--r--tests/valgrind.supp128
-rw-r--r--tests/various1.test1430
-rw-r--r--tests/various2.test2462
-rw-r--r--tests/various3.test982
-rw-r--r--tests/verify.test437
-rw-r--r--tests/withsubkeys-works.key52
-rw-r--r--tests/withsubkeys.key52
-rw-r--r--tests/wrongarch.test86
-rw-r--r--tool.c3099
-rw-r--r--tracking.c1468
-rw-r--r--tracking.h53
-rw-r--r--trackingt.h38
-rw-r--r--uncompression.c1794
-rw-r--r--uncompression.h69
-rw-r--r--updates.c2702
-rw-r--r--updates.h40
-rw-r--r--upgradelist.c756
-rw-r--r--upgradelist.h45
-rw-r--r--uploaderslist.c1520
-rw-r--r--uploaderslist.h26
-rw-r--r--valgrind.nodebug.supp66
254 files changed, 94992 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1c7814c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,21 @@
+/aclocal.m4
+/stamp-h.in
+Makefile.in
+/configure
+/config.h.in
+Makefile
+/config.h
+/config.log
+/stamp-h*
+/config.cache
+/config.status
+/debug.log
+/.deps
+/reprepro
+/changestool
+/rredtool
+/*.o
+/.deps
+/old
+/test
+/ac
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..dc9fb06
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1 @@
+Bernhard R. Link <brlink@debian.org>
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..3912109
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..1792035
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,2615 @@
+2024-02-17 Bastian Germann <bage@debian.org>
+
+ * configure.ac, NEWS: Release version 5.4.4
+
+2024-02-05 Sylvestre Ledru <sylvestre@debian.org>
+
+ * docs: Fix some typos
+ * reprepro.1: add missing single quotes
+
+2024-01-08 Bastian Germann <bage@debian.org>
+
+ * uncompression.c: Wait for poll event.
+ Revert "uncompress: prevent reprepro from hanging on unzstd"
+
+2024-01-07 Bastian Germann <bage@debian.org>
+
+ * configure.ac, NEWS: Release version 5.4.3
+
+2023-03-01 Bastian Germann <bage@debian.org>
+
+ * configure.ac, NEWS: Release version 5.4.2
+
+2023-03-01 Simon Chopin <schopin@ubuntu.com>
+
+ * uncompression.c: uncompress: close the pipe after the child exits
+
+2022-12-14 Hu Deng <hudeng@uniontech.com>
+
+ * archallflood.c, upgradelist.c: fix: redundant header file
+
+2022-08-30 Bastian Germann <bage@debian.org>
+
+ Add SHA512 support (Thanks to Hu Deng)
+
+2022-08-17 Bastian Germann <bage@debian.org>
+
+ * debfilecontents.c: If data tar extraction fails try again as uncompressed
+ Some packages have an uncompressed data.tar.gz.
+ It seems that the "ar" code doesn't support reading a member more
+ than once, so it is necessary to retry the whole process in
+ uncompressed mode rather than just retrying reading the data member.
+ * signedfile.c: Prevent duplicated keyid in signing error message
+ Reported by: Uwe Kleine-König
+ * configure.ac: Release version 5.4.1 with patches from Debian bug tracker
+
+2022-08-17 Luca Capello <luca.capello@infomaniak.com>
+
+ * docs/mail-changes.example: new file to notify processing of .changes files
+
+2013-12-18 Bernhard R. Link <brlink@debian.org>
+
+ * checkin.c, ignore.h: Add --ignore=conflictingarchall
+ This is useful if autobuilders for more than one architecture will
+ build Architecture: all packages of the same version.
+ Based on a patch by Sjoerd Simons.
+
+2022-07-27 Bastian Germann <bage@debian.org>
+
+ Integrate Benjamin Drung's work
+
+ * ChangeLog: Add missing entries
+ The entries are generated from git and edited manually.
+ The git changes are not in chronological order,
+ so dates can appear more than once.
+ * NEWS: Copy from "Release Notes" in Benjamin's README.md
+ * README: Integrate info from Benjamin's README.md.
+ Integrate "How to keep multiple versions" and "Database layout changes".
+ * TODO: Remove "multiple versions" entry
+ * configure.ac: Release version 5.4.0 with multiple versions feature
+
+2021-07-20 Benjamin Drung <benjamin.drung@ionos.com>
+
+ Add trace debugging output
+
+2017-04-12 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Accept .ddeb files as dbgsym packages
+
+2017-02-28 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/basic.sh, tests/multiversion.sh:
+ Add test cases for Archive option
+ * distribution.c, distribution.h, docs/reprepro.1, target.c:
+ Add Archive option
+
+2017-02-27 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/basic.sh, tests/multiversion.sh: Add test cases for move* commands.
+ Add test cases for the move, movesrc, movematched, movefilter commands.
+
+2017-03-30 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * copypackages.c: package_add: Add fromtracks parameter
+
+2017-02-23 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * copypackages.c: Add fromtarget to struct target_package_list.
+ The move commands needs access to the from target to remove the
+ packages after adding them to the destination target.
+ * copypackages.c, copypackages.h, main.c:
+ Enhance copy functions parameters to support moving
+ * database.c: Convert old database format into new format
+ * docs/reprepro.1, docs/reprepro.bash_completion, main.c: Add move* commands.
+ Add the commands move, movesrc, movematched, movefilter.
+ * copypackages.c: Implement remove source support
+
+2017-02-24 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * database.c: Remove tracking of opened individual databases
+ * database.c: Add print_opened_tables (for debugging purposes)
+ * database.c: Keep track of all opened database tables.
+ The move command will need to open two tables at the same time (the
+ source table and the destination table). Thus keep track of all
+ opened tables.
+
+2017-04-11 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Use database environment
+
+ * database.c: When opening multiple databases in parallel (needed for the
+	move command or the archive option), the databases need to be configured
+	with locking. Thus a database environment is needed. Open and close
+ the database environment when getting/releasing the database lock.
+
+2018-08-27 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/multiversion.sh, tests/old-database/conf/distributions,
+ tests/old-database/db/version: Add test case for bug
+ "Database migration screws up database names"
+
+2017-02-07 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/Makefile.am, tests/basic.sh, tests/multiversion.sh,
+ tests/shunit2-helper-functions.sh: Add multiversion test
+ cases
+
+2017-02-28 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Add Limit option
+
+ * configparser.h, distribution.c, distribution.h, docs/reprepro.1, target.c:
+ Limit the number of versions of a package per distribution,
+ architecture, component, and type. The limit must be a number. If
+ the number is positive, all old package version that exceed these
+ limit will be removed when a new package version is added. If the
+ number is zero or negative, all package version will be kept. By
+ default only one package version will be kept.
+
+2018-08-30 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tracking.c: Support multiple versions for removesrc command
+
+2017-02-06 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * database.c: Support adding the same upstream tarball twice.
+ DB_DUPSORT allows duplicate keys in the database, but not
+ duplicate key/value pairs. Only if the duplicate data items are unsorted,
+ applications may store identical duplicate data items.
+ Since the references.db stores a the filekey mapping to the
+ codename|component|architecture triplet, there might be identical
+ duplicates, when upstream tarballs are references by multiple
+ version. Therefore switch references.db from DB_DUPSORT to DB_DUP.
+ * main.c: Use nameandversion struct for remove command.
+ The multiple version support will require to pass a list of names
+ and versions to the remove_from_target() function instead of just a
+ list of names. Thus use the nameandversion struct for the remove
+ command.
+ * copypackages.c, database.c, database.h, docs/reprepro.1, main.c,
+ release.c, target.c: Add multiple version management
+
+2017-02-23 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ table_addrecord: Support non-duplicate tables
+
+ * database.c: The DB_NODUPDATA flag may only be specified if the underlying
+ database has been configured to support sorted duplicates. Thus do
+ not set the DB_NODUPDATA flag when the database does not support
+ duplicates. To avoid querying the flags on each call, save the flags
+ when opening the table.
+
+2017-02-02 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Introduce nameandversion struct for copy command
+
+ * copypackages.c, copypackages.h, main.c:
+ The multiple version support will require to pass a list of names
+ and versions to the copy_by_name() function instead of just a list
+ of names. Thus introduce a nameandversion struct that also holds
+ the data needed for the copy_by_name() function.
+
+2018-08-29 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Fix "Package database is not sorted" in update command
+
+ * upgradelist.c: When multiple versions of one package are available in the archive,
+ the update command will fail:
+ ```
+ Calculating packages to get...
+ Package database is not sorted!!!
+ reprepro: upgradelist.c:135: save_package_version: Assertion `false' failed.
+ Aborted
+ ```
+ Fix this assertion error by iterating only over the newest version of each package.
+
+2018-08-29 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ package_openiterator: Pass through duplicate option
+
+2018-08-29 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Add duplicate option to table_newglobalcursor
+
+ * database.c, database.h, filelist.c, files.c, main.c, reference.c,
+ sizes.c, target.c, tracking.c: Allow to open a cursor that either
+ iterates over all database entries or only over the first of each
+ duplicate (i.e. only the latest version of each package).
+
+2017-02-08 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Support listing multiple versions in list command
+
+ * main.c: Currently only one package version is supported for each target,
+ but prepare support for multiple versions. Instead of querying only one
+ package for each target in the list command, iterate over all
+ packages with the given name for each target.
+
+2017-02-03 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Support listing multiple versions in ls command
+
+ * main.c: Currently only one package version is supported for each target,
+ but prepare support for multiple versions. Instead of querying only one
+ package for each target in the ls command, iterate over all packages
+ with the given name for each target.
+
+2017-04-10 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * distribution.h, tracking.c, tracking.h:
+ Remember opened tracking databases
+ * copypackages.c, incoming.c, main.c, needbuild.c, tracking.c, tracking.h:
+ Pass distribution to tracking_done.
+ For a later commit, pass the distribution to tracking_done.
+
+2017-03-28 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ package_openiterator: Support opened databases
+
+ * package.h, target.c: This change is a preparation for the
+ package_openduplicateiterator() function.
+
+2017-04-10 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Change error handling
+
+ * target.c: Use variable 'result' only for the final returned result.
+
+2017-02-06 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * database.c: table_close: Set default return value
+
+2017-02-02 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * main.c: Introduce splitnameandversion().
+ The multi version support will require splitting the
+ name and version in multiple places. Thus moved the code in a
+ splitnameandversion() function.
+ * main.c, target.c, target.h, upgradelist.c:
+ target_removepackage: Support specifying package version.
+	If no package version is specified, use the latest version.
+ * main.c: rename todo to remaining.
+ The word 'todo' is used for marking todo items for the programmer.
+ Thus use 'remaining' instead of 'todo' as variable name.
+ * database.c: Move cursor struct upwards.
+ Move cursor struct upwards to have the struct definition in one block.
+ * globals.h: Add helper function strcmp2()
+ * copypackages.c: Add helper function cascade_strcmp()
+ * target.h: Add helper function package_primarykey()
+ * database.c, error.h: Add helper function get_package_name()
+ * database.c: Add helper function debianversioncompare()
+
+2017-02-01 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * database.c: table_getrecord: Add newline to error message
+ * database.c: table_printerror: Improve database error message
+
+2017-02-03 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * database.c: Introduce static newcursor() function.
+ There are multiple places where new cursors are generated.
+ Remove duplicate code by introducing the newcursor() function.
+ * target.c: Print version when removing a package
+ * database.c, database.h, files.c, main.c, reference.c:
+ Merge cursor_nexttemp() into cursor_nexttempdata().
+ cursor_nexttempdata has an additional len_p output parameter (compared
+ to cursor_nexttemp). Make the len_p output parameter optional and
+ replace cursor_nexttemp by cursor_nexttempdata.
+ Thus cursor_nexttemp(...) becomes cursor_nexttempdata(..., NULL).
+ * database.c: Introduce parse_data().
+ The cursor_nextpair() function has a parse_pair() function for
+ evaluating the returned database output. Introduce a similar
+ parse_data() function for the cursor_nexttempdata() function.
+ * database.c: Introduce cursor_next().
+ The functions cursor_nexttempdata() and cursor_nextpair() share a similar logic.
+ Thus combine the duplicate code in cursor_next().
+ cursor_nexttempdata() set always DB_NEXT as cursor flag instead of
+ using the cursor->flags value. All users of cursor_nexttempdata()
+ call table_newglobalcursor() beforehand.
+ * database.c, database.h, tracking.c: rename table_newduplicatecursor.
+ Rename table_newduplicatecursor to table_newduplicatepairedcursor
+ to make use this name for a data cursor.
+ * database.c, database.h: Add helper function table_newduplicatecursor()
+ * package.h, target.c: Add helper function package_openduplicateiterator()
+
+2018-08-27 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * guesscomponent.c: Fix missing quotation mark in component list.
+ The error message in guess_component misses a leading quotation
+ mark, for example: Could not find 'main' in components of 'bionic': contrib'
+
+2017-03-30 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/trackingcorruption.test, tracking.c:
+ Fix typo "could not found" -> "could not find"
+
+2017-02-03 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Evaluate return value of write command
+
+ * signature.c: Compiling reprepro produces this warning:
+ ```
+ signature.c: In function ‘signature_getpassphrase’:
+ signature.c:63:2: warning: ignoring return value of ‘write’, declared with attribute warn_unused_result [-Wunused-result]
+ write(fd, p, strlen(p));
+ ^~~~~~~~~~~~~~~~~~~~~~~
+ signature.c:64:2: warning: ignoring return value of ‘write’, declared with attribute warn_unused_result [-Wunused-result]
+ write(fd, "\n", 1);
+ ^~~~~~~~~~~~~~~~~~
+ ```
+
+2014-05-30 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * main.c: Fix indentation (spaces to tabs)
+
+2017-08-22 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ Use useful names for .changes files
+
+ * tests/genpackage.sh: Use the common naming schema $source_$version_$arch.changes
+ for the name of the .changes files for testing.
+
+2017-03-28 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/genpackage.sh: Silence output
+ * tests/genpackage.sh: Use existing priority.
+ Using the non-existing priority 'superfluous' causes warning messages.
+
+2017-02-06 Benjamin Drung <benjamin.drung@profitbricks.com>
+
+ * tests/genpackage.sh: Use the host architecture by default.
+ To be able to build packages for the tests, use the host
+ architecture (to avoid requiring a cross-compiler).
+ * tests/genpackage.sh: Use dpkg-source format 3.0.
+ To test the handling of upstream tarballs,
+ switch from source format 1.0 to either 3.0 (quilt) or 3.0 (native).
+ * tests/Makefile.am, tests/basic.sh, tests/shunit2-helper-functions.sh:
+ Add basic shunit2 based tests
+
+2022-07-14 Bastian Germann <bage@debian.org>
+
+ Continue ceased upstream development
+
+ * ChangeLog: Add some missing entries
+ * NEWS: Mention major 5.3.1 work
+ * configure.ac: Release existing patches as 5.3.1
+ * configure.ac: Drop Bernhard's email as bug address;
+ Thanks for all the work making reprepro a stable repo tool!
+
+2021-06-18 Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+ * Add Zstd support
+
+2021-06-15 Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+ * Bump up the maxsize on a fixed-size C buffer
+ * Flush stdout, stderr before calling endhook.
+
+2019-08-04 Bernhard R. Link <brlink@debian.org>
+ * fix manpage to add the behaviour if reprepro is linked against liblzma
+	* adapt testsuite and more places in documentation to the non-deprecated list of --export values
+ * remove no longer needed warning suppressing code
+ * mark 'dumpcontents' command as deprecated
+
+2019-02-02 Bernhard R. Link <brlink@debian.org>
+ * fix more spelling errors
+ * fix some spelling errors in comments
+ * fix some spelling errors in the manpage
+ * handle a missing Binary field in a .changes file like an empty one.
+ (So not having one in a source only upload will be ignored,
+ while a missing one in a binary upload will complain about the
+ packages not listed in Binary: instead of complaining about not
+ having a Binary field).
+
+2018-09-23 Bernhard R. Link <brlink@debian.org>
+ * check command no longer checks if the files
+	  of existing packages are named as they are
+ expected to be named. (There was no way to
+	  get them wrong apart from editing the database and
+ checking it makes the code more complex).
+
+2018-08-26 Bernhard R. Link <brlink@debian.org>
+ * mark .lz support as deprecated
+
+2018-08-12 Bernhard R. Link <brlink@debian.org>
+ * allow to set Signed-By header via conf/distributions
+ * add _listcodenames command (based on work from Benjamin Drung)
+ * drop "FILE LOCATION" headers from Contents files
+ * multiple manpage fixes
+ (thanks to Paul Wise, Simon Kainz, Christoph Biedl)
+
+2017-03-02 Bernhard R. Link <brlink@debian.org>
+ * handle .asc files in source files better
+ (thanks to Marc Laue)
+
+2017-01-31 <flapflap@riseup.net>
+ * allow '+' character in method-URI
+
+2016-12-28 Bernhard R. Link <brlink@debian.org>
+ * improve error handling when extracting .deb file contents
+
+2016-12-23 Bernhard R. Link <brlink@debian.org>
+ * properly report errors of the internal xz and lzma decompressors
+ * when using the builtin .xz uncompressor,
+ support concatenated streams
+ * when using the builtin .gz uncompressor,
+ support concatenated streams
+
+2016-12-22 Bernhard R. Link <brlink@debian.org>
+ * add unreferencesnapshot and removereference commands
+ * document --export=silent-never
+ * when using the builtin .bz2 uncompressor,
+ support concatenated streams
+
+2016-12-21 Bernhard R. Link <brlink@debian.org>
+ * fix behaviour of (Deb|Dsc)Indices without a Release file
+ (if no Release filename was given the default was used
+ instead of not creating a file)
+ * document what is needed to use --ask-passphrase with newer
+ gnupg versions in the manpage
+
+2016-12-21 Bernhard R. Link <brlink@debian.org>
+ * add support for .buildinfo files in .changes files:
+ - new tracking mode includebuildinfos to store them in pool/
+ - ignored by 'include' unless Tracking: includebuildinfos
+ - processincoming with LogDir set stores them like log files
+ - otherwise ignored by 'processincoming' if not used
+ - new Cleanup: unused_buildinfo_files for conf/incoming to
+ remove used buildinfo files.
+
+2016-12-18 Bernhard R. Link <brlink@debian.org>
+ * fix some logical errors in some warnings.
+
+2016-10-22 Bernhard R. Link <brlink@debian.org>
+ * drop workaround for apt-method interface change:
+ answer 103 is now always expected to end the method's doing
+ (in other words the http apt method from squeeze and before is no
+ longer supported)
+
+2016-03-* Bernhard R. Link <brlink@debian.org>
+ * refactor code to use struct package and struct package_cursor
+ most of the time package data is accessed.
+
+2016-03-13 Bernhard R. Link <brlink@debian.org>
+ * fix bug in flood that could get confused which binary package
+ belongs to which source if a destination contains packages
+	  belonging to different versions of the same source.
+ * fix bug in the message about "warning" triggered in FilterList
+ of pull. ('(null)' was printed instead of the package name).
+
+2015-12-28 Bernhard R. Link <brlink@debian.org>
+ * fix duplicated --keeptemporaries description in manpage
+ * add Permit: unlisted_binary for conf/incoming
+ * if encountering a -dbgsym package in a .changes file, check the name
+ without -dbgsym in the Binary: header instead
+
+2015-06-13 Bernhard R. Link <brlink@debian.org>
+ * add Exportoptions: to conf/distributions,
+ allowing to give "noexport" to never export a distribution.
+
+2015-05-09 Bernhard R. Link <brlink@debian.org>
+ * ignores lines starting with '#' in filterlists
+ * fix error parsing control files with multiple spaces/tabs after a colon
+
+2014-11-12 Bernhard R. Link <brlink@debian.org>
+ * fix segfault when verbose exporting with .xz indices
+
+2014-08-24 Bernhard R. Link <brlink@debian.org>
+ * fix DownloadListsAs not accepting .lz
+ * add support for unpacking .xz and .lzma files with liblzma
+ instead of calling unxz and unlzma.
+ * default to linking with liblzma if no --with or --without is given
+
+2014-08-16 Bernhard R. Link <brlink@debian.org>
+ * rename the old python pdiff implementation example script
+ from tiffany.example to pdiff.example and make it use python3.
+
+2014-06-28 Bernhard R. Link <brlink@debian.org>
+ * fix compiling without liblzma.
+ * disable liblzma usage unless explicitly requested
+	  (to avoid the double-dependency to liblzma and lzma-utils).
+
+2014-06-14 Bernhard R. Link <brlink@debian.org>
+ * add xz.example to script to generate Packages.gz
+ * improve multiple_distributions description in manpage
+
+2014-06-03 Bernhard R. Link <brlink@debian.org>
+ * multiple fixes to the outsftphook example
+
+2014-05-10 Bernhard R. Link <brlink@debian.org>
+ * add support for linking against liblzma
+ and generating .xz Indices.
+ (no changes to decompressing code yet, that still
+ needs xzcat available)
+
+2014-03-18 Bernhard R. Link <brlink@debian.org>
+ * update ignore source packages with ExtraSourceOnly by default,
+ unless the new OmitExtraSourceOnly option is set in conf/updates
+ to false.
+ * fix override mechanism of .udeb files
+
+2014-02-12 Lukas Anzinger <l.anzinger@gmail.com>
+ * add _addreferences to add multiple references at once
+
+2014-02-11 Bernhard R. Link <brlink@debian.org>
+ * improve the errormessage of processincoming if the inclusion of a
+ package is forbidden by uploaders files.
+
+2013-11-21 Bernhard R. Link <brlink@debian.org>
+ * automatically add long Descriptions when updating from a source that
+ does not have them in the Packages files.
+
+2013-10-05 Bernhard R. Link <brlink@debian.org>
+ * fix docs/outstore.py to work with newer python3 dbm behaviour
+ * more strict checking of all size information in .changes files
+
+2013-06-15 Bernhard R. Link <brlink@debian.org>
+ * use libarchive_read_free instead of libarchive_read_finish
+ with newer libarchive to avoid issues with future versions.
+ * repairdescriptions also repairs udeb descriptions
+
+2013-06-15 Bernhard R. Link <brlink@debian.org>
+ * make reprepro compile with libdb6.0
+
+2013-06-02 Bernhard R. Link <brlink@debian.org>
+ * as gcc got better, remove conditional workarounds for
+ most uninitialized-false-positives and make the remaining
+	  cases unconditional (but marked with SETBUTNOTUSED).
+
+2013-05-30 Bernhard R. Link <brlink@debian.org>
+	* fix bug in restore to only act if the
+ last package looked at is restored.
+
+2013-05-04 Bernhard R. Link <brlink@debian.org>
+ * build-needing properly handles sources with
+ architecture wildcards (linux-any) in them.
+
+2013-04-12 Bernhard R. Link <brlink@debian.org>
+ * fix percomponent udeb Contents filenames
+
+2013-02-17 Bernhard R. Link <brlink@debian.org>
+ * add outsftphook.py example
+
+2012-12-31 Bernhard R. Link <brlink@debian.org>
+ * add --outhook
+
+2012-12-20 Bernhard R. Link <brlink@debian.org>
+ * fix inconsistent spacing of ls command,
+ * fix --nothingiserror ls not treating no result as error
+ * add lsbycomponent command (as ls, but grouped by component)
+
+2012-12-15 Bernhard R. Link <brlink@debian.org>
+ * move around some of the code related to moving
+ (In)Release(.gpg) to it's final place. Side effect
+ is that those files are removed if there are no longer
+	  is that those files are removed if they are no longer
+
+2012-12-09 Bernhard R. Link <brlink@debian.org>
+ * unify export handling (moving it out of the
+ action specific code)
+
+2012-12-02 Bernhard R. Link <brlink@debian.org>
+ * keep around relative release filenames always
+
+2012-11-24 Bernhard R. Link <brlink@debian.org>
+ * make setting of environment variables for
+ hooks more uniform (and with less code duplication).
+
+2012-11-17 Bernhard R. Link <brlink@debian.org>
+ * '~/' or '+{b,o,c}/' or './' now also special
+ in ByHandHooks and ListHook.
+ * add support for signing hooks (SignWith: !...)
+
+2012-11-11 Bernhard R. Link <brlink@debian.org>
+ * add --endhook to start a script when terminating
+
+2012-11-04 Bernhard R. Link <brlink@debian.org>
+ * add repairdescriptions command to readd missing long
+ descriptions (which you might get as reprepro cannot yet
+ get Translations files and get them from there)
+ from the .deb files.
+
+2012-10-30 Bernhard R. Link <brlink@debian.org>
+ * add ${$basename}, ${$filekey} and ${$fullfilename} to --listformat
+ * fix some bitrot in the non-libarchive code paths
+
+2012-10-21 Bernhard R. Link <brlink@debian.org>
+ * reject absurd large values in ValidFor header
+ * fix wrong include type in termdecide.h
+
+2012-09-03
+ * fix overlong VerifyRelease example in manual.html
+
+2012-07-12
+ * add 'deleteifunreferenced' command to safely delete
+ and forget the given files in a repository with
+ keepunreferencedfiles set.
+
+2012-07-11
+ * fix bug in checking old unchanged {Packages/Sources}.bz2
+ files for existence. (Triggering even an assertion when
+ only .bz2 index files are requested).
+ * ignore diff comments about unterminated lines
+ when parsing .diff files
+
+2012-06-24
+ * support http-method's extended 103 redirect status
+ * actually set REPREPRO_CONFIG_DIR in hooks as
+ documented in manpage.
+ * document more environment variables in manpage
+
+2012-06-07
+ * fix bash and zsh completion to work with
+ conf/distributions and conf/incoming directories.
+ * fix allocation error with more than 16 group
+ members in allocation files.
+
+2012-05-30
+ * add support for -A, -C, -T to *update and *pull.
+
+2012-05-22
+ * try to get InRelease from remote repositories
+ instead of Release (with fall-back of the old behaviour)
+ * new GetInRelease: to conf/updates, defaults to yes
+
+2012-05-21
+ * fix some errors when compiled without libgpgme
+
+2012-05-20
+ * normalize included package control information to
+ always start with the Package: field (as some clients
+ assume that).
+ * don't require md5sum to download binary or source packages
+ in the remote index files (any known hash suffices)
+
+2012-05-19
+ * avoid some problem with gcc-4.7
+
+2012-04-24
+ * change Contents-* files generation default from
+ "allcompontents" to "percomponent compatsymlink".
+ (i.e. best for >= wheezy, only first component visible for
+ <= squeeze)
+
+2012-04-04
+ * 'include' now only warns about section "unknown" instead of
+ rejecting it. add warnings to 'includedsc' and 'includedeb', too.
+
+2012-03-26
+ * allow absolute filenames in !include directives, and
+ expand filenames starting with "~/" "+b/" "+c/" in those
+ and export hooks, filter lists, log scripts, override filenames,
+ and uploaders filenames.
+ * conf/distributions, conf/updates, conf/pulls and conf/incoming
+ or files included by those can be directories with all *.conf
+ files read instead.
+
+2012-03-25
+ * changelogs.example can now also place changelogs in places
+ where apt-get changelog looks for "third party site" changelogs.
+ * add 'supersede' as FilterList keyword to remove the old package
+	  if the new would be installed otherwise.
+ * fix broken test against leading whitespace in config file
+ field names
+ * add support for !include directive in conf/distributions,
+ conf/updates, conf/pulls and conf/incoming.
+
+2012-01-23
+ * reject "any" as Architecture part of a distribution
+
+2012-01-21
+ * build-needing now can list missing architecture 'all'
+ packages. (Will not list .dsc files producing both
+	  architecture dependent and architecture independent ('all')
+ packages unless they are built with dpkg-dev >= 1.16.1,
+ though).
+
+2012-01-19
+	* build-needing takes 'any' instead of an architecture, too.
+ * uploader files can 'include' other files.
+
+2012-01-17
+ * improve config file parser error messages about missing fields
+
+2010-12-18
+ * rredtool: produce .diff/Index files that reprepro can understand.
+ * warn if uploader files contains key ids too long to handle
+ * don't warn against .git files as unknown extension
+
+2010-12-09
+ * if failing to parse .diff/Index, proceed with other
+ ways to retrieve Packages/Sources.
+
+2010-10-30
+ * don't give spurious warnings about "strange filekey"s if
+ components contain slashes.
+
+2010-10-10
+ * fix NULL-reference segfault if patch in a Packages.diff
+ does not have a history attached to it (or if it is listed
+ two times)
+
+2010-10-03
+ * when using nocompatsymlink in Contents warn about
+ old file/symlink still present.
+
+2010-09-28
+ * fix archive_set_error calls
+ (don't give error messages as format strings)
+ * remove undocumented Contents: options with leading +/-
+ * add compatsymlink nocompatsymlink Contents: options
+ (and document that the default will change in the future)
+
+2010-08-22
+ * add 'redochecksums' command to complete the checksum information
+ in package indices.
+
+2010-08-19
+ * add percomponent and allcomponents to Contents: flags
+ to switch between the format of Contents file to generate.
+ Currently the default is allcomponents but that will switch
+ later.
+ * fix bug that would delete files only to be deleted after an
+ successful export also when aborting an export
+
+2010-07-07
+ * don't give downgrading message if not downgrading but
+ replacing with same version
+
+2010-06-02
+ * fix bug not deleting packages if none added in update
+
+2010-05-05
+ * ignore leading comments in control files
+
+2010-04-18
+ * add --restrict and --restrict-bin to restrict update
+ and pull operations to specific packages.
+ * add --restrict-file and --restrict-file-bin.
+
+2010-04-17
+ * add --export=silent-never like never but silenting
+ all warnings (mostly useful for testsuite).
+ * avoid 'Data seems not to be signed trying to use directly'
+ message if data start like unsigned file should start.
+
+2010-04-16
+ * add 'FilterSrcList'.
+
+2010-04-15
+ * Many clean-ups and coding style fixes.
+
+2010-03-30
+ * Support specifying a version in FilterList
+
+2010-02-29
+ * support compiling with libdb5
+ * fix memory bug in filelist generation
+	  (as realloc usually does not move stuff when reducing the size that
+ is no real issue, but newer valgrind detects it and warns).
+
+2010-02-28
+ * 'check' also checks if architectures match
+ * fix 'sourcemissing', 'unusedsources' and 'reportcruft'
+ on distributions without tracking.
+ * fix 'pull' copying packages with wrong architecture
+
+2010-02-21
+ * support reading of Release files without MD5Sum
+ * add all missing Checksums-* when importing from
+ remote repositories
+ * allow md5 in IgnoreHashes
+
+2010-02-16
+ * make 'sourcemissing', 'unusedsources' and 'reportcruft' work on
+ distributions without tracking.
+
+2010-02-14
+ * add 'reportcruft' command
+ * ignore source checking in distributions without 'source' architecture
+
+2010-01-30
+ * add 'sizes' command.
+ * add "distribution 'codename'" support to uploaders files.
+ * some fixes for __checkuploaders
+
+2010-01-27
+ * SignWith can take multiple arguments to denote multiple keys to
+ sign a repository with.
+
+2010-01-22
+ * add removesrcs command (like removesrc but can get multiple
+ source package names)
+
+2010-01-03
+ * add groups to Uploaders:-lists.
+ * add __checkuploaders command so uploaders lists can be tested
+ from the test-suite
+
+2010-12-23
+ * fix some minor memory/resource leaks found by cppcheck
+
+2010-10-16
+ * support "ButAutomaticUpgrades" field to be copied to
+ the generated Release files (Thanks to Modestas Vainius)
+
+2010-10-15
+ * add support for lzip compressed files
+ (Thanks to Daniel Baumann for the patch).
+
+2010-09-10
+ * add special '$Delete' override field to delete fields
+
+2010-09-09
+ * fix reoverride problem with packages only having a $Component
+ special-override-field.
+
+2010-08-12
+ * fix missing #ifdef breaking --without-libbz2 compiles
+ * include sys/stat.h in filecntl.c, thanks to Jeroen van Meeuwen
+
+2010-08-04
+ * add unusedsources and sourcemissing commands.
+
+2010-07-10
+ * create InRelease files when signing...
+
+2010-07-05
+ * special $Component in override files will force
+ placing packages in the specified component
+ upon inclusion (unless -C is given).
+
+2010-07-04
+ * consult override files when importing packages
+ with 'update' or 'pull'.
+
+2010-07-01
+ * fix inconsistency in changelog.example.
+ Thanks to Christoph Mathys.
+
+2010-06-30
+ * allow patterns in override files
+
+2010-06-29
+ * do not stop with error if a downloaded Packages
+ file contains unexpected wrong Architecture lines
+ but only print a warning. Add --ignore=wrongarchitecture
+ to not print that warning.
+
+2010-06-26
+	* store override data in a tree instead of a list and
+ some preparations for patterns in override files.
+
+2010-06-25
+ * Ignore overrides for fields starting with '$' and
+ warn about unknown fields to allow later introduction
+ of special values.
+ * disallow overrides of core fields (Package, Version,
+ Filename, ...)
+
+2010-05-07
+ * add --onlysmalldeletes option that cancels pulls
+ and updates that delete more than 20% of some target
+ (but at least 10 packages). The change also causes
+ update no longer claiming to get packages if there are
+ not any packages to get...
+
+2010-04-30
+ * change parsing of .changes lines to cope with
+ N_V.orig-X.tar.C files where V.orig-X does not survive
+ a proper version check (underscores most prominently).
+
+2010-04-23
+ * Fix typo causing --changes Log-notifiers not being called
+ with processincoming in many cases.
+
+2010-04-07
+ * add '${$source}' and '${$sourceversion}' to --list-format
+
+2010-03-31
+ * describe byhand file in the manpage's "nomenclature".
+
+2010-03-19
+ * add "dumbremove" to changestool.
+
+2010-02-10
+ * fix failure if trying to extract exactly one of
+ section or priority from a tar file.
+
+2010-01-24
+ * add ByHandHooks to conf/distributions for hooks
+ called by processincoming (and in the future perhaps by include)
+
+2010-01-18
+ * properly handle relative LogDir in conf/incoming
+
+2009-12-08
+ * add byhand statement to uploaders files
+
+2009-11-22
+ * fix build with --without-libgpgme
+ (thanks to Reto Gantenbein for reporting)
+
+2009-11-16
+ * include <stdint.h> where *int*_t is used
+
+2009-11-13
+ * 'include' now errors out early if the .changes includes source files but
+ no .dsc file.
+
+2009-11-12
+ * add mode to rredtool to act as reprepro index hook and generate
+ and update a *.diff/Index file.
+
+2009-11-06
+ * when 'include'ing a .changes file, do not insist on section
+ information of non-.dsc source files.
+
+2009-10-27
+ * Do not warn about a missing VerifyRelease if there is a
+ IgnoreRelease.
+ * Handle apt transport methods returning missing files as
+ success with alternate filename suggestion more gracefully.
+ * when getting packages from another architecture while updating,
+ ignore all packages with architecture not fitting into the target.
+ (Fixes a regression introduced in 3.8.0)
+
+2009-10-21
+ * reduce number of places where new compressions must be added
+ * improve checking for proper filenames in changestool's verify
+ * allow .build as synonym for .log as suffix in changes files
+
+2009-10-20
+ * reduce number of places where new compressions must be added
+
+2009-10-17
+ * support xz compressed files if unxz is installed.
+
+2009-10-02
+ * make 'check' (and some other commands) warn if a file expected
+ is not in the checksums database but found correctly in the pool.
+
+2009-09-23
+ * Method: and Fallback: in conf/updates now strip the last '/' from
+ the URI given. (Some apt methods get confused if they get "//").
+
+2009-09-15
+ * fix exit-code of 'list' with --nothingiserror
+
+2009-09-10
+ * call gpgme_check_version so that libgpgme 1.2.0 does not fail
+ to initialize.
+
+2009-08-24
+ * remove all files.db code (except translatelegacyfilelists).
+ * remove --oldfilesdb option.
+ * remove --overridedir
+
+2009-08-23
+ * warn if old legacy files.db is still used and add new
+ translatelegacyfilelists command for easier migration.
+
+2009-08-21
+ * new --showpercent option to show percent and total
+ download size when downloading packages.
+ * do not output the new warning about a new architecture
+ when all architectures are new (i.e. new distribution)
+
+2009-08-20
+ * new 'Options: limit_arch_all' in conf/incoming causes
+ processincoming to only put architecture all packages into
+ the architectures uploaded with them to allow usage together
+ with 'flood'.
+
+2009-08-18
+	* speed up 'flood' by using a tree instead of a list for source
+ package lookups.
+
+2009-08-17
+ * add new 'flood' command to distribute architecture all packages
+ within one architecture.
+
+2009-08-15
+ * -A, -T and -C can now have multiple arguments separated by '|'.
+
+2009-08-13
+ * FakeComponentPrefix now does not add the prefix to components
+ already having it and removes it from the relative directory where
+ it is put into (so no duplication on the whole path, either).
+
+2009-08-06
+ * command line (and conf/options) options to specify a directory
+ now treat arguments starting with '+b/', '+c/' or '+o/' as relative
+ to the basedir, confdir or outdir.
+ * warn if directories do not start with '/', './' or '+x/'.
+
+2009-08-05
+ * if a package is not accepted by processincoming because no
+ distribution is found for it or no distribution allows it, the
+	  exit code is now 243
+
+2009-08-03
+ * add a MorgueDir option to conf/incoming where cleaned up files
+ are moved to.
+ * if a .changes has improper name, version or architectures,
+ trigger the 'Cleanup: on_error' case.
+
+2009-08-01
+ * improve deleteunreferenced's error message with keepunreferencedfiles
+
+2009-07-25
+ * add $Version, $Source, $SourceVersion, $Architecture, $Component,
+ $PackageType as special fields in formulas.
+
+2009-07-21
+ * fix build-needing to look at the correct Architecture field in
+ .dsc files.
+
+2009-07-20
+ * add an --morguedir where files removed from the pool are
+ stored.
+
+2009-07-15
+ * add --create-with-all-fields to changestool that is
+ like --create but also creates Urgency and Changes fields.
+
+2009-07-11
+ * make predelete also call retrack when needed,
+ silence false warning of stale tracking by removesrc
+
+2009-07-10
+ * warn if a distribution with tracking is modified in a form tracking
+	  data might get out of date. update and pull automatically cause a
+ retrack on distributions with tracking enabled.
+
+2009-07-09
+ * some more improvements to the build-needing command
+
+2009-07-07
+ * fix bug in processincoming not accepting Suite or AlsoAcceptFor
+ because of counting it two times and erroring out.
+ (Thanks to Wookey for finding this bug).
+
+2009-06-16
+ * add listmatched, removematched, copymatched and restorematched.
+ (For those who think listfilter 'Package (% glob)' is too hard
+ to write, to remember or too slow).
+ * add build-needing command
+
+2009-06-05
+ * add glob-matching in formulas via '(% pattern)'
+ * uploaders list conditions that supported stars
+ now use the generic globmatch (thus more stars and ? and []).
+
+2009-06-03
+ * new --list-max and --list-skip
+
+2009-06-02
+ * new 'architectures' condition for uploader lists and other
+ conditions support 'contains' now.
+
+2009-05-31
+ * add --list-format
+
+2009-05-29
+ * add _listdbidentifiers and _listconfidentifiers
+ * add condition "source 'something'" for uploader lists,
+	  to limit an uploader to packages with the specified source.
+
+2009-05-22
+ * allow subkey matching in uploader lists, 'unsigned' now only
+ means unsigned while the new 'anybody' means everybody.
+ Preparations for more conditions.
+
+2009-05-12
+ * copy and copysrc give warnings about not found packages unless
+ verbosity is reduced by --silent. (To help people catch their typos).
+
+2009-04-13
+ * rewrite Release.gpg verification code:
+	  - to allow usage of expired or revoked keys, the key-id
+ in VerifyRelease has to be appended with '!' and the corresponding
+ new ignore option given.
+ - subkeys are accepted if the key-id is appended with '+'.
+ - keys are requested from libgpgme before anything is downloaded
+ (helps catching c&p errors and makes subkey checks possible).
+ - if verification fails, the status of all found signatures is printed.
+
+2009-04-07
+ * bugfix: ListHook was not used in rules including the rule with it
+ in "From:"
+ * add "ListShellHook", that is like ListHook but with arguments and
+ the files in stdin and stdout.
+
+2009-04-03
+ * fix bug (caught by assertion) that inverts the logic of downloading
+ .diff files when there is no DownLoadListsAs line.
+
+2009-03-18
+ * support new suffix ".new." for export hooks.
+ (Which moves filename + ".new" to filename on success,
+ but unlike ".new" does not mention the file in Release)
+	* new suffix ".keep" for export hooks that just ignores that line,
+ for compatibility with future changes.
+ * warn if an (Deb|UDeb|Dsc)Indices line contains no filename.
+	  (warn against everything starting with a dot to avoid a user
+	  forgetting it and putting a compression identifier there).
+
+2009-03-14
+ * fix mishandling of libz return code
+ causing "Zlib error 1"..."stream end" error messages.
+ This defect seems to be only triggered with at least lenny's libz.
+ (And only when extracting Section and Priority from a dsc).
+
+2009-03-05
+ * Implement force.<compression> as DownLoadListAs item to download an
+ index not found in the Release file.
+ * warn if database is in old format
+
+2009-03-04
+ * also continue downloading index files after failure to get the
+	  preferred one in the IgnoreRelease case.
+
+2009-03-03
+ * regression fix: when updating with IgnoreRelease, old index
+ files were no longer deleted in 3.8 before telling the apt-methods to
+ download new ones, which can trigger buggy behaviour in those.
+ * if one index file fails to be downloaded, try the next one
+ (except for updates with IgnoreRelease, yet)
+
+2009-03-02
+ * fix bug not taking all DownloadListAs into account when multiple
+ update rules requests the same index file to be downloaded.
+ * if a .diff/Index file does not list the available Packages file
+ or if not for targeted file, proceed with other ways to retrieve
+ it.
+ * add .diff processing as first default when there is no
+ DownloadListsAs.
+
+2009-03-01
+ * support using Packages.diff when updating.
+ (Fallback to other methods not yet supported, so not yet enabled
+ in the default DownloadlistsAs)
+
+2009-02-28
+ * fix some bugs in --nothingiserror handling
+
+2009-02-27
+ * move handling of downloaded files from aptmethod.c to
+ the code queuing the files. (refactorisation in preparation of later
+ changes)
+
+2009-02-24
+ * fix race condition causing external uncompressors sometimes
+ to catch a sigpipe if their output is closed before they receive
+ the signal to kill them.
+ * changestool now supports looking into lzma files
+ (and bz2 files even when not compiled against libbz2), if
+ external uncompressors are available.
+ * fix bug extracting the Section and Priority from .diff
+ files if control was not the first file in it.
+ * fix bug .diff parsing's exception to also allow diff
+ generated files.
+
+2009-02-23
+ * log notifiers get variables REPREPRO_CAUSING_RULE and
+ REPREPRO_FROM set when adding packages via update/pull.
+ The later also in copy* and restore* commands.
+ * delete unexpected (i.e. not registered in the database)
+ files in pool when trying to replace with new ones.
+
+2009-02-21
+ * add --keeptemporaries and without it delete all .new files when
+ exporting fails (and not only Release) and with it keep all
+ (including Release). Also fix gpg error message to not suggest trying
+ it with a file that later will be deleted.
+
+2009-02-20
+ * add 'warning' flag for FilterList files
+
+2009-02-13
+ * add ReadOnly option for conf/distributions
+
+2009-02-08
+ * processincoming support includebyhand and includelogs tracking
+ options
+ * new LogDir for processincoming, that gets the .changes files,
+ .log files and unused byhand (or raw-*) files.
+
+2009-02-06
+ * ignore byhand and logfiles in 'include' unless tracking
+ with includebyhand or includelogs is activated, then store them
+ into the pool.
+
+2009-01-22
+ * fix typo causing copyfilter to fail
+ * add --gnupghome option to set GNUPGHOME environment variable
+ * fix importing of source packages from flat repositories without
+ a Directory field in Sources index.
+
+2009-01-17
+ * fix erroneous "strange filekey" warning for lib files in 3.8.0~alpha
+
+2009-01-16
+ * make Date: more like official Release files by replacing
+ the old "+0000" with "UTC".
+
+2009-01-15
+ * add support to generate Valid-Until in Release
+
+2009-01-09
+ * handle 'raw-*' sections like 'byhand' sections (i.e. mostly not
+ handle them, but give better error messages).
+
+2009-01-06
+ * add DownloadListsAs: option for conf/updates to specify which index
+ files (.gz, .bz2, .lzma, ...) to download when available.
+
+2009-01-04
+ * add support for libdb4.7 (yet with some warnings to note I have not
+ tested it much yet)
+ * bugfix in checkpool with old files.db
+
+2009-01-02
+ * FilterList/FilterFormula can be inherited with From: in update rules.
+	* bugfix: if FilterList returns hold, FilterFormula was not asked.
+	  Now it is only put on hold if FilterFormula also includes this package.
+ (pull/update)
+ * if a distribution is both flat and non-flat, do not raise an
+	  assert, but emit a warning and proceed (new flatandnonflat ignore
+ class to ignore that warning).
+
+2008-12-06
+ * add 'upgradeonly' value for FilterList, that only takes
+ an package into account if it already exists.
+
+2008-12-02
+ * implement cleanlists command
+
+2008-11-24
+ * fix bug in sha256 calculation over very large files
+
+2008-11-13
+ * add dumpupdate and dumppull actions that are like checkupdate and
+ checkpull but with less information but that more easily parseable.
+
+2008-11-04
+ * fix parsing error of contents of very big .deb files.
+ Thanks to Aramian Wasielak and Alexander Perlis.
+
+2008-11-03
+ * rework handling of files added to the pool not used by anything.
+ (for example because the package was not added due to error).
+ New --keepunusednewfiles option to not delete such files.
+
+2008-11-01
+ * print number of newly unreferenced file on --keepunreferenced
+ and commands not deleting their references.
+
+2008-10-30
+ * add support for flat repositories with Sources files without
+ Directory lines (Thanks to Cody A.W. Somerville for noting).
+
+2008-10-12
+	* some rework on unreferenced files bookkeeping. Should make no
+	  difference yet, but makes the "Deleting files not longer
+	  referenced" message only show up if something is deleted...
+
+2008-10-05
+	* Internally atomize components, architectures and packagetypes.
+ Causes multiple checks for unknown identifiers to be earlier or
+ more strict. (And fields in conf/distributions have more
+ restrictions w.r.t their order).
+ * fix bug in (tracking enabled) removesrc that caused malformed
+ tracking data when a source package's track record contains a
+ file no longer found in any distribution.
+ [2009-01-16: I previously believed this nearly impossible to
+	  trigger, but simply outdated tracking data already suffices
+ to trigger it]
+
+2008-10-01
+ * warn if an update rule references local components or architectures
+ that were not seen in conf/distributions (old behaviour was to check
+ if any distribution that references this rule had this architecture,
+ but that was too complex with the new rule-can-reference-rule
+ possibilities).
+
+2008-09-18
+ * update rules can include other rules with From: allowing
+ leaner conf/updates file and avoiding duplicate downloading
+ of upstream indices.
+ * do not process distributions without Updates: field upon
+ update/checkupdate/predelete...
+
+2008-09-09
+ * also support external uncompression programs for
+ .orig.tar/.debian.tar/.tar uncompression, i.e.:
+ - support Section/Priority extraction from lzma compressed dsc packages
+ - libarchive no longer needs to be linked against zlib/libbz2
+ * fix some corner cases in .diff parsing
+
+2008-09-07
+ * add support for external uncompression programs
+ - speeding up updating, as downloading and uncompressing
+ can happen at the same time
+ - support lzma compressed .deb and .diff (when unlzma is available)
+ - supporting .bz2 compressed files even when compiled without libbz2
+ (but needing runtime bunzip2 then)
+ * make --nooldfilesdb the default
+
+2008-08-24
+ * unify reading of compressed files, adding support for:
+ - extracting section and priority from a .diff.bz2
+ - restoring from a snapshot with only .bz2 indices
+
+2008-08-23
+ * massive refactorisation of the update code to retrieve
+ remote index files. Most important modifications:
+ - when the same remote distribution is needed by multiple
+ updates, then the index files are only downloaded once.
+	  (still needs further changes to allow better detection
+ of the same source).
+ - ListHooks are called once per use (should mostly only
+ make a difference for flat sources or with settings
+ where this is needed).
+ - --nolistsdownload now only not downloads lists and has
+ no other effects (checksums still checked, --noskipold
+ no longer implied).
+ - deleting of old no longer needed lists (the default
+ --nokeepunneeded) no longer exists.
+ - index files are stored uncompressed in lists/ and the
+ way files are named there is less strange...
+ - many other changes are possible now and will hopefully
+ be implemented soon.
+ * support downloading .bz2 indices
+ * add --via to Log-notifiers to only call notification
+ scripts when the action was triggered by a specific
+ command.
+
+2008-08-22
+ * some internal cleanup preparing for future changes...
+
+2008-08-16
+ * allow multiple export hooks
+
+2008-08-12
+ * check for Ctrl-C in file_foreach (dumpunreferenced, ...)
+
+2008-08-08
+ * fix handling of libbz2 return codes
+
+2008-08-07
+ * make reoverride work again...
+ (and not ignore section and priority)
+
+2008-08-03
+ * remove iteratedupdate
+
+2008-07-30
+ * fix double-free whith --export=never
+
+2008-07-27
+ * buffered read of index files upon "update".
+
+2008-07-26
+ * add support to retrieve packages from flat repositories.
+
+2008-07-25
+ * refactor indexfile parsing. (Needed for future changes,
+ perhaps speeding some things up a tiny littly bit).
+ * fix logic error causing restorefilter aborting
+
+2008-07-23
+ * Do not claim --noskipold makes a difference in the update output
+ for targets not having any upstream to pull from.
+
+2008-07-22
+ * better cope with a file needed multiple times when
+ updating
+
+2008-07-12
+ * make list package argument optional, listing all
+ packages if not there.
+ * fix bug causing assert() instead of proper error message
+ if list gets too many arguments.
+
+2008-07-03
+ * add IgnoreHashes directive for conf/updates
+
+2008-06-26 Bernhard R. Link <brlink@debian.org>
+ * add FakeComponentPrefix, that adds a prefix to components
+ in the Release file and removes them from Codename and Suite
+ in the central Release file. This way it looks more like
+ security /updates and thus apt is not confused.
+
+2008-06-25 Bernhard R. Link <brlink@debian.org>
+ * avoid creating symlinks that cannot work because of
+ a '/' in the link to create.
+
+2008-06-23 Bernhard R. Link <brlink@debian.org>
+ * fix bug in optionsfilename calculating introduced in
+ last revision.
+
+2008-06-22 Bernhard R. Link <brlink@debian.org>
+	* move some directory variables to global variables,
+ some related cleanup in the code
+ * set REPREPRO_BASE_DIR, REPREPRO_OUT_DIR, REPREPRO_DIST_DIR,
+ REPREPRO_CONF_DIR and REPREPRO_LOG_DIR when calling log notifiers,
+ apt methods, update hooks or export hooks.
+
+2008-06-07 Bernhard R. Link <brlink@debian.org>
+ * remove some checks that fail for version 2 or 3 debian
+ source packages. (in reprepro include and changestool verify)
+ * extract missing Section and Priority also from a .debian.tar.{gz,bz2}
+ file.
+
+2008-06-06 Bernhard R. Link <brlink@debian.org>
+ * switch to 'new' AC_INIT and AM_INIT_AUTOMAKE syntax,
+	  move automatically included autoconf to ac/ subdir
+ * fix typo causing internal error when removesrc
+ is called for a distribution with tracking for an unknown
+ source name.
+
+2008-05-17 Bernhard R. Link <brlink@debian.org>
+ * Add support for sha256.
+ * changestool puts Files: last, makes it easier
+ to use some versions of dupload.
+
+2008-05-16 Bernhard R. Link <brlink@debian.org>
+ * When include'ing a .changes file with Checksums
+ header and limiting to some files with -A or -T, do
+	  not erroneously complain about not expecting the
+ skipped files in Checksums-* headers
+ * Look at suite names when no distribution with the
+ requested codename exists.
+
+2008-05-15 Bernhard R. Link <brlink@debian.org>
+	* Print warning when not including a
+ package because of unknown key/expire/revocation.
+ (In addition to the warning with -v about those problems
+ with a signature and in addition to the message of not
+ including a package at all if that was the only chance to
+ get it in)
+
+2008-04-17 Bernhard R. Link <brlink@debian.org>
+ * fix free of uninitialized pointer when calling log notifiers
+ while removing (this time for real)
+
+2008-04-12 Bernhard R. Link <brlink@debian.org>
+ * move assertion to not abort() on wrong md5sums in include
+ command, but cleanly error out.
+ * do not close random fd when starting client without
+ control data.
+ * fix free of uninitialized pointer when calling log notifiers
+ while removing
+
+2008-04-05 Bernhard R. Link <brlink@debian.org>
+ * add restore restoresrc restorefilter and _addpackage
+
+2008-04-04 Bernhard R. Link <brlink@debian.org>
+ * add copysrc and copyfilter
+ * reimplement copy command (should no longer invalidate
+ tracking information)
+ * warn against impossible -T values and impossible
+ -A -T combinations (source is dsc and dsc is source)
+
+2008-03-31 Bernhard R. Link <brlink@debian.org>
+ * bugfix: no longer confuse -S and -P (introduced in 3.0.1)
+
+2008-03-25 Bernhard R. Link <brlink@debian.org>
+ * put a fake Suite: field in Release files generated by
+ gensnapshot to avoid apt warning about the distribution
+ name not matching.
+
+2008-03-17 Bernhard R. Link <brlink@debian.org>
+ * Log:-scripts are starting with environment-variable
+ REPREPRO_CAUSING_FILE set to the main file causing this
+ change. (.changes for include/processincoming, .dsc for includedsc,
+ .deb for includedeb);
+
+2008-03-14 Bernhard R. Link <brlink@debian.org>
+ * read Checksums-Sha1 in .changes file in processincoming
+
+2008-03-13 Bernhard R. Link <brlink@debian.org>
+ * changestool can write Checksums-Sha1 headers now
+ * read Checksums-Sha1 in .changes file in the include command
+
+2008-03-12 Bernhard R. Link <brlink@debian.org>
+ * Bugfix: When replacing fields only those matching with
+ the same case were replaced.
+
+2008-03-10 Bernhard R. Link <brlink@debian.org>
+ * write Checksums-Sha1 to Sources.gz when available and
+ remove Checksums-Sha256 to avoid problems with not yet being
+ able to add the .dsc file.
+ * Do not warn about missing Standards-Version as newer dpkg-source
+ no longer include them.
+
+2008-03-09 Bernhard R. Link <brlink@debian.org>
+ * read Checksums-Sha1 in .dsc files
+
+2008-03-08 Bernhard R. Link <brlink@debian.org>
+ * When missing section or priority reprepro's includedsc and
+ changestool's add[dsc] look into the .diff and the .tar file.
+ * changestool's add* commands look for files in the current directory
+ first, adddsc for files referenced in the directory of the dsc file.
+
+2008-03-06 Bernhard R. Link <brlink@debian.org>
+ * fix/improve some messages, based upon many suggestions
+ by Marc Haber.
+
+2008-03-02 Bernhard R. Link <brlink@debian.org>
+ * fix double free error in checksums upgrade case of includedeb
+
+2008-03-01 Bernhard R. Link <brlink@debian.org>
+ * cleaning: port changestool to new checksums code,
+ finally removing the old md5sum code.
+
+2008-02-29 Bernhard R. Link <brlink@debian.org>
+ * improve documentation of listfilter command
+
+2008-02-21 Bernhard R. Link <brlink@debian.org>
+ * make --without-libarchive compile again, thanks to
+ Jesus Roncero for noticing.
+
+2008-02-19 Bernhard R. Link <brlink@debian.org>
+ * Try harder not to leave any newly added files
+ to the pool in the case of an error.
+
+2008-02-15 Bernhard R. Link <brlink@debian.org>
+ * Also ignore missing Changes and Description lines
+ in .changes files with "include".
+
+2008-02-12 Bernhard R. Link <brlink@debian.org>
+ * Add --outdir directive to set the directory the pool
+ hierarchy is put under (and the dists hierarchy unless
+ --distdir puts it somewhere else).
+
+2008-02-11 Bernhard R. Link <brlink@debian.org>
+ * fix --waitforlock parsing on 64 bit size_t architectures.
+ (Thanks to Arno Renevier for reporting the bug)
+
+2008-02-01 Bernhard R. Link <brlink@debian.org>
+ * new --nooldfilesdb switch to only use new-style checksum database
+ * improve db/version generation, set minimum required reprepro version
+ to 3.3.0 when only using checksums.db
+
+2008-01-13 Bernhard R. Link <brlink@debian.org>
+ * improve collecting of not yet known checksums and using
+ already recorded checksums in the database
+
+2008-01-06 Bernhard R. Link <brlink@debian.org>
+ * implement collectnewchecksums
+
+2008-01-04 Bernhard R. Link <brlink@debian.org>
+ * add checksums.db to store all checksums (as opposed to only md5sums
+ in files.db). The old files.db persists for compatibility, but when
+ checksums.db is up to date (when repository is generated with new
+ reprepro or to be implemented collectnewchecksums was run) the old
+ files.db can be deleted and only checksums.db is used then. (Of
+ course you should not run an older reprepro with that repository
+ then, ever).
+
+2008-01-03 Bernhard R. Link <brlink@debian.org>
+ * tracking.c uses database.c instead of libdb directly
+
+2007-12-14 - 2007-12-23 Bernhard R. Link <brlink@debian.org>
+ * collect and advertise more checksums, though not yet stored
+
+2007-12-10 Bernhard R. Link <brlink@debian.org>
+ * support lzma compressed source packages
+
+2007-12-01 Bernhard R. Link <brlink@debian.org>
+ * beautify control data read from .deb or .dsc/.changes files:
+ remove all CR and make sure leading or trailing newlines do
+ not hurt.
+
+2007-11-27 Bernhard R. Link <brlink@debian.org>
+ * rewrite support for reading text files containing a single
+ chunk. (Release, .dsc, .changes). Unsigned .dsc and .changes
+ files are no longer routed through libgpgme.
+
+2007-11-24 Bernhard R. Link <brlink@debian.org>
+ * references.c uses database.c instead of accessing libdb directly
+
+2007-11-19 Bernhard R. Link <brlink@debian.org>
+ * mark more filedescriptors closeonexec,
+ support closefrom and F_CLOSEM when available.
+
+2007-11-18 Bernhard R. Link <brlink@debian.org>
+ * add sha1 hash calculation code
+ * add sha1 hashes of index files into Release files.
+	  release.cache.db renamed to release.caches.db due
+ to modified syntax.
+
+2007-10-31 Bernhard R. Link <brlink@debian.org>
+ * translatefilelists now can be run when both old
+ and new style filelists are there (this can happen
+ when it was translated and an old version of reprepro
+ was run over this database. You should not do this,
+ but when it happens, translatefilelists can be used
+ now instead of having to reextract the lists).
+
+2007-10-29 Bernhard R. Link <brlink@debian.org>
+ * If exporting a distribution fails, warn if something is left
+ in a state that needs manual exporting.
+
+2007-10-26 Bernhard R. Link <brlink@debian.org>
+ * change --export default from "normal" (now also available
+ under the name "lookedat") to "changed".
+
+2007-10-21 Bernhard R. Link <brlink@debian.org>
+ * warn against -A,-C,-T,-S or -P given to an action not
+ using it, with new --ignore=unusedoption to ignore this.
+
+2007-10-07 Bernhard R. Link <brlink@debian.org>
+ * change db/version file to final format,
+ abort if version or libdb version specified
+ there cannot be fulfilled.
+
+2007-09-27 Bernhard R. Link <brlink@debian.org>
+ * allow comments starting within lines in config files
+ * also allow tab as first character for continued lines as
+ manpage already says.
+
+2007-09-23 Bernhard R. Link <brlink@debian.org>
+ * save another 2 seconds while sorting filelists for Contents files
+
+2007-09-22 Bernhard R. Link <brlink@debian.org>
+ * make empty Architectures and Components fields
+ in conf/distributions an error.
+ * Contents: fields no longer has a rate value,
+ ContentsComponents/Architectures/UComponents
+ triggers or disables contents generation if non-/empty.
+	* empty Architectures/Components/UdebComponents in
+ conf/updates and conf/pulls now mean nothing instead of all.
+ * minimal additional speedup when sorting filelists
+
+2007-09-21 Bernhard R. Link <brlink@debian.org>
+ * save cached filelists of packages for Contents files
+ in a preprocessed form, needing only about half the disk
+ space and only half the time when generating the Contents file.
+ * new translatefilelists command to translate old to new format
+ * filelists reading no longer available without libarchive
+
+2007-09-19 Bernhard R. Link <brlink@debian.org>
+ * files.c uses database.c instead of accessing libdb directly
+ * release.c uses database.c instead of accessing libdb directly
+
+2007-09-16 Bernhard R. Link <brlink@debian.org>
+ * add removesrc and removefilter action
+
+2007-09-15 Bernhard R. Link <brlink@debian.org>
+ * move package database handling from packages.c to database.c
+
+2007-09-14 Bernhard R. Link <brlink@debian.org>
+ * rereference now also refreshes references by tracking data.
+
+2007-09-13 Bernhard R. Link <brlink@debian.org>
+ * retrack no longer create track records for distributions with
+ tracking disabled, dumptracks no longer generated empty databases.
+ * removealltracks now also works on distributions no longer listed
+ in conf/distributions, no longer supports being used on all
+	  distributions listed there (i.e. without arguments)
+	* tidytracks now removes all tracking data from a distribution without
+ tracking activated.
+ * clearvanished removes tracking data from vanished distributions.
+ * in default --nofast mode, check for unexpected tracking data and
+ do not run, unless --ignore=undefinedtracking is defined
+ * retrack refreshes tracking information instead of destroying and
+ starting new.
+ * make update's ListHook relative to confdir
+ * low level part of the includelogs options added
+
+2007-09-11 Bernhard R. Link <brlink@debian.org>
+ * reject spaces and tabs in key-names (i.e. before :) in config files,
+ instead of bubbling about unknown fields.
+
+2007-09-10 Bernhard R. Link <brlink@debian.org>
+ * improve parsing of update's Config lines
+
+2007-09-09 Bernhard R. Link <brlink@debian.org>
+ * never hardlink index files, but copy them always into the lists
+ directory. (Should not make a difference yet, but feels safer).
+ * warn if update rules list components or architectures are always ignored
+
+2007-09-08 Bernhard R. Link <brlink@debian.org>
+ * warn if pull rules list components or architectures are always ignored
+
+2007-09-07 Bernhard R. Link <brlink@debian.org>
+ * create db/version
+ * always create all packages.db subtables, so future
+ versions can detect new architectures/components.
+
+2007-09-06 Bernhard R. Link <brlink@debian.org>
+ * read all distribution definitions before starting
+ any action.
+
+2007-09-04 Bernhard R. Link <brlink@debian.org>
+ * test number of arguments earlier.
+
+2007-09-03 Bernhard R. Link <brlink@debian.org>
+ * remove the dbdirs and all its parents created at startup
+ that are still empty at shutdown. (Does not make much difference
+ yet, as most commands create an empty file database in there.)
+ * obsolete --overridedir, overrides belong to conf dir like all
+ the other config files now.
+
+2007-09-02 Bernhard R. Link <brlink@debian.org>
+ * fix uninitialized use of errno in listclean.
+ (might cause update to report error opening dir: file exists)
+ * new config file parser
+ * remove --ignore from changestool, --ignore=shortkeyid from reprepro
+ * move to C99's bool, false and true
+
+2007-08-21 Bernhard R. Link <brlink@debian.org>
+ * ignore SIGPIPE, so that libgpgme cannot tear us apart
+ so easily.
+
+2007-08-20 Bernhard R. Link <brlink@debian.org>
+ * Print ignored signatures in Release.gpg files
+ when verbosity > 10
+
+2007-08-18 Bernhard R. Link <brlink@debian.org>
+ * stop dumpreferences output when Ctrl-c is received.
+
+2007-08-03 Bernhard R. Link <brlink@debian.org>
+ * add --without-libgpgme to compile without
+ gpgme support (checking and signing are then not
+ available, yet).
+
+2007-08-19 Bernhard R. Link <brlink@debian.org>
+ * [SECURITY] fix bug causing a Release.gpg with only
+ unknown signatures considered as properly signed.
+
+2007-07-28 Bernhard R. Link <brlink@debian.org>
+ * fix segfault in changestool's verify if
+ md5sum of .orig.tar.gz is wrong and not listed
+ in the .changes file.
+ * changestool's verify knows about epochs not showing
+ up in filenames now.
+
+2007-07-26 Bernhard R. Link <brlink@debian.org>
+ * add support for .changes file having the source
+ version in the Sources: header (like binNMUs) to the
+ include and processincoming commands.
+
+2007-07-22 Bernhard R. Link <brlink@debian.org>
+ * include[u]deb allows multiple files to include now
+
+2007-06-25 Bernhard R. Link <brlink@debian.org>
+ * don't complain if suite name and component name are
+ the same in createsymlinks
+
+2007-06-24 Bernhard R. Link <brlink@debian.org>
+ * processincoming allows an optional second argument
+ to limit processing to a specific file for better
+ integration with inoticoming.
+
+2007-06-16 Bernhard R. Link <brlink@debian.org>
+ * when checking a file to have the expected checksum,
+ first check if the file size matches before calculating
+ its md5sum.
+
+2007-06-11 Bernhard R. Link <brlink@debian.org>
+ * detect "false" and "no" as false in boolean headers.
+ (Until now only existence was tested and considered as
+ true, which broke apt-methods telling "Send-Config: false")
+
+2007-06-10 Bernhard R. Link <brlink@debian.org>
+ * don't waste filedescriptors by not closing .done-files
+
+2007-06-09 Bernhard R. Link <brlink@debian.org>
+ * set GPG_TTY when unset and stdin is a terminal.
+ (and new option --noguessgpgtty to suppress this)
+
+2007-06-03 Bernhard R. Link <brlink@debian.org>
+ * fix segfault when running processincoming without notificators
+ (Thanks to Julien Valroff for finding this)
+
+2007-06-02 Bernhard R. Link <brlink@debian.org>
+ * rename --checkspace to --spacecheck, as
+ manpage and error messages hint to that.
+ * fix 64bit problem in errormessages for Log:
+
+2007-05-29 Bernhard R. Link <brlink@debian.org>
+ * adapt name include uses for .changes files to
+ that of processincoming.
+
+2007-05-25 Bernhard R. Link <brlink@debian.org>
+	* some fixes and improvements of the free space calculation
+ ( add --spacecheck, --safetymargin, --dbsafetymargin )
+
+2007-05-24 Bernhard R. Link <brlink@debian.org>
+ * error/warn if trying to include a package via
+ processincoming which is already there newer
+ * do not notify a .changes when no package included
+ (when using Log: --changes)
+ * add Permit: unused_files older_version
+ and Cleanup: unused_files on_deny on_error for conf/incoming
+ * add --waitforlock option
+
+2007-05-23 Bernhard R. Link <brlink@debian.org>
+ * fix remove action not tidy tracked packages.
+ (Thanks to Dan Pascu for finding this, too)
+ * rename cleartracks in removealltracks
+ * new tidytracks command
+
+2007-05-22 Bernhard R. Link <brlink@debian.org>
+ * Add per distribution notification scripts for accepted changes files.
+
+2007-05-21 Bernhard R. Link <brlink@debian.org>
+ * fix problem of not waiting for notificators in some commands
+ (Thanks to Dan Pascu for finding this)
+
+2007-05-07 Bernhard R. Link <brlink@debian.org>
+	* move some code from release.c to signature.c in preparation of
+ later changes
+
+2007-05-06 Bernhard R. Link <brlink@debian.org>
+ * changestool: add adddsc command
+ * changestool: add --create option
+ * changestool: add add command
+ * changestool: add setdistribution command
+
+2007-05-03 Bernhard R. Link <brlink@debian.org>
+ * changestool: add addrawfile command
+
+2007-04-03 Bernhard R. Link <brlink@debian.org>
+ * first code for checking for enough free space
+
+2007-03-29 Bernhard R. Link <brlink@debian.org>
+ * add rerunnotifiers command
+
+2007-03-28 Bernhard R. Link <brlink@debian.org>
+ * add support logging to external notificators
+	  (including example to create changelog/ hierarchy)
+
+2007-03-26 Bernhard R. Link <brlink@debian.org>
+ * fix bug in term parsing not accepting '<<'
+
+2007-03-23 Bernhard R. Link <brlink@debian.org>
+ * first part of logging code
+
+2007-03-16 Bernhard R. Link <brlink@debian.org>
+ * fix bug not recognizing already existing .bz2 files
+ when exporting only changes.
+ * more changes in verbose output
+
+2007-03-15 Bernhard R. Link <brlink@debian.org>
+ * more output to stdout instead of stderr
+
+2007-03-14 Bernhard R. Link <brlink@debian.org>
+ * processincoming only exports distributions looked at
+ with --export=always (the default) and not every distribution.
+ (other commands should not have changed)
+ * changed output of many status messages to stdout instead of stderr
+ * changed verbosity level needed to see some messages
+
+2007-03-12 Bernhard R. Link <brlink@debian.org>
+ * add --silent option
+ * change some status output to stdout instead of stderr.
+
+2007-02-26 Bernhard R. Link <brlink@debian.org>
+ * add gensnapshot command
+
+2007-02-23 Bernhard R. Link <brlink@debian.org>
+ * rename import to processincoming
+ * describe in manpage
+ * update bash completion example
+
+2007-02-11 Bernhard R. Link <brlink@debian.org>
+ * fix bug in non-libarchive filelist extraction with long
+ filelists
+
+2007-01-25 Bernhard R. Link <brlink@debian.org>
+ * import allow .changes files with multiple distributions
+
+2007-01-21 Bernhard R. Link <brlink@debian.org>
+ * add trackingsupport to "import" command
+
+2007-01-17 Bernhard R. Link <brlink@debian.org>
+ * fail cleanly when getting a .dsc without Format header
+
+2007-01-16 Bernhard R. Link <brlink@debian.org>
+ * improve error message of missing Files: line in .dsc files
+
+2007-01-12 Bernhard R. Link <brlink@debian.org>
+ * add AlsoAcceptFor for distributions
+
+2007-01-06 Bernhard R. Link <brlink@debian.org>
+ * incoming fixups and more testcases
+ * omit some warnings about versions not starting
+ with a digit
+
+2007-01-05 Bernhard R. Link <brlink@debian.org>
+ * better cope with double entries in some
+ lists. (Like Architectures or Components)
+ * incoming fixups and more testcases
+
+2007-01-04 Bernhard R. Link <brlink@debian.org>
+ * more fixups of incoming handling
+
+2007-01-03 Bernhard R. Link <brlink@debian.org>
+ * factor some checkindeb code into binaries.c
+ * incoming.c uses now only binaries.c and not checkindeb.c
+	  in preparation of different semantics to come.
+
+2007-01-02 Bernhard R. Link <brlink@debian.org>
+ * factor some checkindsc code into source.c
+ * add dsc support for import from incoming
+
+2007-01-01 Bernhard R. Link <brlink@debian.org>
+ * move uploaderslist load into distribution struct
+ * fix bug in manpage: uploaders list keyword is allow and not accept
+ * some more code for incoming processing
+
+2006-12-31 Bernhard R. Link <brlink@debian.org>
+ * first code for importing from an incoming dir, not
+ yet useable (supports no source, no overrides, no ... yet)
+ * move loaded overrides into distribution struct.
+
+2006-12-17 Bernhard R. Link <brlink@debian.org>
+ * tell about the filename in the non-libarchive
+ case of failure to extract control or filelist
+ from a .deb
+ * add _fakeemptyfilelist action to omit a file
+	  when generating Content files.
+
+2006-11-28 Bernhard R. Link <brlink@debian.org>
+ * mostly rewrote "adddeb"
+
+2006-11-27 Bernhard R. Link <brlink@debian.org>
+ * add "adddeb" option to changestool
+
+2006-10-31 Bernhard R. Link <brlink@debian.org>
+ * fix spelling mistakes in manpage (thanks to A. Costa)
+ fixed the same errors in the code and its messages
+
+2006-10-29 Bernhard R. Link <brlink@debian.org>
+ * fix updatechecksums for .changes files not
+ listing entries from the .dsc
+
+2006-10-11 Bernhard R. Link <brlink@debian.org>
+ * add Uploaders: rule to conf/distributions to
+ limit include to .changes files signed with specific keys.
+
+2006-10-07 Bernhard R. Link <brlink@debian.org>
+ * only show control information of to be added packages
+ in checkpull/checkupdate with -V
+ * fixed a missed refcount increasing in yesterdays code
+ * give hints where to look when gpgme reports no error on
+ failure
+
+2006-10-06 Bernhard R. Link <brlink@debian.org>
+ * FilterList in update and pull rules now
+ is a space separated list of files.
+
+2006-10-03 Bernhard R. Link <brlink@debian.org>
+ * fix typos and spelling errors in manpage (Thanks to Bruce Sass)
+ * fix type-mismatch to silence compiler-warning
+ * work around signing problems in gpgme11, fix some memory holes
+
+2006-10-01 Bernhard R. Link <brlink@debian.org>
+ * new includeallsources command for changestool
+ to change a .changes as if it was created with -sa
+
+2006-09-30 Bernhard R. Link <brlink@debian.org>
+ * new updatechecksums command for changestool
+
+2006-09-24 Bernhard R. Link <brlink@debian.org>
+ * ported to libgpgme11
+ * removed --onlyacceptsigned
+
+2006-09-20 Bernhard R. Link <brlink@debian.org>
+ * make strlist_init void
+
+2006-09-19 Bernhard R. Link <brlink@debian.org>
+ * rename modifychanges to changestool
+
+2006-09-17 Bernhard R. Link <brlink@debian.org>
+ * fix return of fingerprints in new signature handling code
+ * move endswith from main.c to names.h
+ * add modifychanges helper program (yet only validating some stuff)
+
+2006-09-12 Bernhard R. Link <brlink@debian.org>
+ * reject .changes with binaries not listed, unless --ignore=surprisingbinary
+ * reject .changes with .dsc or .deb with wrong source version
+ unless --ignore=wrongversion or --ignore=wrongsourceversion
+ * earlier and better error message if source name differs from the one
+ given in the .changes file.
+
+2006-09-11 Bernhard R. Link <brlink@debian.org>
+ * new strlist_add_dup
+ * more fine tuned signature checking (one valid signature suffices)
+ * fix a little memory hole in tracking code
+
+2006-09-07 Bernhard R. Link <brlink@debian.org>
+ * fix some typos (thanks to Jordi Mallach for noting)
+
+2006-09-04 Bernhard R. Link <brlink@debian.org>
+ * support .orig.tar.bz2 .tar.bz2 and .diff.bz2 in source packages
+ * fix bug, causing Contents-* files containing only the first file
+ of a package when this is the first time this package is accessed
+
+2006-08-22 Bernhard R. Link <brlink@debian.org>
+ * fix db3 mention in reprepro.1
+
+2006-08-05 Bernhard R. Link <brlink@debian.org>
+ * some error/status/debug messages improved a little
+
+2006-08-03 Bernhard R. Link <brlink@debian.org>
+ * improve messages when missing files (.tar.gz most likely)
+
+2006-07-28 Bernhard R. Link <brlink@debian.org>
+ * remove unreferenced files when doing removetracks
+ * fix bug omitting an uncompressed Sources entry in
+ Release files when only exporting changed values and
+ the source part changed not. (Thanks to Alexander Kuehn
+ for finding this one).
+	* fix tiny memory leak in clearvanished
+
+2006-07-26 Bernhard R. Link <brlink@debian.org>
+ * do not error out if one file gets unreferenced by two different
+ reasons at the same time.
+ * implement "minimal" and "all" tracking support for packages losing
+ files because of getting replaced by newer ones...
+
+2006-07-23 Bernhard R. Link <brlink@debian.org>
+ * rewrite some parts of tracking support, implement
+ "minimal" and "all" methods...
+
+2006-07-18 Bernhard R. Link <brlink@debian.org>
+ * fix segfault in non-libarchive control extraction code
+ introduced with the last change
+
+2006-07-16 Bernhard R. Link <brlink@debian.org>
+ * cope with control.tar.gz files without leading ./
+ when not using libarchive.
+
+2006-07-15 Bernhard R. Link <brlink@debian.org>
+ * cope with GNU style ar files when using libarchive
+ (i.e. with .deb files not generated by dpkg-deb)
+
+2006-07-08 Bernhard R. Link <brlink@debian.org>
+ * add clearvanished command
+
+2006-06-21 Bernhard R. Link <brlink@debian.org>
+ * add copy command to pull only a specific package
+ without having to add FilterFormulas to conf/pulls
+ (and also a bit faster)
+
+2006-06-19 Bernhard R. Link <brlink@debian.org>
+ * add predelete action to remove packages from
+ a distribution that would be deleted or replaced
+ by a command.
+
+2006-06-18 Bernhard R. Link <brlink@debian.org>
+ * check for file conflicts and missing files when including
+ .changes files before copying/moving files into the pool
+ (Files missing in .dsc and files having the wrong md5sum
+ are still only noticed after/while moving them in the pool)
+ * delete files from the pool when checks after including
+ the files but before including the packages failed.
+
+2006-06-16 Bernhard R. Link <brlink@debian.org>
+ * manpage mentions includeudeb now. (Thanks to Jordi Mallach for noting)
+ * changed manpage to make clear options are before the command (dito)
+ * catch TERM, ABRT, INT and QUIT and do not start any new stuff after
+ that.
+ * remove force option (rarely worked and caused ugly bugs otherwise)
+
+2006-06-12 Bernhard R. Link <brlink@debian.org>
+ * some prework for predelete action
+
+2006-06-01 Bernhard R. Link <brlink@debian.org>
+ * better usage description in tiffany.example
+ * fix the fix for the export preprocessor
+
+2006-05-30 Bernhard R. Link <brlink@debian.org>
+ * fix bug in communication with Index file preprocessor
+ (so the .diff directories tiffany.example creates are
+ properly advertised so that apt-get can use them)
+
+2006-05-15 Bernhard R. Link <brlink@debian.org>
+	* warn against doubled fields in
+ config files. (ignorable with --ignore=doublefield)
+ * better error message when trying to forget
+ filekey not existing
+
+2006-05-14 Bernhard R. Link <brlink@debian.org>
+ * add support for libdb4.3 and libdb4.4,
+ default is libdb4.4 now.
+
+2006-05-13 Bernhard R. Link <brlink@debian.org>
+ * add support for contents file when compiled
+ without libarchive.
+
+2006-05-12 Bernhard R. Link <brlink@debian.org>
+ * add content file generation
+
+2006-05-07 Bernhard R. Link <brlink@debian.org>
+ * add support for extracting filelists from
+ Debian packages for future usage and a
+ __extractfilelist action. (only available when
+ compiled with libarchive)
+
+2006-05-06 Bernhard R. Link <brlink@debian.org>
+ * add support for using libarchive to get the
+ control file out of a .deb instead of calling
+ ar and tar.
+
+2006-05-03 Bernhard R. Link <brlink@debian.org>
+ * add new pull and checkpull actions
+ * repair checkupdate statistics of newest available
+ version of checkupdate when using delete rules.
+ (Showed 'unavailable for reload').
+ * fix segfault and memory leak in checkupdate
+ * fix including a changes file with source and restricting
+ to some binary distribution or to binary package type.
+ * add some warnings against impossible combinations of -T and -A
+
+2006-04-29 Bernhard R. Link <brlink@debian.org>
+ * fix some minor memory leaks
+
+2006-04-28 Bernhard R. Link <brlink@debian.org>
+ * rewrite decision for exporting distributions a bit:
+ export all distributions that did not have errors by default
+ (it did not export anything when an error occurred)
+ added new --export option with possible values
+ never, changed, normal and forced.
+
+2006-04-25 Bernhard R. Link <brlink@debian.org>
+ * do not export indices if all upgrades were skipped
+
+2006-04-23 Bernhard R. Link <brlink@debian.org>
+ * unbreak new skipold for delete rules
+
+2006-04-22 Bernhard R. Link <brlink@debian.org>
+ * explicitly save which files are already
+ processed and to be skipped by --skipold.
+
+2006-04-11 Bernhard R. Link <brlink@debian.org>
+ * tell the user running gpg manually sometimes
+ resolves problems while calling it through libgpgme
+ does not help.
+ * add a WORKAROUND part to the manpage
+
+2006-04-09 Bernhard R. Link <brlink@debian.org>
+ * remove the woody reference in signature.c
+
+2006-03-30 Bernhard R. Link <brlink@debian.org>
+ * warn about architectures called 'all'
+
+2006-02-25 Bernhard R. Link <brlink@debian.org>
+ * add --ignore=missingfile to look for .orig.tar.gz
+ files of broken .changes (no -sa though needed) files
+ in the directory of the .changes file.
+
+2006-02-20 Bernhard R. Link <brlink@debian.org>
+ * add optional "NotAutomatic" field for the
+ distribution specification.
+
+2006-02-10 Bernhard R. Link <brlink@debian.org>
+ * add new --ignore=extension, without which
+ it refuses to 'include' files not ending in '.changes',
+ to 'include[u]deb' files not ending in '.[u]deb' or to
+ 'includedsc' files not ending '.dsc'.
+
+2006-01-21 Bernhard R. Link <brlink@debian.org>
+ * fix typesetting error in ratpoison.1
+ and add an example for update's Config option.
+ * fix segfault of FD_ISSET(-1,&...) when
+ method is not used (i.e. --nolistsdownload
+ and only need to get from other sources)
+ * fix minor memory leak of --skipold
+
+2005-12-24 Bernhard R. Link <brlink@debian.org>
+ * add cache database to store md5sums
+ of released files in there.
+
+2005-12-23 Bernhard R. Link <brlink@debian.org>
+ * Implement native .bz2 compression
+ (only when libbz2.so was available at build time)
+
+2005-12-22 Bernhard R. Link <brlink@debian.org>
+ * fix some spelling errors
+ (thanks to Guilherme de S. Pastore for notifying me)
+
+ * make index exportion code more low level, allowing
+ in-place md5sum calculation without needing to reread
+ the generated files.
+
+ * fix problem of bzip2.example script
+
+2005-12-20 Bernhard R. Link <brlink@debian.org>
+ * refactor index exporting/release generation
+ so that is always puts the uncompressed checksums
+ in the Release file.
+ * reverting the changes from 2005-12-15
+ (i.e. again not writing uncompressed Sources
+ by default, as the checksum now shows up
+ in the Release file anyway, as apt needs it)
+ * {Dsc,Deb,UDeb}Indices' external programs
+ are now only called with the uncompressed files.
+
+2005-12-19 Bernhard R. Link <brlink@debian.org>
+	* fix segfault introduced into iteratedupdate
+ by --skipold.
+
+2005-12-18 Bernhard R. Link <brlink@debian.org>
+ * split Release reading from release.c to readrelease.c
+
+2005-12-15 Bernhard R. Link <brlink@debian.org>
+ * Generate uncompressed source/Sources by default.
+
+2005-12-11 Bernhard R. Link <brlink@debian.org>
+ * Unless the new --noskipold is used,
+ only targets with newly downloaded index
+ files are updated.
+
+2005-12-10 Bernhard R. Link <brlink@debian.org>
+ * remove pool-directories gotten empty
+ (thanks to Julien Valroff for suggesting this)
+ * new --keepdirectories option to not try this
+
+2005-10-27 Bernhard R. Link <brlink@debian.org>
+ * add colons in description within bzip.example
+ (thanks to Steve Kemp for finding this)
+
+2005-10-05 Bernhard R. Link <brlink@debian.org>
+ * add --ignore=missingfield,brokenold,brokenversioncmp,
+	  unusedarch,surprisingarch
+
+2005-10-03 Bernhard R. Link <brlink@debian.org>
+ * replace readdir_r by readdir to be sure errno is
+ set properly.
+
+2005-10-02 Bernhard R. Link <brlink@debian.org>
+ * some cleanups (strict truthvalue-typing
+ and some integer signednesses...)
+
+2005-09-28 Bernhard R. Link <brlink@debian.org>
+ * Fix segfault when update file is empty.
+ (Thanks to Gianluigi Tiesi for noticing this.)
+
+2005-09-26 Bernhard R. Link <brlink@debian.org>
+ * Document override files' format in manpage
+ * Fix integer size in tracking data handling
+
+2005-09-25 Bernhard R. Link <brlink@debian.org>
+ * Documenting --ignore in manpage
+ * some clarifications in manpage
+
+2005-09-24 Bernhard R. Link <brlink@debian.org>
+ * putting a .changes in the wrong distribution
+ is an error now without --ignore=wrongdistribution
+	* putting new address in GPL notices, redownload
+ COPYING (fixing some typos and addresses)
+
+2005-09-22 Bernhard R. Link <brlink@debian.org>
+ * add --unignore (with alias --noignore)
+ to allow overwriting ignore in config.
+
+2005-09-06 Bernhard R. Link <brlink@debian.org>
+ * fix error in parsing FilterList default action
+ (thanks to Sergio Talens-Oliag for finding that)
+
+2005-08-28 Bernhard R. Link <brlink@debian.org>
+ * add REPREPRO_CONFIG_DIR
+
+2005-08-26 Bernhard R. Link <brlink@debian.org>
+ * read conf/options for default command line options,
+ use REPREPRO_BASE_DIR for default -b value, add --no
+ options to disable previously enabled options again.
+ * add a createsymlinks command to create suite->codename
+ symlinks
+
+2005-08-05 Bernhard R. Link <brlink@debian.org>
+ * do not set execute bit of signed files
+
+2005-08-02 Bernhard R. Link <brlink@debian.org>
+ * allow ~ in versions listed within .changes
+ * changed spacing in dpkgversions.c to make
+ comparing to originals in dpkg easier.
+
+2005-07-20 Bernhard R. Link <brlink@debian.org>
+ * read SignWith:-argument and give it to
+ libgpgme to decide which key to use.
+
+2005-07-05 Bernhard R. Link <brlink@debian.org>
+ * Document tracking
+
+2005-07-03 Bernhard R. Link <brlink@debian.org>
+ * add quick&dirty --ask-passphrase option
+
+2005-06-18 Bernhard R. Link <brlink@debian.org>
+ * add tracking.c and some starting functionality
+	* therefore refactored .deb and .dsc inclusion
+	  so that .changes inclusion can check those
+	  better before doing anything.
+	* some little tidy ups (freeing more memory,
+	  fixing bad English)
+
+2005-06-02 Bernhard R. Link <brlink@debian.org>
+ * Change default basedir to "."
+
+2005-05-31 Bernhard R. Link <brlink@debian.org>
+ * Fix bogus free causing segfaults
+ * No longer silently ignore additional arguments with include*
+
+2005-05-13 Bernhard R. Link <brlink@debian.org>
+ * add Fallback option to update-methods.
+
+2005-04-16 Bernhard R. Link <brlink@debian.org>
+ * fix broken fix in signature.c from 2005-04-10
+ * fix bug when after a delete rule the second
+ origin has the version already in an archive
+
+2005-04-12 Bernhard R. Link <brlink@debian.org>
+	* fix some more warnings
+
+2005-04-10 Bernhard R. Link <brlink@debian.org>
+ * apply some clean ups:
+ - distinguish between boolean and non-boolean values
+ - split globals from error.h in globals.h
+ * fix bug in signature.c to not treat config error like valid key.
+
+2005-04-07 Bernhard R. Link <brlink@debian.org>
+ * fix wrong handling of bugs in update specifications
+ * adopt short-howto to present
+ * fix typo in manpage
+
+2005-04-05 Bernhard R. Link <brlink@debian.org>
+ * create files without executeable bit set
+ when copying files.
+
+2005-03-29 Bernhard R. Link <brlink@debian.org>
+ * iteratedupdate directly exports indices instead
+ of all at the end...
+
+2005-03-28 Bernhard R. Link <brlink@debian.org>
+	* Implement "iteratedupdate" command, which iterates the
+ distributions and targets within them, instead of first
+ downloading all lists, then processing all lists, then
+ downloading all packages and then installing them all.
+ (This can be a bit slower, but needs less memory)
+
+ * Two --force are needed to ignore wrong Release.gpg
+
+2005-03-27 Bernhard R. Link <brlink@debian.org>
+ * Implement ".tobedeleted" feature for
+	  export scripts.
+
+2005-03-22 Bernhard R. Link <brlink@debian.org>
+ * Repeat that there were errors at the
+ end of reprepro.
+
+2005-03-11 Bernhard R. Link <brlink@debian.org>
+ * Do not accept multiple -A,-C,-T,-S or -Ps.
+
+2005-03-02 Bernhard R. Link <brlink@debian.org>
+ * Change Override/SrcOverride to
+ DebOverride/UDebOverride/DscOverride
+
+ * add new command reoverride to reapply
+ overrides to all packages.
+
+2005-02-20 Bernhard R. Link <brlink@debian.org>
+ * add docs/tiffany.example, which generates
+ apt-qupdate'able .diff directories.
+
+ * Many small changes to make splint more
+ happy. (Mostly annotations, some clearance
+ and some fixes of memory holes or possible
+ segfaults if running out of memory)
+
+2005-02-19 Bernhard R. Link <brlink@debian.org>
+ * Refactor Index Exporting and Release generation
+ to reduce the time Release files and Package indices
+ are out of sync (Everything is written to files
+ ending in .new now, only when everything is ready
+ all are moved to their final place) and to prepare
+ DebIndices UDebIndices and DscIndices Options.
+
+ * add another test-case
+ * FIX the overflow bug in chunks_replacefield
+
+ * add DebIndices UDebIndices and DscIndices
+ options for conf/distributions. This allows
+ to change which Indices to generate for this
+ type, or calls hook to even generate additional
+ ones. (See docs/bzip.example).
+
+2005-02-14 Bernhard R. Link <brlink@debian.org>
+ * Some little changes to make splint
+ and valgrind happier.
+
+2005-02-13 Bernhard R. Link <brlink@debian.org>
+ * Remove some code duplication in main.c
+ (and renamed _md5sums to _listmd5sums)
+ * change -b to not overwrite prior given
+ --listdir --distdir ...
+
+2005-02-12 Bernhard R. Link <brlink@debian.org>
+ * Some clean up of the code and added some
+ paranoia checks.
+
+2005-02-10 Bernhard R. Link <brlink@debian.org>
+ * No longer shutdown aptmethods when nothing is to do.
+ (This caused problems when index files are already in
+ place but still packages to be downloaded).
+
+ * Do not warn about deleting _changed files from listdir.
+
+2005-02-08 Bernhard R. Link <brlink@debian.org>
+ * Do some more checks reading signed sources.
+
+ * Release 0.1.1
+
+2005-02-07 Bernhard R. Link <brlink@debian.org>
+ * Fix --onlyacceptsigned to safely handle unknown
+ keys or multiple keys of different state.
+
+2005-02-06 Bernhard R. Link <brlink@debian.org>
+ * Release 0.1
+
+2005-02-05 Bernhard R. Link <brlink@debian.org>
+ * Add --onlyacceptsigned to make include and includedsc only
+ accept signed files.
+
+ * Check Codename, Components and Architectures fields
+ of conf/distributions for sane values
+ * fix checks for strange characters
+
+2005-02-03 Bernhard R. Link <brlink@debian.org>
+ * When updating delete files lists/<codename>_ for
+ all updated distributions, which will not be needed any more.
+
+2005-02-01 Bernhard R. Link <brlink@debian.org>
+ * Add some missing files in Makefile.am so they end up in dist
+ * Add some #includes so that it also compiles without
+ warnings on sarge/i386
+
+ * --ignore= allows multiple options separated by commas.
+
+ * Tell about -b if conf/distributions cannot be found
+
+ * Tell which release.gpg file is missing the signature.
+
+ * Some tidy up to reduce number of warnings with -W
+
+ * Allow multiple keys specified in update's ReleaseCheck
+
+2005-01-29 Bernhard R. Link <brlink@debian.org>
+ * Be more descriptive with missing signatures.
+
+2005-01-28 Bernhard R. Link <brlink@debian.org>
+ * readd _detect command
+ * write recovery HOWTO how to deal with database corruptions
+
+2005-01-27(at least GMT) Bernhard R. Link <brlink@debian.org>
+ * add a lockfile
+
+2005-01-26 Bernhard R. Link <brlink@debian.org>
+ * change FilterList to need a defaultaction given
+
+ * tidy up upgradelist.c and report errors properly
+
+ * ListHook is also called when --nolistsdownload is given
+
+ * update/checkupdate only download lists not already here
+
+2005-01-25 Bernhard R. Link <brlink@debian.org>
+ * Add ListHook keyword for external processing
+ of the downloaded index file before updating.
+
+ * Add FilterList keyword for a list in the
+ format of dpkg --get-selections
+
+2005-01-24 Bernhard R. Link <brlink@debian.org>
+ * Make includedeb work again.
+
+ * Fix bugs in override file parsing
+
+ * add a listfilter command
+ * fix bug in term evaluation with non-existing fields
+ * fix another parsing bug when too few spaces where around
+ * implement T_NEGATED flag of parsing
+ * document listfilter command
+
+ * check conf/distributions conf/updates for unknown fields
+ (to rule out typos, lines with # are ignored)
+
+2005-01-22 Bernhard R. Link <brlink@debian.org>
+ * Make -T work everywhere -A works.
+ * rename variables from suffix to packagetype
+
+ * allow colons in .changes filenames.
+ (epoch with colon is stripped, but
+ colons after that are allowed)
+
+ * Add tests/test.sh to test for basic
+ things to work...
+
+ * fix bug that prevented Release regeneration
+ when a index-file is changed to zero entries.
+
+2005-01-19 Bernhard R. Link <brlink@debian.org>
+ * now also include, includedeb, includedsc
+ and update will remove files which are no
+ longer needed due to newer versions available,
+ except when --keepunreferencedfiles is given.
+
+ * change some verbosities of files and refereces
+
+2005-01-17 Bernhard R. Link <brlink@debian.org>
+ * remove short options -e -N -l -r -M -d -D -c -p -o
+ to make it more guessable (and reserving short options
+ for important and likely often called functions).
+
+ * add --keepunreferencedfile option (if you think
+ this is long, remember GNU getopt_long will accept
+ --keep, too)
+
+2005-01-15 Bernhard R. Link <brlink@debian.org>
+	* Separate parsing and looking for allowed
+ values a bit more. Some more things can
+ be ignored with --ignore now.
+
+ * includedsc and includedeb only export
+ files that changed.
+
+ * remove now deletes files of removed packages
+ not referenced by any other package.
+
+2005-01-10 Bernhard R. Link <brlink@debian.org>
+ * Made updates using --force with failing parts
+ more graceful
+
+ * Make aptmethods less verbose
+
+2005-01-07 Bernhard R. Link <brlink@debian.org>
+ * Changed the meaning of the "Architectures:"
+ field in conf/distributions. Now a distribution
+ will have sources exactly when a "source" is in
+ this line.
+
+2005-01-05 Bernhard R. Link <brlink@debian.org>
+ * Only generate Release (and Release.gpg) files when
+ something changed.
+
+ * Add a --nolistsdownload option to avoid update and
+ checkupdate downloading all those lists again.
+
+2005-01-04 Bernhard R. Link <brlink@debian.org>
+ * Several code clean-ups, should not change anything....
+
+2004-12-30 Bernhard R. Link <brlink@debian.org>
+ * Tidy up (introduce bool_t and replace dpkgversion_isNewer)
+
+ * add a magic rule minus ("-") to mark all packages to be
+ deleted.
+
+ * add a checkupdate command to show what would be done.
+
+2004-12-24 Bernhard R. Link <brlink@debian.org>
+ * Fixed a boolean inversion in the check if | is allowed
+ in formulas.
+
+ * added FilterFormula to docs/reprepro.1
+
+2004-12-19 Bernhard R. Link <brlink@debian.org>
+
+ * change parsing of conf/distributions, the fields only
+ copied to Release files can be omitted now. Additional
+	  it warns if required fields are missing instead of
+ silently ignoring this block...
+
+2004-12-18 Bernhard R. Link <brlink@debian.org>
+
+ * remove now tells which packages were removed (with -v)
+	  and which could not be deleted. Indices will only
+ be exported when something was deleted.
+
+2004-12-18 Bernhard R. Link <brlink@debian.org>
+
+ * Modify remove to allow -T to specify the
+ type (deb,dsc,udeb) to delete from.
diff --git a/HACKING b/HACKING
new file mode 100644
index 0000000..c9afe17
--- /dev/null
+++ b/HACKING
@@ -0,0 +1,45 @@
+A general tour through the reprepro source tree:
+
+See also the "naming conventions" section in README.
+
+Most code returns a "retvalue". That is just an int, but an int with semantics.
+Errors are negative numbers (RET_WAS_ERROR is true), RET_OK means it returned
+successfully. RET_NOTHING can mean nothing has happened or nothing to do, or
+outgoing arguments were not initialized or something like that.
+
+Errors are to be checked always and immediately.
+
+Most of the code is written in some POOP (Pseudo Object Orientated
+Programming), i.e. for a struct foo there is a foo_something allocating and
+initiating it, several foo_something having a struct foo as first argument
+(think methods) and some destructor (usually foo_free if it also deallocates it
+and foo_done if you have to do the deallocation).
+
+A special case are binary and source packages. Those are (except in some
+special cases when preparing to add packages) not found as some struct but
+instead in serialized form as "char *" as 'controlchunk' (older code often
+calls it 'chunk', newer 'control'). The serialisation format is the same as
+the part of the package in the generated Packages or Sources file.
+
+Extracting data from this format is usually done via routines in chunk.c.
+For source and binary packages those chunk_* routines are usually called from
+functions in sources.c or binaries.c, which are usually called via function
+pointers in struct target (think of the function pointers in struct target as
+some externalized virtual method table for packages).
+
+Implementation (including the layout of structs) is hidden in the specific
+module (i.e. foo.c and not foo.h), unless doing so would require writing glue
+code or significant getter/setter code or error messages would not have enough
+information to be helpful.
+
+STYLE GUIDE
+-----------
+Some coding style guidelines I try to adhere to:
+
+Function prototypes in a single line.
+No other line longer than 80 characters.
+Indentation by tabs (which count as 8 spaces for the 80 characters rule).
+No abbreviations unless there is really no other way.
+One or two letter names only for core variables.
+Spaces before and after binary operators ('=',...) and after ',' and
+before the '(' of flow controls (if, for, while, assert).
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..c2dd930
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,192 @@
+Build-Dependencies:
+ libdb3, libdb4.x or libdb5.x
+ libz
+Optional Dependencies:
+ libgpgme >= 0.4.1 (In Debian libgpgme11-dev, NOT libgpgme-dev)
+ libbz2
+ libarchive
+When Building from git:
+ autoconf2.50 (autoconf 2.13 will not work)
+
+Basic Installation
+==================
+
+ These are generic installation instructions.
+
+ The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation. It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions. Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, a file
+`config.cache' that saves the results of its tests to speed up
+reconfiguring, and a file `config.log' containing compiler output
+(useful mainly for debugging `configure').
+
+ If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release. If at some point `config.cache'
+contains results you don't want to keep, you may remove or edit it.
+
+ The file `configure.in' is used to create `configure' by a program
+called `autoconf'. You only need `configure.in' if you want to change
+it or regenerate `configure' using a newer version of `autoconf'.
+
+The simplest way to compile this package is:
+
+ 1. `cd' to the directory containing the package's source code and type
+ `./configure' to configure the package for your system. If you're
+ using `csh' on an old version of System V, you might need to type
+ `sh ./configure' instead to prevent `csh' from trying to execute
+ `configure' itself.
+
+ Running `configure' takes awhile. While running, it prints some
+ messages telling which features it is checking for.
+
+ 2. Type `make' to compile the package.
+
+ 3. Optionally, type `make check' to run any self-tests that come with
+ the package.
+
+ 4. Type `make install' to install the programs and any data files and
+ documentation.
+
+ 5. You can remove the program binaries and object files from the
+ source code directory by typing `make clean'. To also remove the
+ files that `configure' created (so you can compile the package for
+ a different kind of computer), type `make distclean'. There is
+ also a `make maintainer-clean' target, but that is intended mainly
+ for the package's developers. If you use it, you may have to get
+ all sorts of other programs in order to regenerate files that came
+ with the distribution.
+
+Compilers and Options
+=====================
+
+ Some systems require unusual options for compilation or linking that
+the `configure' script does not know about. You can give `configure'
+initial values for variables by setting them in the environment. Using
+a Bourne-compatible shell, you can do that on the command line like
+this:
+ CC=c89 CFLAGS=-O2 LIBS=-lposix ./configure
+
+Or on systems that have the `env' program, you can do it like this:
+ env CPPFLAGS=-I/usr/local/include LDFLAGS=-s ./configure
+
+Compiling For Multiple Architectures
+====================================
+
+ You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory. To do this, you must use a version of `make' that
+supports the `VPATH' variable, such as GNU `make'. `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script. `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.
+
+ If you have to use a `make' that does not support the `VPATH'
+variable, you have to compile the package for one architecture at a time
+in the source code directory. After you have installed the package for
+one architecture, use `make distclean' before reconfiguring for another
+architecture.
+
+Installation Names
+==================
+
+ By default, `make install' will install the package's files in
+`/usr/local/bin', `/usr/local/man', etc. You can specify an
+installation prefix other than `/usr/local' by giving `configure' the
+option `--prefix=PATH'.
+
+ You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files. If you
+give `configure' the option `--exec-prefix=PATH', the package will use
+PATH as the prefix for installing programs and libraries.
+Documentation and other data files will still use the regular prefix.
+
+ In addition, if you use an unusual directory layout you can give
+options like `--bindir=PATH' to specify different values for particular
+kinds of files. Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them.
+
+ If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
+
+Optional Features
+=================
+
+ Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System). The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+ For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+Specifying the System Type
+==========================
+
+ There may be some features `configure' can not figure out
+automatically, but needs to determine by the type of host the package
+will run on. Usually `configure' can figure that out, but if it prints
+a message saying it can not guess the host type, give it the
+`--host=TYPE' option. TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name with three fields:
+ CPU-COMPANY-SYSTEM
+
+See the file `config.sub' for the possible values of each field. If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the host type.
+
+ If you are building compiler tools for cross-compiling, you can also
+use the `--target=TYPE' option to select the type of system they will
+produce code for and the `--build=TYPE' option to select the type of
+system on which you are compiling the package.
+
+Sharing Defaults
+================
+
+ If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists. Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
+
+Operation Controls
+==================
+
+ `configure' recognizes the following options to control how it
+operates.
+
+`--cache-file=FILE'
+ Use and save the results of the tests in FILE instead of
+ `./config.cache'. Set FILE to `/dev/null' to disable caching, for
+ debugging `configure'.
+
+`--help'
+ Print a summary of the options to `configure', and exit.
+
+`--quiet'
+`--silent'
+`-q'
+ Do not print messages saying which checks are being made. To
+ suppress all normal output, redirect it to `/dev/null' (any error
+ messages will still be shown).
+
+`--srcdir=DIR'
+ Look for the package's source code in directory DIR. Usually
+ `configure' can determine that directory automatically.
+
+`--version'
+ Print the version of Autoconf used to generate the `configure'
+ script, and exit.
+
+`configure' also accepts some other, not widely useful, options.
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..5069a5f
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,47 @@
+SUBDIRS = docs tests
+
+EXTRA_DIST = autogen.sh
+
+bin_PROGRAMS = reprepro changestool rredtool
+
+if HAVE_LIBARCHIVE
+ARCHIVE_USED = ar.c debfile.c
+ARCHIVE_CONTENTS = debfilecontents.c
+ARCHIVE_UNUSED = extractcontrol.c
+else
+ARCHIVE_USED = extractcontrol.c
+ARCHIVE_CONTENTS =
+ARCHIVE_UNUSED = ar.c debfile.c debfilecontents.c
+endif
+
+AM_CPPFLAGS = $(ARCHIVECPP) $(DBCPPFLAGS)
+reprepro_LDADD = $(ARCHIVELIBS) $(DBLIBS)
+changestool_LDADD = $(ARCHIVELIBS)
+
+reprepro_SOURCES = outhook.c descriptions.c sizes.c sourcecheck.c byhandhook.c archallflood.c needbuild.c globmatch.c printlistformat.c diffindex.c rredpatch.c pool.c atoms.c uncompression.c remoterepository.c indexfile.c copypackages.c sourceextraction.c checksums.c readtextfile.c filecntl.c sha1.c sha256.c sha512.c configparser.c database.c freespace.c hooks.c log.c changes.c incoming.c uploaderslist.c guesscomponent.c files.c md5.c dirs.c chunks.c reference.c binaries.c sources.c checks.c names.c dpkgversions.c release.c mprintf.c updates.c strlist.c signature_check.c signedfile.c signature.c distribution.c checkindeb.c checkindsc.c checkin.c upgradelist.c target.c aptmethod.c downloadcache.c main.c override.c terms.c termdecide.c ignore.c filterlist.c exports.c tracking.c optionsfile.c donefile.c pull.c contents.c filelist.c $(ARCHIVE_USED) $(ARCHIVE_CONTENTS)
+EXTRA_reprepro_SOURCE = $(ARCHIVE_UNUSED)
+
+changestool_SOURCES = uncompression.c sourceextraction.c readtextfile.c filecntl.c tool.c chunkedit.c strlist.c checksums.c sha1.c sha256.c sha512.c md5.c mprintf.c chunks.c signature.c dirs.c names.c $(ARCHIVE_USED)
+
+rredtool_SOURCES = rredtool.c rredpatch.c mprintf.c filecntl.c sha1.c
+
+noinst_HEADERS = outhook.h descriptions.h sizes.h sourcecheck.h byhandhook.h archallflood.h needbuild.h globmatch.h printlistformat.h pool.h atoms.h uncompression.h remoterepository.h copypackages.h sourceextraction.h checksums.h readtextfile.h filecntl.h sha1.h sha256.h sha512.h configparser.h database_p.h database.h freespace.h hooks.h log.h changes.h incoming.h guesscomponent.h md5.h dirs.h files.h chunks.h reference.h binaries.h sources.h checks.h names.h release.h error.h mprintf.h updates.h strlist.h signature.h signature_p.h distribution.h debfile.h checkindeb.h checkindsc.h upgradelist.h target.h aptmethod.h downloadcache.h override.h terms.h termdecide.h ignore.h filterlist.h dpkgversions.h checkin.h exports.h globals.h tracking.h trackingt.h optionsfile.h donefile.h pull.h ar.h filelist.h contents.h chunkedit.h uploaderslist.h indexfile.h rredpatch.h diffindex.h package.h
+
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in $(srcdir)/configure $(srcdir)/stamp-h.in $(srcdir)/aclocal.m4 $(srcdir)/config.h.in
+
+clean-local:
+ -rm -rf autom4te.cache $(srcdir)/autom4te.cache
+
+maintainer-clean-local:
+ -rm -rf $(srcdir)/ac
+
+# Some things for my private laziness
+strictbooleancheck:
+ /home/brl/gcc/b/gcc/cc1 -DHAVE_CONFIG_H -I/home/brl/gcc/b/gcc/include -I/usr/include -I. -Wall -DAVOID_CHECKPROBLEMS=1 -g -W -O2 *.c
+
+SPLINT=splint
+SPLITFLAGSFORVIM= -linelen 10000 -locindentspaces 0
+SPLINTFLAGS= +posixlib -booltype bool -numabstractcast -fixedformalarray -enumint +enumindex +charint $(SPLITFLAGSFORVIM) $(EXTRASPLINTFLAGS)
+
+splint:
+ $(SPLINT) -DSPLINT=1 $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) -D_GNU_SOURCE=1 $(SPLINTFLAGS) $(foreach file,$(reprepro_SOURCES),$(srcdir)/$(file))
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..ba35da4
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,742 @@
+Updates between 5.4.3 and 5.4.4:
+- Revert "uncompress: prevent reprepro from hanging on unzstd"
+- Wait for poll event
+
+Updates between 5.4.2 and 5.4.3:
+- uncompress: prevent block on unzstd (https://bugs.debian.org/1056380)
+
+Updates between 5.4.1 and 5.4.2:
+- Add SHA512 support (http://bugs.debian.org/855975)
+- uncompress: close the pipe after the child exits
+ (https://bugs.launchpad.net/ubuntu/+bug/2008508)
+
+Updates between 5.4.0 and 5.4.1:
+- Add --ignore=conflictingarchall (http://bugs.debian.org/697630)
+- Add example to notify processing of .changes files (https://bugs.debian.org/827816)
+- If data tar extraction fails try again as uncompressed (https://bugs.debian.org/863061)
+- Prevent duplicated keyid in signing error message (https://bugs.debian.org/1006766)
+
+Updates between 5.3.1 and 5.4.0:
+- Add shunit2 based tests (https://bugs.debian.org/857302)
+- Support multiple versions. (https://bugs.debian.org/570623)
+- Add the commands move, movesrc, movematched, movefilter
+- Add Limit and Archive option
+
+Updates between 5.3.0 and 5.3.1:
+- fix manpage to add the behaviour if reprepro is linked against liblzma
+- mark 'dumpcontents' command as deprecated
+- Add Zstd support
+
+Updates between 5.2.0 and 5.3.0:
+- no Binary field in a .changes file is no longer an error
+ but handled like an empty Binary field. (Needed to still
+ accept source only .changes files by dpkg > 1.19.3).
+
+Updates between 5.1.1 and 5.2.0:
+- mark .lz support as deprecated
+- add _listcodenames command
+- allow to set Signed-By header via conf/distributions
+- add support for .asc files in source packages
+- some documentation improvements
+- allow '+' character in method-URI
+
+Updates between 5.1.0 and 5.1.1:
+- improve error handling when extracting .deb file contents
+- fix many spelling mistakes
+
+Updates between 5.0.0 and 5.1.0:
+- .gz, .bz2 and .lzma built-in uncompressors now also support
+ concatenated files (as gunzip, bunzip2 and unxz).
+- added unreferencesnapshot and removereference commands
+- fixed (Deb|Dsc)Indices parsing (did add a default Release even
+ if none was specified)
+- some documentation improvements
+
+Updates between 4.18.0 and 5.0.0:
+- some code refactoring (no new features or bugfixes, only chances to break stuff)
+- require new status 103 behaviour from apt methods
+ (i.e. http from squeeze and before no longer supported)
+- add support for .buildinfo files in .changes files
+
+Updates between 4.17.0 and 4.17.1:
+- fix bug with 'flood' if there are binaries belonging to different versions
+ of the same source package
+- fix output caused by 'warning' FilterList files.
+
+Updates between 4.16.1 and 4.17.0:
+- some manpage fixes
+- '#' in filterlists are comments
+- fix parsing of strangely formatted control files
+- add Exportoptions: noexport option in conf/distributions
+- add Permit: unlisted_binary option in conf/incoming
+- -dbgsym are not differently looked for in changes' Binary field
+
+Updates between 4.16.0 and 4.16.1:
+- fix segfault in verbose exporting of xzed indices
+
+Updates between 4.15.0 and 4.16.0:
+- add support for xz uncompression using liblzma instead of unxz
+- enable using liblzma by default (if found and not --without-liblzma is given)
+- all example scripts in python now use python3
+
+Updates between 4.14.1 and 4.15.0:
+- fixes to outsftphook example, new xz example, manpage improvements
+- support for native Packages.xz generation using liblzma (disabled by default)
+
+Updates between 4.13.1 and 4.14.0:
+- repairdescriptions also repairs udeb descriptions
+- automatically add long descriptions when updating
+ packages from a source that does not have them
+- ignore Extra-Source-Only source packages by default
+- some small bugfixes and improvements
+
+Updates between 4.13.0 and 4.13.1:
+- fix bug in restore
+- fix percomponent udeb Contents filenames
+- add support for sources listing architecture wildcards to build-needing
+
+Updates between 4.12.5 and 4.13.0:
+- new commands: deleteifunreferenced repairdescriptions lsbycomponent
+- add ${$basename}, ${$filekey} and ${$fullfilename} to --listformat
+- reject absurd large values in ValidFor header
+- add --endhook, --outhook
+- SignWith: can now also contain external scripts for signing
+- several small cleanups and fixes
+
+Updates between 4.12.4 and 4.12.5:
+- various documentation improvements
+- fix bitrot in non-libarchive code
+
+Updates between 4.12.3 and 4.12.4:
+- fix bug when only generating .bz2 indices
+- ignore diff comments about unterminated lines when parsing .diff files
+
+Updates between 4.12.2 and 4.12.3:
+- actually set REPREPRO_CONFIG_DIR in hooks as documented in manpage
+- support 103 redirect message from apt's http method.
+ (works best with apt >= 0.9.4)
+
+Updates between 4.12.1 and 4.12.2:
+- fix error with uploader files with more than 16 group members
+
+Updates between 4.12.0 and 4.12.1:
+- fix bash and zsh completion to work with config directories
+- add experimental -A, -C, -T support to the pull/update family of commands
+
+Updates between 4.11.0 and 4.12.0:
+- get InRelease from remote repositories (to disable use new GetInRelease: no)
+- always put Package field first in indices
+- support getting packages from remote repositories without md5sums.
+
+Updates between 4.10.0 and 4.11.0:
+- Contents files default location is now "percomponent compatsymlink".
+- unify handling of "unknown" section.
+
+Updates between 4.9.0 and 4.10.0:
+- allow "!include:" in conf/{distributions,updates,pulls,incoming}
+- conf/{distributions,updates,pulls,incoming} can be directories
+- add FilterList keyword 'supersede' to remove if upstream has newer pkgs
+- improve changelogs.example (CHANGELOGDIR empty stored directly in pool/)
+
+Updates between 4.8.2 and 4.9.0:
+- build-needing now allows one to look for things for 'all' and 'any'
+- improve error messages when parsing config files
+- uploader files now can 'include' other files.
+
+Updates between 4.8.1 and 4.8.2:
+- rredtool: produce .diff/Index files that reprepro can understand.
+- warn if uploader files contains key ids too long to handle
+- make .diff/Index parsing errors non-fatal
+
+Updates between 4.8.0 and 4.8.1:
+- fix NULL-dereference with broken Packages.diff/Index files
+
+Updates between 4.7.0 and 4.8.0:
+- add compatsymlink nocompatsymlink Contents: options
+ (and document that the default will change in the future)
+
+Updates between 4.6.1 and 4.7.0:
+- add 'redochecksums' command
+- add percomponent and allcomponents to Contents: flags
+
+Updates between 4.6.0 and 4.6.1:
+- fix message given when replacing a package with the same version
+- fix bug not deleting packages if none added in update
+
+Updates between 4.5.1 and 4.6.0:
+- add 'FilterSrcList' for update and pull
+- ignore leading comments in control files
+
+Updates between 4.5.0 and 4.5.1:
+- 'check' also checks if architectures match
+- bugfix in 'sourcemissing', 'unusedsources' and 'reportcruft' without tracking
+- fix 'pull' copying packages with wrong architecture
+- compile with libdb5
+
+Updates between 4.4.0 and 4.5.0:
+- support reading of Release files without MD5Sum
+- add all missing Checksums-* when importing from remote repositories
+- add 'reportcruft' command
+
+Updates between 4.3.0 and 4.4.0:
+- SignWith allows multiple arguments to denote multiple keys to sign with
+- add removesrcs command (like removesrc but you can remove more at once)
+- uploaders files can have groups of uploaders and depend on the
+ distribution to upload to (to share uploaders file between distributions)
+- add 'sizes' command.
+
+Updates between 4.2.0 and 4.3.0:
+- add special "$Delete" override field to get rid of fields
+- add support for ButAutomaticUpgrades
+- add 'unusedsources' and 'sourcemissing' commands
+- add support for lzip compressed files
+- bugfixes ($component overrides, compiling without libbz2, with gcc4.5)
+
+Updates between 4.1.1 and 4.2.0:
+- allow patterns in override files
+- apply override files when doing 'pull' and 'update'
+- add special '$Component' override field
+- create InRelease file additionally to Release.gpg
+
+Updates between 4.1.1 and 4.1.2:
+- fix parsing of .orig-*.tar.* lines in .changes files,
+ especially do not choke on _ characters.
+- add --onlysmalldeletes option to limit scope of update and pull
+
+Updates between 4.1.0 and 4.1.1:
+- fix calling --changes Log:-notifiers from processincoming
+- add '${$source}' and '${$sourceversion}' to --list-format
+
+Updates between 4.0.2 and 4.1.0:
+- rredtool can be used as index hook to maintain a .diff/Index file.
+- properly handle relative LogDir in conf/incoming
+- add ByHandHooks to conf/distributions (only used by processincoming yet)
+- fix extraction of exactly one of section or priority from a tar file.
+- new byhand statement for uploaders files and ByHandHook for configuration
+
+Updates between 4.0.1 and 4.0.2:
+- add support for xz de-compression
+- fix regression (since 3.8.0) breaking arch1>arch2 update rules.
+- some small warning output fixes in update code
+
+Updates between 4.0.0 and 4.0.1:
+- strip the last '/' from Method and Fallback in conf/updates to work
+ around problems in some apt methods. (to get old behaviour, use "//")
+- 'check' now warns if a file was missing but could be readded
+- much more permissive check for libdb. You are on your own now to check
+ what version to build against.
+
+Updates between 3.12.1 and 4.0.0:
+- disable old files.db handling (remove all support but
+ translatelegacyfilelists), remove --oldfilesdb options
+- remove --overridedir
+- bugfixes in documentation and bash/zsh completion
+
+Updates between 3.12.0 and 3.12.1:
+- fix problems with libgpgme 1.2.0
+
+Updates between 3.11.1 and 3.12.0:
+- warn if directories are relative to the current directory but do not start
+ with './'
+- directories starting '+b/' '+o/' and '+c/' are relative to basedir, outdir
+ or confdir.
+- FakeComponentPrefix now no longer adds its arguments to components already
+ having it and shortens their dist directories to not duplicate that either.
+- -A, -C and -T can have multiple arguments now, separated with '|'.
+- new 'flood' action to align architecture all packages
+- new '--show-percent' option
+- warn if old legacy files.db is still used
+- add new translatelegacyfilelists command for easier migration.
+ (just a collectnewchecksums and deleting that file was enough, though)
+
+Updates between 3.11.0 and 3.11.1:
+- new changestool option --create-with-all-fields
+- new --morguedir option (or morguedir in conf/options, of course)
+- new $Version, $Source, $SourceVersion et al on formulas
+- bugfixes
+
+Updates between 3.10.0 and 3.11.0:
+- new --list-max and --list-skip options
+- new glob-matching in formulas
+ (e.g. "reprepro listfilter codename 'Package (% linux-image-*)'")
+- new listmatched, removematched, copymatched and restorematched
+- new build-needing command to list source packages likely to need
+ a build for a given architecture.
+- pull, predelete and update call retrack on
+ distributions with tracking enabled.
+
+Updates between 3.9.2 and 3.10.0:
+- fix bug of ListHook not used if in the From: rule of a rule.
+- add ListShellHook
+- add _listdbidentifiers and _listconfidentifiers
+- add --list-format to change format of list and listfilter
++ rewrite Release.gpg verification code:
+- more hops needed to use expired or revoked keys
+- earlier check of keys. now all keys in VerifyRelease must be known to gpg
+- subkeys are accepted if the key-id is appended with '+'.
+* improve uploader lists:
+- subkeys are accepted if key-id is appended with '+'
+- new 'anybody' while 'unsigned' now means really unsigned
+- new conditions to look at source names, binary names and sections
+
+Updates between 3.9.1 and 3.9.2:
++ fix bug (caught by assertion if there is no old index file)
+ that inverts the logic of downloading .diff files when there is no
+ DownLoadListsAs line.
+
+Updates between 3.9.0 and 3.9.1:
++ fix error of misinterpreting newer libz return value
+ when extracting section from a .dsc.
+
+Updates between 3.8.2 and 3.9.0:
++ deprecate old (pre 3.3) file database format. Warn loudly
+ when the database is still using that format.
++ new features
+- support Sources/Package.diff downloading
+ (Use DownloadListsAs if you want to force .gz downloading instead)
+- support falling back to other compressions of index files when
+ not downloadable at first.
+- changestool can now also look in .lzma files for .dsc section/priority
+- delete .new files in dists/ on error unless --keeptemporaries
+- new 'warning' state for FilterList
+- set REPREPRO_FROM and REPREPRO_CAUSING_RULE in some log notifiers
++ bug fixes:
+- re-enable workaround for apt-methods having problem with existing
+ files which got lost in 3.8
+- fix bug not looking at DownloadListsAs in all cases
+- bugfix in misparsing some .diff files for section/priority retrieval
+- do not stop when incomplete downloads or other stray files are in the pool
+
+Updates between 3.8.1 and 3.8.2:
+- add ReadOnly option for conf/distributions
+- support byhand and raw-* files in include and processincoming
+- allow uploading log files with .changes files
+- new LogDir in conf/incoming to store changes and log files.
+
+Updates between 3.8.0 and 3.8.1:
+- make filtercopy work again
+- fix bug not allowing source packages from flat
+ repositories without Directory fields to be imported
+- add gnupghome option to make GNUPGHOME setable via conf/options
+
+Updates between 3.8.0~alpha and 3.8.0:
+- add support for generating Valid-Until fields in Release files
+
+Updates between 3.6.3 and 3.8.0~alpha:
++ different small improvements:
+- log notifiers can be limited to a specific command with --via
+- upgradeonly value for FilterList to only include a package if
+ an older one is already there.
+- new --keepunusednewfiles to keep files just added to the pool
+ but later in the same run decided to no longer be needed
+ (for example because a package was not added because of later
+ detected errors).
+- --keepunreferenced and actions implying this now print
+ the number of files that lost their last reference
+- new dumpupdate and dumppull actions that are like checkupdate and
+ checkpull but with output easier parseable
+- new ls action to list a package in all distributions
++ bugfixes
+- if FilterFormula excludes a package, FilterList can no longer put
+ a package on hold.
++ improved decompression support
+- support looking into lzma compressed .deb, .diff and .tar files.
+- support for external helpers for uncompression
+ (to speed up uncompression on multiple processors, also reprepro
+ can now be compiled without libbz2 and zlib if needed)
+- support for downloading and using bz2 and lzma index files in updates
++ major changes to index file retrieval on updates:
+- iteratedupdate action was removed
+- update-rules can inherit settings from others
+- ListHooks are now called once per usage
+ (mostly only makes a difference for flat upstream repositories)
+- --nolistsdownload no longer includes --noskipold and checks checksums
+ of the lists files.
+- format of lists/ directory contents changed
+ (I doubt anyone cares for the files in there, but if you
+ do, you have been informed hereby that it looks differently)
+- lists/ directory no longer auto-cleaned,
+ thus --(no)keepunneeded no longer exists and
+ new action cleanlists to clean files no longer usable...
++ visible effects of internal refactorisations:
+- multiple checks for identifiers more strict now
+- some fields in conf/distributions need a specific order now
+ (Architectures and Components before things using the values
+ defined by those)
+
+Updates between 3.6.2 and 3.6.3:
+- fix sha256 generation of very large files, thanks to Max Bowsher
+- allow multiple export hooks at once
+- use libgpg-error directly (to avoid some warnings in dependency analysis)
+
+Updates between 3.6.1 and 3.6.2:
+- --nooldfilesdb is the default now, create new repositories
+ with --oldfilesdb if you do not want to destroy them by accidentally
+ running reprepro versions before 3.0.0 on them...
+- fix content reading of overlong .deb files
+- fix parsing of flat repositories without Directory in Sources
+- fix tracking database corruption in removesrc with outdated tracking data
+ [previously believed hard to trigger, but outdated tracking data suffices]
+- many improvements and less spelling errors in manpage
+
+Updates between 3.6.0 and 3.6.1:
+- fix reoverride
+- fix bz2 compression (newer libbz2 sometimes uses more return codes
+ than previous versions, triggering a bug in reprepro)
+
+Updates between 3.5.2 and 3.6.0:
+- add IgnoreHashes option
+- allow list to list all packages if no package name is specified.
+- support retrieving packages from flat repositories
+- speed up updating by buffering zlib's reading of index files
+- remove iteratedupdate
+- multiple little but nasty bugs fixed
+
+Updates between 3.5.1 and 3.5.2:
+- fix bug in optionsfilename generation introduced in 3.5.1
+- add FakeComponentPrefix to cope with apt's problems with
+ / in distribution names.
+
+Updates between 3.5.0 and 3.5.1:
+- support upcoming version 3 format source packages
+ (priority and section extraction only for wig&pen and quilt format)
+- set environment variables REPREPRO_*_DIR when calling hooks.
+ (note that those are set to the last set values, so for example
+ REPREPRO_CONF_DIR will be the directory with 'distributions' in it,
+ not necessarily the one with 'options' in it that was parsed).
+- other minor bugfixes
+
+Updates between 3.4.2 and 3.5.0:
+- allow suite names as command line arguments
+ (when there is no codename of this name and only one distribution
+ has this suite name)
+- generate and check Sha256, too.
+- changestool puts Files: last in .changes files so etch's dupload
+ works.
+
+Updates between 3.4.1 and 3.4.2:
+now really fix the nasty bug with notifiers 3.4.1 should
+have fixed and be more verbose when rejecting packages because
+of problems with a key
+
+Updates between 3.4.0 and 3.4.1:
+bugfixes only (though of the ugly segfaults kind)
+
+Updates between 3.3.2 and 3.4.0:
++ bugfixes:
+- no longer mix up -S and -P command line arguments (introduced in 3.0.1)
+- some field overriding was erroneously case dependent.
+- many spelling corrections
++ improvements:
+- more support for Checksums-Sha1
+- add copysrc and copyfilter commands (improve copy w.r.t tracking)
+- add restore restoresrc restorefilter and _addpackage commands
+- warn about some impossible -A -T combinations.
+- set fake Suite: in snapshots to quiet apt's signature checks.
+- add REPREPRO_CAUSING_FILE environment variable in log notifiers.
+- update expected fields to new dpkg-dev
+- try to extract missing section and priority of .dsc files from
+ .diff.gz and .tar.gz.
+
+Updates between 3.3.1 and 3.3.2:
+- bugfix in includedeb and a little bit code cleanup
+
+Updates between 3.3.0 and 3.3.1:
+- multiple bugfixes
+
+Updates between 3.1.0 and 3.3.0:
+- add support for different checksums.
+ The new checksums.db which stores all the checksums, while
+ files.db still only stores md5sum and is the canonical information,
+ when it exists. This way repositories keep backward compatible.
+ A repository generated with --nooldfilesdb only has checksums.db
+ and will not work with reprepro version prior to 3.3.
+ New command collectnewchecksums to calculate checksums missing
+ in the database.
+
+Updates between 3.0.1 and 3.1.0:
+- add sha1 hashes to the generated Release files.
+ the changes semantics needed in the release.caches.db file for this
+ should be transient. This will only cause index files without
+ uncompressed variants to be regenerated once upon upgrade, but
+ switching back and forth between previous versions and this or
+ later versions will cause regenerating of unchanged files.
+- internal changes of reading of text files (.dsc/.changes/Release/
+ control from .deb). Should not make any difference with normal input,
+ and make the situation better with strange input.
+- source packages now can have .tar und .diff lzma compressed
+ (still missing is support for lzma compressed binary packages)
+
+Updates between 3.0.0 and 3.0.1:
+- the default for --export is now "changed", as the old default was
+ just too confusing most of the time.
+- translatefilelists now also can convert databases with old and new
+ style entries
+
+Updates between 2.2.4 and 3.0.0:
+- new config file parser:
+ * many error messages now with line numbers
+ * native support of comments (i.e. lines starting with # are now ignored,
+ instead of treated as ignored headers, # within lines is now comment, too)
+ * better support of tabs
+ * meaning of empty fields changed, empty now means nothing and not all.
+- always parse the whole distributions file first before doing anything else
+ (avoids actions started in the wrong base directory and helps to catch more
+ ambiguities, may lead to the need of a valid config file for some actions
+ not needing one, though).
+- check pull and update rules to not list any architectures or components that
+ will never be used, so typos won't go unnoticed.
+- obsolete --overridedir and searching files in overrides/ directory by default.
+ These places are still searched, but so is the configuration directory now and
+ future version will stop accepting --overridedir and not search in that
+ directory.
+- added db/version file to document database format
+ (so future versions can warn about incompatibilities)
+- cleaned up tracking handling a bit:
+ * retrack no longer creates tracking data for distributions without tracking
+ * retrack only recreates usage data, not all data
+ (so .changes files and old versions are no longer lost when run)
+ also references from tracking data are now refreshed by rereferences instead
+ * removealltracks now explicitly needs distribution names
+ * tidytracks now removes all tracking data from a distribution without tracking
+ * clearvanished removes tracking data from vanished distributions.
+- make update's ListHook relative to confdir (unless absolute)
+- added removesrc and removefilter
+- new format for contents.cache.db. Only needs half of the disk space and runtime
+ to generate Contents files, but you need to run translatefilelists to translate
+ the cached items (or delete your contents.cache.db and let reprepro reread
+ all your .deb files). Also format and meaning of the Contents-fields changed, a
+ rate no longer can be specified.
+
+Updates between 2.2.3 and 2.2.4:
+- [SECURITY] fix bug causing a Release.gpg with only
+ unknown signatures considered as properly signed.
+
+Updates between 2.2.2 and 2.2.3:
+- add support for binNMUs (i.e. .changes files having a Version: that is not
+ the source version).
+- add zsh auto-completions script
+
+Updates between 2.2.1 and 2.2.2:
+- processincoming can be limited to a single .changes file
+- fix to support apt-methods stating Send-Config: false
+- set GPG_TTY when stdin is a terminal to ease usage of pinentry-curses
+
+Updates between 2.2.0 and 2.2.1:
+- fix mixup of the name of the --spacecheck option
+- fix missing options in bash completions
+- fix segfault when including changes without notificators
+
+Updates between 2.1.0 and 2.2.0:
+- renamed cleartracks in removealltracks
+- new notifier type for accepted changes files
+- bugs fixed:
+ * not tidy tracking dependencies on package remove
+ * forgot to call some slow notifiers in processincoming
+- new --wait-for-lock option
+- check free space on update (new --spacecheck option to switch this off)
+- extended the changestool helper (add, adddsc, addrawfile, setdistribution)
+- processincoming changes:
+ * reports an error if a package is not included due to an already existing
+ newer version.
+ * allow ignoring of unused files and newer versions (Permit:)
+ * option when to delete rejected or faulty package (Cleanup:)
+- include command names included .changes files like processincoming does
+
+Updates between 2.0.0 and 2.1.0:
+- add --silent option
+- change some status output to stdout instead of stderr.
+- fix some unnecessary exporting of index files
+- fix bug in term parsing (for FilterFormula and the like)
+- add Log: mechanism to log to file and execute external helpers
+- example-script to generate a packages.debian.org/changelogs like
+ hierarchy with changelog and copyright files.
+
+Updates between 1.3.1 and 2.0.0:
+- add "adddeb" action to changestool
+- fix bug in manpage ("accept" should have been "allow" for uploaders)
+- new AlsoAcceptFor:-header for conf/distributions to allow more fine
+ controlled which distributions to allow than just codename/suite
+ or everything (via --ignore=wrongdistribution)
+- fail cleanly when getting a .dsc without Format header
+- fix bug in non-libarchive filelist extraction on large lists
+- add processincoming command to scan an incoming directory and add
+ packages from there. (this needed some refactorisations of other
+ code, so beware)
+- add gensnapshot command
+
+Updates between 1.3.0 and 1.3.1:
+- bugfix in changestool updatechecksums
+
+Updates between 1.2.0 and 1.3.0:
+- now uses libgpgme11 instead of libgpgme6.
+- remove --onlyacceptsigned switch (soon to be replaced by something
+ useable, hopefully)
+- only reject a package because of signatures if it only has bad signatures
+ and no good one. (Rejecting a package because of a missing key when
+ it would have processed without signature did not really make sense)
+- new --ignore=brokensignatures to also accept packages with broken signatures
+ without any valid signature.
+- Now looks at the Binary: and Version: fields of a .changes file.
+ Unless the new --ignore=wrongversion is specified, a dsc must
+ have the same version, and a .deb must have this source version
+ unless --ignore=wrongsourceversion is given. A .deb must also
+ contain a package listed in the Binary: header unless
+ --ignore=surprisingbinary is given. (A .dsc with a different name
+ or a .deb with a different Source than the Source-header if the
+ .changes file is still not ignoreable due to file naming issues)
+- FilterList in update and pull rules now has a space separated list
+ of filenames instead of only a single filename.
+- new Uploaders field in conf/distributions:
+ specifies what a .changes file has to be signed with to be allowed in
+- new helper program "changestool" to preprocess .changes files.
+
+Updates between 1.1.0 and 1.2.0:
+- improve message of missing files
+- checkin now supports .tar.bz2, .diff.bz2 and .orig.tar.bz2
+ (checkindsc did not care, binaries may contain tar.bz2 if reprepro
+ is compiled with libarchive and libbz2)
+- fix bug delaying full Contents- generation
+
+Updates between 1.0.2 and 1.1.0:
+- extended the (experimental) package tracking feature
+- cleartracks removes files losing their last reference (unless --keepunreferenced as usual)
+- fix bug of not generating a uncompressed Sources line in Release when no uncompressed
+ Sources file is generated.
+
+Updates between 1.0.1 and 1.0.2:
+- fix segfault in non-libarchive code introduced with 1.0.0
+
+Updates between 1.0.0 and 1.0.1:
+- add clearvanished command
+- cope with GNU ar style .deb files (when using libarchive)
+- cope with strange control.tar.gz files (when not using libarchive)
+
+Updates between 0.9.1 and 1.0.0:
+- reject some .changes earlier, delete added files
+ when checks after copying files to the pool failed.
+- handle some signals (TERM, ABRT, INT and QUIT) a bit
+ more gracefully
+- some little fixes in the documentation
+- add predelete action to delete packages that would be
+ deleted or replaced in an update
+- add new copy command to copy a single package from
+ one distribution to another.
+
+Updates between 0.9.0 and 0.9.1:
+- fix bug in post-export script handling.
+- fixed documentation in tiffany.example how to generate
+ .diff directories the new apt can read.
+
+Updates between 0.8.2 and 0.9.0:
+- added --export= option and harmonized exporting of
+ distributions. (Now every distribution processed
+ without errors is exported by default, with options
+ for always, never or only export it when changed)
+- added pull and checkpull actions.
+ Those are roughly equivalent to upgrade rules with
+ file:/path/to/basedir Method, but faster and a bit
+ more limited (files cannot change components)
+- fix segfault of checkupdate
+- fix including a changes file with source and restricting
+ to some binary distribution or to binary package type.
+- add support to use libarchive instead of calling ar and tar
+- added Contents file generation support
+- now supporting libdb-4.4, libdb-4.3 and libdb3
+
+Updates between 0.8.1 and 0.8.2:
+- mark process list files and only skip those not marked
+ as processed instead of those not newly downloaded.
+- change the wording of some warnings, add some new
+- new WORKAROUND part in the manpage
+- add example bash_completion script
+
+Updates between 0.8 and 0.8.1:
+- some bugfixes (segfault, memory leak, manpage typos)
+- enforcement of extensions of include{,dsc,deb,udeb} files
+ to .changes,.dsc,.deb,.udeb and new --ignore=extension to
+ circumvent it.
+- support generation of the NotAutomatic field.
+- added --ignore=missingfile to ignore files missing in
+ a .changes file, but lying around and requested by
+ a .dsc file.
+
+Updates between 0.7 and 0.8:
+- unless the new --keepdirectories option is given,
+ try to remove pool/ directories that got empty by
+ removing things from them. (To be exact, try to rmdir(2)
+ them every time, which will only work if they are empty).
+- Unless the new --noskipold is used, only targets with newly
+ downloaded index files are updated. (new = downloaded
+ by the instance of reprepro currently running)
+- reprepro now always puts the checksums of the uncompressed
+ index files into the Release file, even if it is not
+ written to disk. This fixes some problems with newer
+ versions of apt. (Take a look at DscIndices to get older
+ versions of reprepro to please them, too).
+- The export hooks (the programs specified as DebIndices,
+ UDebIndices and DscIndices) are now always called once
+ with the uncompressed names.
+- to compile reprepro with woody without a backported zlib
+ use the -DOLDZLIB switch.
+- reprepro now supports bzip2 output natively. (You can
+ still use the example if you want to call bzip2 yourself
+ instead of using the libbz2 library)
+- new db/release.cache.db file storing md5sums of written
+ index and Release files there. (This can cause Release
+ file give old md5sums when the files are not what it
+ expects, but unless you manually changed them that is
+ a good way to find errors, and manually changing if
+ fragile anyway, so better do not do it but ask me
+ if some feature is missing overrides cannot offer yet).
+
+Updates between 0.6 and 0.7:
+- new --ignore=missingfield,brokenold,brokenversioncmp,
+ unusedarch,surprisingarch
+- Fix segfault when update file is empty.
+ (Thanks to Gianluigi Tiesi for noticing this.)
+- improve manpage a little bit
+- many little tidy ups
+
+Updates between 0.5 and 0.6:
+- no longer set execute bit of generated Release.gpg files
+- use REPREPRO_BASE_DIR for default basedir, parse conf/options
+ for further default options. (and add --no options to disable
+ boolean options again, same for ignore)
+- new command createsymlinks (for symlinks like "stable"->"sarge")
+- parse FilterList default action correctly
+- putting .changes in a distribution not listed is now an error
+ without --ignore=wrongdistribution (and without
+ "ignore wrongdistributions" in conf/options)
+
+Updates between 0.4 and 0.5:
+- starts of source package tracking
+- add quick&dirty --ask-passphrase option
+- SignWith's argument is now used, use "yes" or "default" to get old behaviour
+- allow ~ in versions listed in .changes files
+
+Updates between 0.3 and 0.4:
+- minor bugfix: no longer readd existing packages, when after a delete rule
+ a old package was found first.
+- adopt short-howto to changes in keywords.
+- many tidy ups and little bugfixes
+- add Fallback option to specify another host to get mirrored files from
+- default basedir is now "." i.e. the current directory.
+
+Updates between 0.2 and 0.3:
+- Override: SourceOverride: replaced by (Deb|UDeb|Dsc)Override
+- new command reoverride to reapply override information.
+- sometimes be a bit more verbose
+- new experimental iteratedupdate command , which is a variant of
+ update but needs less memory.
+- to ignore Release signature failures two --force's are needed now.
+
+Updates between 0.1.1 and 0.2:
+- _md5sums command got removed. New command to dump
+ the contents of the files database is _listmd5sums
+- --basedir (alias -b) will no longer override prior
+ given values to --confdir, --listdir, ....
+- fix nasty overflow bug
+- write Release, Packages, and Sources files first
+ to .new variants and move then all at once.
+- new Options DebIndices DscIndices UDebIndices
diff --git a/README b/README
new file mode 100644
index 0000000..0d6aa0d
--- /dev/null
+++ b/README
@@ -0,0 +1,120 @@
+* What it is:
+
+ This project is a lightweight feature complete manager of a debian
+ package (i.e. binary .deb and source .dsc+.tar.gz+.diff.gz) repository.
+ Emphasis is put on having all packages in the pool/-directory,
+ maximal checking of all sources.
+ generation of signed Release file, Contents, ...
+ Libraries needed are libdb{3,4.?,5.?} and libz.
+ Libraries used if available are libgpgme, libbz2 and libarchive.
+
+* Current status:
+
+ The main features work without problems. Some special use cases
+ might not be very well tested.
+
+* Some naming conventions:
+ basename: the name of a file without any directory information.
+ filekey: the position relative to the mirrordir.
+ (as found as "Filename:" in Packages.gz)
+ full filename: the position relative to /
+
+ architecture: The term like "sparc","i386","mips",...
+ component: Things like "main" "non-free" "contrib" ...
+ (sometimes also called sections)
+ section: Things like "base" "interpreters" "oldlibs"
+ (sometimes also called subsections)
+ type: The kind of packages, currently supported:
+ "deb", "udeb" and "dsc".
+ target: The smallest unit packages are in. A target
+ is specified by the codename of the distribution
+ it is in, the architecture, component and type.
+ The architecture is "source" exactly when
+ the type is "dsc".
+ identifier: an internal string to specify a target,
+ it has the form "<codename>|<component>|source"
+ for type dsc, "<codename>|<component>|<architecture>"
+ for type deb and "u|<codename>|<component>|<architecture>"
+ for type udeb.
+
+ md5sum: The checksum of a file, being in the format
+ "<md5sum of file> <length of file>"
+
+
+* Differences to how other standard tools handle the situation:
+
+ - mirroring:
+ This makes no real mirror of the distribution, but
+ only of its contents. Thus the Index-files will
+ be different. (And thus no longer can be verified
+ by the official signatures). This means people using
+ this mirror have to trust you to not include anything
+ ugly, as they can only check your signature directly.
+ (Or in other words: not useful for mirroring things
+ to be used by strangers).
+ - location:
+ The directory layout under pool/ is only divided
+ by the component and the sourcename. Especially
+ woody and updates/woody will share the same space,
+ thus avoiding multiple instances of the same file.
+ (Can also cause trouble in the rare cases, when both
+ have a file of the same name with different md5sum.
+ Using -f can help here).
+ - 'byhand'-section
+ This is currently just implemented as alias for '-',
+ to make sure lack of implementation does not cause them
+ to land in a byhand-section...
+ - Override files:
+ Only the ExtraOverride style of apt-ftparchive(1) is
+ supported.
+ (i.e. "packagename Section section\npackagename Maintainer maintainer\n")
+ Note that other than apt-ftparchive case is most likely
+ to be significant. (Having the wrong case in might also
+ cause havoc in apt-ftparchive, as that changes the case of
+ the fieldname, which might confuse other programs...)
+
+* Things that might be interesting to know:
+
+ - guessing the component:
+ If inserting a binary or source package without naming
+ a component, this program has to guess of course.
+ This will be done the following way: It will take the
+ first component with the name of the section, being
+ prefix to the section, being suffix to the section
+ or having the section as prefix or any.
+ Thus having specified the components:
+ "main non-free contrib non-US/main non-US/non-free non-US/contrib"
+ should map e.g.
+ "non-US" to "non-US/main" and "contrib/editors" to "contrib",
+ while having only "main non-free and contrib" as components should
+ map e.g. "non-US/contrib" to "contrib" and "non-US" to "main".
+
+ NOTE: Always specify main as the first component, if you want things
+ to end up there.
+ NOTE: unlike in dak, non-US and non-us are different things...
+
+* How to keep multiple versions
+
+ - The default behavior of this reprepro is kept to version 5.3.1's behavior.
+ - To keep multiple versions of the same package in the archive,
+ you have to set the "Limit" option to the desired maximum amount (or to 0 for unlimited).
+ - See the description in the man page for details.
+
+* Database layout changes for multiple versions
+
+ - The database layout changes in version 5.4.0. The difference is as following:
+
+ upstream
+ * packages.db maps "package name" to "control file" without duplicates
+ * no packagenames.db
+
+ multiple versions
+ * packages.db maps "package name|version" to "control file" without duplicates
+ * packagenames.db maps "package name" to "package name|version"
+ allowing duplicates and duplicates sorted by dpkg --compare-versions descending
+
+ - Automatic upgrade
+
+ The first time the database is opened by reprepro with multiple versions support,
+ the database will be upgraded from the upstream layout to the multiple versions layout.
+ *Warning*: There is no way back (but could be done with a simple Python script)!
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..6101edf
--- /dev/null
+++ b/TODO
@@ -0,0 +1,23 @@
+TODO:
+ think about a way to make removesrc work on all distributions ('*'?)
+ -> or make things like remove(src)/list/.. work with distribution globs...
+ write something for manual.html how to manually modify snapshots...
+ write more automated test-cases
+ (not even run in the testcase yet: reoverride, ... (probably many))
+ finish import from incoming dir, implement sending mails to uploader
+ extend FilterList et al to specify type/architecture/component
+ add switch to only include if source is present
+ action to redownload missing pool/ files from some update-rules (looking for md5sum)
+ Fields to exclude architectures and components in update rules,
+ (or alternatively allow ! in inclusion lists).
+
+half far goals:
+ rewrite error handling, caching error messages and handling Ctrl-C better.
+
+far goals:
+ check for unmet Dependencies
+ for unmet Build-dependencies.
+ write documentation and some examples.
+ record timestamp when packages are added.
+ option to keep apt-get'able source to each binary (needs multiple source versions)
+ switch from libdb?.? to sane database
diff --git a/acinclude.m4 b/acinclude.m4
new file mode 100644
index 0000000..c14986f
--- /dev/null
+++ b/acinclude.m4
@@ -0,0 +1,47 @@
+dnl CHECK_ENUM and GET_DEFINE autoconf macros are
+dnl Copyright 2004,2006 Bernhard R. Link
+dnl and hereby in the public domain
+# Check for an enum, which seems to be forgotten in autoconf,
+# as this can neither be checked with cpp, nor is it a symbol
+m4_define([CHECK_ENUM],
+[AS_VAR_PUSHDEF([check_Enum], [rr_cv_check_enum_$1])dnl
+AC_CACHE_CHECK([for $1 in $2], check_Enum,
+[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([AC_INCLUDES_DEFAULT([$5])
+@%:@include <$2>],
+[ if( $1 == 0 )
+ return 0;
+])],
+ [AS_VAR_SET(check_Enum, yes)],
+ [AS_VAR_SET(check_Enum, no)])])
+AS_IF([test AS_VAR_GET(check_Enum) = yes], [$3], [$4])[]dnl
+AS_VAR_POPDEF([check_Enum])dnl
+])dnl
+# extract the value of a #define from a header
+m4_define([GET_DEFINE],
+[AC_LANG_PREPROC_REQUIRE()dnl
+AS_VAR_PUSHDEF(get_Define, [rr_cv_get_define_$1])dnl
+AC_CACHE_CHECK([for $1], get_Define,
+[dnl
+ m4_ifvaln([$2],[dnl
+ echo "#include <$2>" > conftest.$ac_ext
+ echo "$1" >> conftest.$ac_ext
+ ],[dnl
+ echo "$1" > conftest.$ac_ext
+ ])
+ if _AC_EVAL_STDERR([$ac_cpp conftest.$ac_ext >conftest.out]) >/dev/null; then
+ if test -s conftest.err; then
+ AS_VAR_SET(get_Define, $1)
+ else
+ AS_VAR_SET(get_Define, "$(tail -1 conftest.out)")
+ fi
+ else
+ AS_VAR_SET(get_Define, $1)
+ fi
+ rm -f conftest.err conftest.out conftest.$ac_ext
+])
+TMP_GET_DEFINE=AS_VAR_GET(get_Define)
+TMP_GET_DEFINE=${TMP_GET_DEFINE% }
+TMP_GET_DEFINE=${TMP_GET_DEFINE% }
+AS_IF([test "$TMP_GET_DEFINE" = $1], [$3], [$1="$TMP_GET_DEFINE"])[]dnl
+AS_VAR_POPDEF([get_Define])dnl
+])dnl GET_DEFINE
diff --git a/aptmethod.c b/aptmethod.c
new file mode 100644
index 0000000..e50449b
--- /dev/null
+++ b/aptmethod.c
@@ -0,0 +1,1216 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2008,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "dirs.h"
+#include "chunks.h"
+#include "checksums.h"
+#include "files.h"
+#include "uncompression.h"
+#include "aptmethod.h"
+#include "filecntl.h"
+#include "hooks.h"
+
+struct tobedone {
+ /*@null@*/
+ struct tobedone *next;
+ /* must be saved to know where it should be moved to: */
+ /*@notnull@*/
+ char *uri;
+ /*@notnull@*/
+ char *filename;
+ /* in case of redirection, store the originally requested uri: */
+ /*@null@*/
+ char *original_uri;
+ /* callback and its data: */
+ queue_callback *callback;
+ /*@null@*/void *privdata1, *privdata2;
+ /* there is no fallback or that was already used */
+ bool lasttry;
+ /* how often this was redirected */
+ unsigned int redirect_count;
+};
+
+struct aptmethod {
+ /*@only@*/ /*@null@*/
+ struct aptmethod *next;
+ char *name;
+ char *baseuri;
+ /*@null@*/char *fallbackbaseuri;
+ /*@null@*/char *config;
+ int mstdin, mstdout;
+ pid_t child;
+
+ enum {
+ ams_notstarted=0,
+ ams_waitforcapabilities,
+ ams_ok,
+ ams_failed
+ } status;
+
+ /*@null@*/struct tobedone *tobedone;
+ /*@null@*//*@dependent@*/struct tobedone *lasttobedone;
+ /*@null@*//*@dependent@*/const struct tobedone *nexttosend;
+ /* what is currently read: */
+ /*@null@*/char *inputbuffer;
+ size_t input_size, alreadyread;
+ /* What is currently written: */
+ /*@null@*/char *command;
+ size_t alreadywritten, output_length;
+};
+
+struct aptmethodrun {
+ struct aptmethod *methods;
+};
+
+static void todo_free(/*@only@*/ struct tobedone *todo) {
+ free(todo->filename);
+ free(todo->original_uri);
+ free(todo->uri);
+ free(todo);
+}
+
+static void free_todolist(/*@only@*/ struct tobedone *todo) {
+
+ while (todo != NULL) {
+ struct tobedone *h = todo->next;
+
+ todo_free(todo);
+ todo = h;
+ }
+}
+
+static void aptmethod_free(/*@only@*/struct aptmethod *method) {
+ if (method == NULL)
+ return;
+ free(method->name);
+ free(method->baseuri);
+ free(method->config);
+ free(method->fallbackbaseuri);
+ free(method->inputbuffer);
+ free(method->command);
+
+ free_todolist(method->tobedone);
+
+ free(method);
+}
+
+retvalue aptmethod_shutdown(struct aptmethodrun *run) {
+ retvalue result = RET_OK, r;
+ struct aptmethod *method, *lastmethod, **method_ptr;
+
+ /* first get rid of everything not running: */
+ method_ptr = &run->methods;
+ while (*method_ptr != NULL) {
+
+ if ((*method_ptr)->child > 0) {
+ if (verbose > 10)
+ fprintf(stderr,
+"Still waiting for %d\n", (int)(*method_ptr)->child);
+ method_ptr = &(*method_ptr)->next;
+ continue;
+ } else {
+ /*@only@*/ struct aptmethod *h;
+ h = (*method_ptr);
+ *method_ptr = h->next;
+ h->next = NULL;
+ aptmethod_free(h);
+ }
+ }
+
+ /* finally get rid of all the processes: */
+ for (method = run->methods ; method != NULL ; method = method->next) {
+ if (method->mstdin >= 0) {
+ (void)close(method->mstdin);
+ if (verbose > 30)
+ fprintf(stderr, "Closing stdin of %d\n",
+ (int)method->child);
+ }
+ method->mstdin = -1;
+ if (method->mstdout >= 0) {
+ (void)close(method->mstdout);
+ if (verbose > 30)
+ fprintf(stderr, "Closing stdout of %d\n",
+ (int)method->child);
+ }
+ method->mstdout = -1;
+ }
+ while (run->methods != NULL || uncompress_running()) {
+ pid_t pid;int status;
+
+ pid = wait(&status);
+ lastmethod = NULL; method = run->methods;
+ while (method != NULL) {
+ if (method->child == pid) {
+ struct aptmethod *next = method->next;
+
+ if (lastmethod != NULL) {
+ lastmethod->next = next;
+ } else
+ run->methods = next;
+
+ aptmethod_free(method);
+ pid = -1;
+ break;
+ } else {
+ lastmethod = method;
+ method = method->next;
+ }
+ }
+ if (pid > 0) {
+ r = uncompress_checkpid(pid, status);
+ RET_UPDATE(result, r);
+ }
+ }
+ free(run);
+ return result;
+}
+
+/******************Initialize the data structures***********************/
+
+retvalue aptmethod_initialize_run(struct aptmethodrun **run) {
+ struct aptmethodrun *r;
+
+ r = zNEW(struct aptmethodrun);
+ if (FAILEDTOALLOC(r))
+ return RET_ERROR_OOM;
+ *run = r;
+ return RET_OK;
+}
+
+retvalue aptmethod_newmethod(struct aptmethodrun *run, const char *uri, const char *fallbackuri, const struct strlist *config, struct aptmethod **m) {
+ struct aptmethod *method;
+ const char *p;
+
+ method = zNEW(struct aptmethod);
+ if (FAILEDTOALLOC(method))
+ return RET_ERROR_OOM;
+ method->mstdin = -1;
+ method->mstdout = -1;
+ method->child = -1;
+ method->status = ams_notstarted;
+ p = uri;
+ while (*p != '\0' && (*p == '_' || *p == '-' || *p == '+' ||
+ (*p>='a' && *p<='z') || (*p>='A' && *p<='Z') ||
+ (*p>='0' && *p<='9'))) {
+ p++;
+ }
+ if (*p == '\0') {
+ fprintf(stderr, "No colon found in method-URI '%s'!\n", uri);
+ free(method);
+ return RET_ERROR;
+ }
+ if (*p != ':') {
+ fprintf(stderr,
+"Unexpected character '%c' in method-URI '%s'!\n", *p, uri);
+ free(method);
+ return RET_ERROR;
+ }
+ if (p == uri) {
+ fprintf(stderr,
+"Zero-length name in method-URI '%s'!\n", uri);
+ free(method);
+ return RET_ERROR;
+ }
+
+ method->name = strndup(uri, p-uri);
+ if (FAILEDTOALLOC(method->name)) {
+ free(method);
+ return RET_ERROR_OOM;
+ }
+ method->baseuri = strdup(uri);
+ if (FAILEDTOALLOC(method->baseuri)) {
+ free(method->name);
+ free(method);
+ return RET_ERROR_OOM;
+ }
+ if (fallbackuri == NULL)
+ method->fallbackbaseuri = NULL;
+ else {
+ method->fallbackbaseuri = strdup(fallbackuri);
+ if (FAILEDTOALLOC(method->fallbackbaseuri)) {
+ free(method->baseuri);
+ free(method->name);
+ free(method);
+ return RET_ERROR_OOM;
+ }
+ }
+#define CONF601 "601 Configuration"
+#define CONFITEM "\nConfig-Item: "
+ if (config->count == 0)
+ method->config = strdup(CONF601 CONFITEM "Dir=/" "\n\n");
+ else
+ method->config = strlist_concat(config,
+ CONF601 CONFITEM, CONFITEM, "\n\n");
+ if (FAILEDTOALLOC(method->config)) {
+ free(method->fallbackbaseuri);
+ free(method->baseuri);
+ free(method->name);
+ free(method);
+ return RET_ERROR_OOM;
+ }
+ method->next = run->methods;
+ run->methods = method;
+ *m = method;
+ return RET_OK;
+}
+
+/**************************Fire up a method*****************************/
+
+/* Start the external apt transport helper for this method, if needed:
+ * fork and exec global.methoddir/<name> with a pipe pair connected to
+ * the child's stdin/stdout.  Returns RET_NOTHING when nothing is
+ * queued (so no need to start anything), RET_OK on success or when
+ * the child is already running, an error code otherwise.  On success
+ * the method is left in ams_waitforcapabilities state, waiting for
+ * the 100 Capabilities answer. */
+inline static retvalue aptmethod_startup(struct aptmethod *method) {
+ pid_t f;
+ int mstdin[2];
+ int mstdout[2];
+ int r;
+
+ /* When there is nothing to get, there is no reason to startup
+ * the method. */
+ if (method->tobedone == NULL) {
+ return RET_NOTHING;
+ }
+
+ /* when we are already running, we are already ready...*/
+ if (method->child > 0) {
+ return RET_OK;
+ }
+
+ method->status = ams_waitforcapabilities;
+
+ r = pipe(mstdin);
+ if (r < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating pipe: %s\n",
+ e, strerror(e));
+ return RET_ERRNO(e);
+ }
+ r = pipe(mstdout);
+ if (r < 0) {
+ int e = errno;
+ (void)close(mstdin[0]); (void)close(mstdin[1]);
+ fprintf(stderr, "Error %d in pipe syscall: %s\n",
+ e, strerror(e));
+ return RET_ERRNO(e);
+ }
+
+ if (interrupted()) {
+ (void)close(mstdin[0]);(void)close(mstdin[1]);
+ (void)close(mstdout[0]);(void)close(mstdout[1]);
+ return RET_ERROR_INTERRUPTED;
+ }
+ f = fork();
+ if (f < 0) {
+ int e = errno;
+ (void)close(mstdin[0]); (void)close(mstdin[1]);
+ (void)close(mstdout[0]); (void)close(mstdout[1]);
+ fprintf(stderr, "Error %d forking: %s\n",
+ e, strerror(e));
+ return RET_ERRNO(e);
+ }
+ if (f == 0) {
+ char *methodname;
+ int e;
+ /* child: */
+ (void)close(mstdin[1]);
+ (void)close(mstdout[0]);
+ if (dup2(mstdin[0], 0) < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d while setting stdin: %s\n",
+ e, strerror(e));
+ exit(255);
+ }
+ if (dup2(mstdout[1], 1) < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d while setting stdout: %s\n",
+ e, strerror(e));
+ exit(255);
+ }
+ /* do not leak any other inherited descriptors: */
+ closefrom(3);
+
+ methodname = calc_dirconcat(global.methoddir, method->name);
+ if (FAILEDTOALLOC(methodname))
+ exit(255);
+
+ /* not really useful here, unless someone write reprepro
+ * specific modules (which I hope no one will) */
+ sethookenvironment(NULL, NULL, NULL, NULL);
+ /* actually call the method without any arguments: */
+ (void)execl(methodname, methodname, ENDOFARGUMENTS);
+
+ /* only reached when exec failed: */
+ e = errno;
+ fprintf(stderr, "Error %d while executing '%s': %s\n",
+ e, methodname, strerror(e));
+ exit(255);
+ }
+ /* the main program continues... */
+ method->child = f;
+ if (verbose > 10)
+ fprintf(stderr,
+"Method '%s' started as %d\n", method->baseuri, (int)f);
+ /* parent keeps the write end of the child's stdin and the
+ * read end of its stdout: */
+ (void)close(mstdin[0]);
+ (void)close(mstdout[1]);
+ markcloseonexec(mstdin[1]);
+ markcloseonexec(mstdout[0]);
+ method->mstdin = mstdin[1];
+ method->mstdout = mstdout[0];
+ /* reset the i/o state for the freshly started child: */
+ method->inputbuffer = NULL;
+ method->input_size = 0;
+ method->alreadyread = 0;
+ method->command = NULL;
+ method->output_length = 0;
+ method->alreadywritten = 0;
+ return RET_OK;
+}
+
+/**************************how to add files*****************************/
+
+/* Append todo to the tail of the method's download queue
+ * (takes ownership of todo). */
+static inline void enqueue(struct aptmethod *method, /*@only@*/struct tobedone *todo) {
+ todo->next = NULL;
+ if (method->lasttobedone != NULL) {
+ method->lasttobedone->next = todo;
+ method->lasttobedone = todo;
+ if (method->nexttosend == NULL)
+ method->nexttosend = todo;
+ } else {
+ /* queue was empty, new item is head and tail */
+ method->tobedone = todo;
+ method->lasttobedone = todo;
+ method->nexttosend = todo;
+ }
+}
+
+/* Create a queue item to download uri into destfile and append it to
+ * the method's queue.  Takes ownership of uri and destfile even on
+ * error; either may be NULL (the callers pass results of mprintf/
+ * calc_dirconcat directly), which is treated as out-of-memory. */
+static retvalue enqueuenew(struct aptmethod *method, /*@only@*/char *uri, /*@only@*/char *destfile, queue_callback *callback, void *privdata1, void *privdata2) {
+ struct tobedone *todo;
+
+ if (FAILEDTOALLOC(destfile)) {
+ free(uri);
+ return RET_ERROR_OOM;
+ }
+ if (FAILEDTOALLOC(uri)) {
+ free(destfile);
+ return RET_ERROR_OOM;
+ }
+
+ todo = NEW(struct tobedone);
+ if (FAILEDTOALLOC(todo)) {
+ free(uri); free(destfile);
+ return RET_ERROR_OOM;
+ }
+
+ todo->next = NULL;
+ todo->uri = uri;
+ todo->filename = destfile;
+ todo->original_uri = NULL;
+ todo->callback = callback;
+ todo->privdata1 = privdata1;
+ todo->privdata2 = privdata2;
+ /* without a fallback base uri a failure is final at once: */
+ todo->lasttry = method->fallbackbaseuri == NULL;
+ todo->redirect_count = 0;
+ enqueue(method, todo);
+ return RET_OK;
+}
+
+/* Queue baseuri/origfile for download to destfile.
+ * Takes ownership of destfile, even on error. */
+retvalue aptmethod_enqueue(struct aptmethod *method, const char *origfile, /*@only@*/char *destfile, queue_callback *callback, void *privdata1, void *privdata2) {
+ return enqueuenew(method,
+ calc_dirconcat(method->baseuri, origfile),
+ destfile, callback, privdata1, privdata2);
+}
+
+/* Queue an index file baseuri/suite/origfile+suffix for download to
+ * destfile+downloadsuffix.  Allocation failures of either mprintf
+ * are handled inside enqueuenew. */
+retvalue aptmethod_enqueueindex(struct aptmethod *method, const char *suite, const char *origfile, const char *suffix, const char *destfile, const char *downloadsuffix, queue_callback *callback, void *privdata1, void *privdata2) {
+ return enqueuenew(method,
+ mprintf("%s/%s/%s%s",
+ method->baseuri, suite, origfile, suffix),
+ mprintf("%s%s", destfile, downloadsuffix),
+ callback, privdata1, privdata2);
+}
+
+/*****************what to do with received files************************/
+
+/* Handle a failed queue item (takes ownership of todo): if this was
+ * the last try, report the failure through the callback (or plain
+ * RET_ERROR without one) and free the item; otherwise rewrite its
+ * uri from baseuri to fallbackbaseuri and queue it once more. */
+static retvalue requeue_or_fail(struct aptmethod *method, /*@only@*/struct tobedone *todo) {
+ retvalue r;
+
+ if (todo->lasttry) {
+ if (todo->callback == NULL)
+ r = RET_ERROR;
+ else
+ r = todo->callback(qa_error,
+ todo->privdata1, todo->privdata2,
+ todo->uri, NULL, todo->filename,
+ NULL, method->name);
+ todo_free(todo);
+ return r;
+ } else {
+ size_t l, old_len, new_len;
+ char *s;
+
+ assert (method->fallbackbaseuri != NULL);
+
+ /* replace the baseuri prefix of the uri with the
+ * fallback base uri: */
+ old_len = strlen(method->baseuri);
+ new_len = strlen(method->fallbackbaseuri);
+ l = strlen(todo->uri);
+ s = malloc(l+new_len+1-old_len);
+ if (FAILEDTOALLOC(s)) {
+ todo_free(todo);
+ return RET_ERROR_OOM;
+ }
+ memcpy(s, method->fallbackbaseuri, new_len);
+ strcpy(s+new_len, todo->uri + old_len);
+ free(todo->uri);
+ todo->uri = s;
+ /* the fallback is the last resort: */
+ todo->lasttry = true;
+ todo->redirect_count = 0;
+ enqueue(method, todo);
+ return RET_OK;
+ }
+}
+
+/* look which file could not be received and remove it: */
+/* A method reported failure for an uri: find the matching queue item,
+ * unlink it and let requeue_or_fail() retry it via the fallback base
+ * uri or fail it for good.  Takes ownership of message (which may be
+ * NULL, see goturierror). */
+static retvalue urierror(struct aptmethod *method, const char *uri, /*@only@*/char *message) {
+ struct tobedone *todo, *lasttodo;
+
+ lasttodo = NULL; todo = method->tobedone;
+ while (todo != NULL) {
+ if (strcmp(todo->uri, uri) == 0) {
+
+ /* remove item: */
+ if (lasttodo == NULL)
+ method->tobedone = todo->next;
+ else
+ lasttodo->next = todo->next;
+ if (method->nexttosend == todo) {
+ /* just in case some method received
+ * files before we request them ;-) */
+ method->nexttosend = todo->next;
+ }
+ if (method->lasttobedone == todo) {
+ /* todo is the tail, so the element before
+ * it becomes the new tail.  (todo->next is
+ * always NULL here; storing it would make a
+ * later enqueue() believe the queue was
+ * empty and lose all still queued items.) */
+ method->lasttobedone = lasttodo;
+ }
+ fprintf(stderr,
+"aptmethod error receiving '%s':\n'%s'\n",
+ uri, (message != NULL)?message:"");
+ /* put message in failed items to show it later? */
+ free(message);
+ return requeue_or_fail(method, todo);
+ }
+ lasttodo = todo;
+ todo = todo->next;
+ }
+ /* huh? If if have not asked for it, how can there be errors? */
+ fprintf(stderr,
+"Method '%s' reported error with unrequested file '%s':\n'%s'!\n",
+ method->name, uri, (message != NULL)?message:"");
+ free(message);
+ return RET_ERROR;
+}
+
+/* look which file could not be received and readd the new name... */
+/* A method reported a redirect for uri: unlink the queue item and
+ * requeue it under newuri, allowing at most 10 redirects per item to
+ * catch redirect loops.  Takes ownership of newuri. */
+static retvalue uriredirect(struct aptmethod *method, const char *uri, /*@only@*/char *newuri) {
+ struct tobedone *todo, *lasttodo;
+
+ lasttodo = NULL; todo = method->tobedone;
+ while (todo != NULL) {
+ if (strcmp(todo->uri, uri) == 0) {
+
+ /* remove item: */
+ if (lasttodo == NULL)
+ method->tobedone = todo->next;
+ else
+ lasttodo->next = todo->next;
+ if (method->nexttosend == todo) {
+ /* just in case some method received
+ * files before we request them ;-) */
+ method->nexttosend = todo->next;
+ }
+ if (method->lasttobedone == todo) {
+ /* todo is the tail, so the element before
+ * it becomes the new tail.  (todo->next is
+ * always NULL here; storing it would make a
+ * later enqueue() believe the queue was
+ * empty and lose all still queued items.) */
+ method->lasttobedone = lasttodo;
+ }
+ if (todo->redirect_count < 10) {
+ if (verbose > 0)
+ fprintf(stderr,
+"aptmethod redirects '%s' to '%s'\n",
+ uri, newuri);
+ /* readd with new uri, remembering the
+ * very first one for the callback: */
+ if (todo->original_uri != NULL)
+ free(todo->uri);
+ else
+ todo->original_uri = todo->uri;
+ todo->uri = newuri;
+ todo->redirect_count++;
+ enqueue(method, todo);
+ return RET_OK;
+ }
+ fprintf(stderr,
+"redirect loop (or too many redirects) detected, original uri is '%s'\n",
+ todo->original_uri);
+ /* put message in failed items to show it later? */
+ free(newuri);
+ return requeue_or_fail(method, todo);
+ }
+ lasttodo = todo;
+ todo = todo->next;
+ }
+ /* huh? If if have not asked for it, how can there be errors? */
+ fprintf(stderr,
+"Method '%s' reported redirect for unrequested file '%s'-> '%s'\n",
+ method->name, uri, newuri);
+ free(newuri);
+ return RET_ERROR;
+}
+
+/* look where a received file has to go to: */
+/* A method retrieved uri successfully as filename: find the matching
+ * queue item, hand the result (plus any checksums the method sent)
+ * to its callback and remove it from the queue.
+ * Takes ownership of checksumsfromapt. */
+static retvalue uridone(struct aptmethod *method, const char *uri, const char *filename, /*@only@*//*@null@*/struct checksums *checksumsfromapt) {
+ struct tobedone *todo, *lasttodo;
+ retvalue r;
+
+ lasttodo = NULL; todo = method->tobedone;
+ while (todo != NULL) {
+ if (strcmp(todo->uri, uri) != 0) {
+ lasttodo = todo;
+ todo = todo->next;
+ continue;
+ }
+
+ /* report the original (pre-redirect) uri: */
+ r = todo->callback(qa_got,
+ todo->privdata1, todo->privdata2,
+ todo->original_uri? todo->original_uri : todo->uri,
+ filename, todo->filename,
+ checksumsfromapt, method->name);
+ checksums_free(checksumsfromapt);
+
+ /* remove item: */
+ if (lasttodo == NULL)
+ method->tobedone = todo->next;
+ else
+ lasttodo->next = todo->next;
+ if (method->nexttosend == todo) {
+ /* just in case some method received
+ * files before we request them ;-) */
+ method->nexttosend = todo->next;
+ }
+ if (method->lasttobedone == todo) {
+ /* todo is the tail, so the element before it
+ * becomes the new tail.  (todo->next is NULL
+ * here; storing it would make a later
+ * enqueue() believe the queue was empty and
+ * lose all still queued items.) */
+ method->lasttobedone = lasttodo;
+ }
+ todo_free(todo);
+ return r;
+ }
+ /* huh? */
+ fprintf(stderr,
+"Method '%s' retrieved unexpected file '%s' at '%s'!\n",
+ method->name, uri, filename);
+ checksums_free(checksumsfromapt);
+ return RET_ERROR;
+}
+
+/***************************Input and Output****************************/
+/* Print a log/status chunk received from a method: prefer its
+ * Message field, fall back to its URI field, then to just the
+ * type tag itself. */
+static retvalue logmessage(const struct aptmethod *method, const char *chunk, const char *type) {
+ char *value;
+ retvalue r;
+
+ r = chunk_getvalue(chunk, "Message", &value);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r != RET_NOTHING) {
+ fprintf(stderr, "aptmethod '%s': '%s'\n",
+ method->baseuri, value);
+ free(value);
+ return RET_OK;
+ }
+ r = chunk_getvalue(chunk, "URI", &value);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r != RET_NOTHING) {
+ fprintf(stderr, "aptmethod %s '%s'\n", type, value);
+ free(value);
+ return RET_OK;
+ }
+ fprintf(stderr, "aptmethod '%s': '%s'\n", method->baseuri, type);
+ return RET_OK;
+}
+/* Handle the 100 Capabilities answer of a method: if it announces
+ * Send-Config, the prepared 601 Configuration block becomes the next
+ * command to be written; either way the stored config is consumed
+ * and the method is marked ready (ams_ok). */
+static inline retvalue gotcapabilities(struct aptmethod *method, const char *chunk) {
+ retvalue r;
+
+ r = chunk_gettruth(chunk, "Single-Instance");
+ if (RET_WAS_ERROR(r))
+ return r;
+// TODO: what to do with this?
+// if (r != RET_NOTHING) {
+// fprintf(stderr, "WARNING: Single-instance not yet supported!\n");
+// }
+ r = chunk_gettruth(chunk, "Send-Config");
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r != RET_NOTHING) {
+ assert(method->command == NULL);
+ method->alreadywritten = 0;
+ /* hand the config over to be written out next: */
+ method->command = method->config;
+ method->config = NULL;
+ method->output_length = strlen(method->command);
+ if (verbose > 11) {
+ fprintf(stderr, "Sending config: '%s'\n",
+ method->command);
+ }
+ } else {
+ /* method does not want a config, discard it: */
+ free(method->config);
+ method->config = NULL;
+ }
+ method->status = ams_ok;
+ return RET_OK;
+}
+
+/* Handle a 201 URI Done message: extract URI and Filename (a missing
+ * Filename, possibly accompanied by Alt-Filename, is treated as an
+ * error for that uri), collect whatever hashes the method reported
+ * and hand everything on to uridone(). */
+static inline retvalue goturidone(struct aptmethod *method, const char *chunk) {
+ /* hash header names as sent by apt methods,
+ * indexed by enum checksumtype: */
+ static const char * const method_hash_names[cs_COUNT] =
+ { "MD5-Hash", "SHA1-Hash", "SHA256-Hash", "SHA512-Hash",
+ "Size" };
+ retvalue result, r;
+ char *uri, *filename;
+ enum checksumtype type;
+ char *hashes[cs_COUNT];
+ struct checksums *checksums = NULL;
+
+ //TODO: is it worth the mess to make this in-situ?
+
+ r = chunk_getvalue(chunk, "URI", &uri);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Missing URI header in uridone received from '%s' method!\n",
+ method->name);
+ r = RET_ERROR;
+ method->status = ams_failed;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ r = chunk_getvalue(chunk, "Filename", &filename);
+ if (r == RET_NOTHING) {
+ char *altfilename;
+
+ r = chunk_getvalue(chunk, "Alt-Filename", &altfilename);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Missing Filename header in uridone received from '%s' method!\n",
+ method->name);
+ r = urierror(method, uri, strdup(
+"<no error but missing Filename from apt-method>"));
+ } else {
+ r = urierror(method, uri, mprintf(
+"<File not there, apt-method suggests '%s' instead>", altfilename));
+ free(altfilename);
+ }
+ free(uri);
+ return r;
+ }
+ if (RET_WAS_ERROR(r)) {
+ free(uri);
+ return r;
+ }
+ if (verbose >= 1)
+ fprintf(stderr, "aptmethod got '%s'\n", uri);
+
+ /* collect all hash fields the method sent along: */
+ result = RET_NOTHING;
+ for (type = cs_md5sum ; type < cs_COUNT ; type++) {
+ hashes[type] = NULL;
+ r = chunk_getvalue(chunk, method_hash_names[type],
+ &hashes[type]);
+ RET_UPDATE(result, r);
+ }
+ if (RET_IS_OK(result) && hashes[cs_md5sum] == NULL) {
+ /* the lenny version also has this, better ask for
+ * in case the old MD5-Hash vanishes in the future */
+ r = chunk_getvalue(chunk, "MD5Sum-Hash", &hashes[cs_md5sum]);
+ RET_UPDATE(result, r);
+ }
+ if (RET_WAS_ERROR(result)) {
+ free(uri); free(filename);
+ for (type = cs_md5sum ; type < cs_COUNT ; type++)
+ free(hashes[type]);
+ return result;
+ }
+ if (RET_IS_OK(result)) {
+ /* ignore errors, we can recompute them from the file */
+ (void)checksums_init(&checksums, hashes);
+ }
+ r = uridone(method, uri, filename, checksums);
+ free(uri);
+ free(filename);
+ return r;
+}
+
+/* Handle a 400 URI Failure message: extract URI and the optional
+ * Message field (left NULL if absent) and hand them to urierror(),
+ * which retries via a fallback uri or fails the item for good. */
+static inline retvalue goturierror(struct aptmethod *method, const char *chunk) {
+ retvalue r;
+ char *uri, *message;
+
+ r = chunk_getvalue(chunk, "URI", &uri);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Missing URI header in urierror received from '%s' method!\n", method->name);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ r = chunk_getvalue(chunk, "Message", &message);
+ if (r == RET_NOTHING) {
+ message = NULL;
+ }
+ if (RET_WAS_ERROR(r)) {
+ free(uri);
+ return r;
+ }
+
+ r = urierror(method, uri, message);
+ free(uri);
+ return r;
+}
+
+/* Handle a 103 Redirect message: extract URI and New-URI and let
+ * uriredirect() requeue the affected item under its new location. */
+static inline retvalue gotredirect(struct aptmethod *method, const char *chunk) {
+ char *uri, *newuri;
+ retvalue r;
+
+ r = chunk_getvalue(chunk, "URI", &uri);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Missing URI header in uriredirect received from '%s' method!\n", method->name);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ r = chunk_getvalue(chunk, "New-URI", &newuri);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Missing New-URI header in uriredirect received from '%s' method!\n", method->name);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r)) {
+ free(uri);
+ return r;
+ }
+ r = uriredirect(method, uri, newuri);
+ free(uri);
+ return r;
+}
+
+/* Parse one complete status block received from a method and dispatch
+ * on its three digit status code (apt method interface: 1xx
+ * informational, 2xx success, 4xx failure).  Returns RET_NOTHING for
+ * an all-whitespace block. */
+static inline retvalue parsereceivedblock(struct aptmethod *method, const char *input) {
+ const char *p;
+ retvalue r;
+/* advance p past the status line, onto the header fields: */
+#define OVERLINE {while (*p != '\0' && *p != '\n') p++; if (*p == '\n') p++; }
+
+ while (*input == '\n' || *input == '\r')
+ input++;
+ if (*input == '\0') {
+ fprintf(stderr,
+"Unexpected number of newlines from '%s' method!\n", method->name);
+ return RET_NOTHING;
+ }
+ p = input;
+ /* dispatch on the first digit, but only when the second one is
+ * '0' (all known codes have the form x0y), else fall through to
+ * the default branch: */
+ switch ((*(input+1)=='0')?*input:'\0') {
+ case '1':
+ switch (*(input+2)) {
+ /* 100 Capabilities */
+ case '0':
+ OVERLINE;
+ if (verbose > 14) {
+ fprintf(stderr, "Got '%s'\n",
+ input);
+ }
+ return gotcapabilities(method, input);
+ /* 101 Log */
+ case '1':
+ if (verbose > 10) {
+ OVERLINE;
+ return logmessage(method, p, "101");
+ }
+ return RET_OK;
+ /* 102 Status */
+ case '2':
+ if (verbose > 5) {
+ OVERLINE;
+ return logmessage(method, p, "102");
+ }
+ return RET_OK;
+ /* 103 Redirect */
+ case '3':
+ OVERLINE;
+ return gotredirect(method, p);
+ default:
+ fprintf(stderr,
+"Error or unsupported message received: '%s'\n",
+ input);
+ return RET_ERROR;
+ }
+ case '2':
+ switch (*(input+2)) {
+ /* 200 URI Start */
+ case '0':
+ if (verbose > 5) {
+ OVERLINE;
+ return logmessage(method, p, "start");
+ }
+ return RET_OK;
+ /* 201 URI Done */
+ case '1':
+ OVERLINE;
+ return goturidone(method, p);
+ default:
+ fprintf(stderr,
+"Error or unsupported message received: '%s'\n",
+ input);
+ return RET_ERROR;
+ }
+
+ case '4':
+ switch (*(input+2)) {
+ /* 400 URI Failure */
+ case '0':
+ OVERLINE;
+ r = goturierror(method, p);
+ break;
+ /* 401 General Failure */
+ case '1':
+ OVERLINE;
+ (void)logmessage(method, p, "general error");
+ method->status = ams_failed;
+ r = RET_ERROR;
+ break;
+ default:
+ fprintf(stderr,
+"Error or unsupported message received: '%s'\n",
+ input);
+ r = RET_ERROR;
+ }
+ /* a failed download is not a error yet, as it might
+ * be redone from another source later */
+ return r;
+ default:
+ fprintf(stderr,
+"Unexpected data from '%s' method: '%s'\n",
+ method->name, input);
+ return RET_ERROR;
+ }
+}
+
+/* Read whatever the method wrote into our pipe and process every
+ * complete block (a block ends with an empty line, i.e. two
+ * consecutive newlines) found in the buffer.  A trailing partial
+ * block is kept for the next call. */
+static retvalue receivedata(struct aptmethod *method) {
+ retvalue result;
+ ssize_t r;
+ char *p;
+ int consecutivenewlines;
+
+ assert (method->status != ams_ok || method->tobedone != NULL);
+ if (method->status != ams_waitforcapabilities
+ && method->status != ams_ok)
+ return RET_NOTHING;
+
+ /* First look if we have enough room to read.. */
+ if (method->alreadyread + 1024 >= method->input_size) {
+ char *newptr;
+
+ /* sanity cap to catch a method going mad: */
+ if (method->input_size >= (size_t)128000) {
+ fprintf(stderr,
+"Ridiculously long answer from method!\n");
+ method->status = ams_failed;
+ return RET_ERROR;
+ }
+
+ newptr = realloc(method->inputbuffer, method->alreadyread+1024);
+ if (FAILEDTOALLOC(newptr)) {
+ return RET_ERROR_OOM;
+ }
+ method->inputbuffer = newptr;
+ method->input_size = method->alreadyread + 1024;
+ }
+ assert (method->inputbuffer != NULL);
+ /* then read as much as the pipe is able to fill of our buffer */
+
+ r = read(method->mstdout, method->inputbuffer + method->alreadyread,
+ method->input_size - method->alreadyread - 1);
+
+ if (r < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d reading pipe from aptmethod: %s\n",
+ e, strerror(e));
+ method->status = ams_failed;
+ return RET_ERRNO(e);
+ }
+ method->alreadyread += r;
+
+ result = RET_NOTHING;
+ while(true) {
+ retvalue res;
+
+ /* scan for a complete block (empty line terminator,
+ * carriage returns between newlines are ignored): */
+ r = method->alreadyread;
+ p = method->inputbuffer;
+ consecutivenewlines = 0;
+
+ while (r > 0) {
+ if (*p == '\0') {
+ fprintf(stderr,
+"Unexpected Zeroes in method output!\n");
+ method->status = ams_failed;
+ return RET_ERROR;
+ } else if (*p == '\n') {
+ consecutivenewlines++;
+ if (consecutivenewlines >= 2)
+ break;
+ } else if (*p != '\r') {
+ consecutivenewlines = 0;
+ }
+ p++; r--;
+ }
+ if (r <= 0) {
+ /* no complete block left, keep the rest */
+ return result;
+ }
+ /* terminate the block, parse it, and move what follows
+ * it to the front of the buffer: */
+ *p ='\0'; p++; r--;
+ res = parsereceivedblock(method, method->inputbuffer);
+ if (r > 0)
+ memmove(method->inputbuffer, p, r);
+ method->alreadyread = r;
+ RET_UPDATE(result, res);
+ }
+}
+
+/* Write pending data to the method: if no command is currently being
+ * sent, build a 600 URI Acquire command for the next queued item.
+ * Writes may be partial; the remainder is written on a later call. */
+static retvalue senddata(struct aptmethod *method) {
+ size_t l;
+ ssize_t r;
+
+ if (method->status != ams_ok)
+ return RET_NOTHING;
+
+ if (method->command == NULL) {
+ const struct tobedone *todo;
+
+ /* nothing queued to send, nothing to be queued...*/
+ todo = method->nexttosend;
+ if (todo == NULL)
+ return RET_OK;
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+
+ method->alreadywritten = 0;
+ // TODO: make sure this is already checked for earlier...
+ assert (strchr(todo->uri, '\n') == NULL &&
+ strchr(todo->filename, '\n') == NULL);
+ /* http-aptmethod seems to loose the last byte,
+ * if the file is already in place,
+ * so we better unlink the target first...
+ * but this is done elsewhere already
+ unlink(todo->filename);
+ */
+ method->command = mprintf(
+ "600 URI Acquire\nURI: %s\nFilename: %s\n\n",
+ todo->uri, todo->filename);
+ if (FAILEDTOALLOC(method->command)) {
+ return RET_ERROR_OOM;
+ }
+ if (verbose > 20)
+ fprintf(stderr, "Will send: '%s'\n", method->command);
+ method->output_length = strlen(method->command);
+ method->nexttosend = method->nexttosend->next;
+ }
+
+
+ l = method->output_length - method->alreadywritten;
+
+ r = write(method->mstdin, method->command + method->alreadywritten, l);
+ if (r < 0) {
+ int e = errno;
+
+ fprintf(stderr, "Error %d writing to pipe: %s\n",
+ e, strerror(e));
+ //TODO: disable the whole method??
+ method->status = ams_failed;
+ return RET_ERRNO(e);
+ } else if ((size_t)r < l) {
+ /* short write: send the rest on the next round */
+ method->alreadywritten += r;
+ return RET_OK;
+ }
+
+ /* command fully written */
+ free(method->command);
+ method->command = NULL;
+ return RET_OK;
+}
+
+/* Reap all children that terminated: for apt method helpers close
+ * their pipes and mark them stopped (complaining about abnormal
+ * exits); unknown pids are delegated to the uncompressor
+ * bookkeeping. */
+static retvalue checkchilds(struct aptmethodrun *run) {
+ pid_t child;int status;
+ retvalue result = RET_OK, r;
+
+ while ((child = waitpid(-1, &status, WNOHANG)) > 0) {
+ struct aptmethod *method;
+
+ for (method = run->methods ; method != NULL ;
+ method = method->next) {
+ if (method->child == child)
+ break;
+ }
+ if (method == NULL) {
+ /* perhaps an uncompressor terminated */
+ r = uncompress_checkpid(child, status);
+ if (RET_IS_OK(r))
+ continue;
+ if (RET_WAS_ERROR(r)) {
+ result = r;
+ continue;
+ }
+ else {
+ fprintf(stderr,
+"Unexpected child died (maybe gpg died if signing/verifing was done): %d\n",
+ (int)child);
+ continue;
+ }
+ }
+ /* Make sure we do not cope with this child any more */
+ if (method->mstdin != -1) {
+ (void)close(method->mstdin);
+ method->mstdin = -1;
+ }
+ if (method->mstdout != -1) {
+ (void)close(method->mstdout);
+ method->mstdout = -1;
+ }
+ method->child = -1;
+ if (method->status != ams_failed)
+ method->status = ams_notstarted;
+
+ /* say something if it exited unnormal: */
+ if (WIFEXITED(status)) {
+ int exitcode;
+
+ exitcode = WEXITSTATUS(status);
+ if (exitcode != 0) {
+ fprintf(stderr,
+"Method %s://%s exited with non-zero exit code %d!\n",
+ method->name, method->baseuri,
+ exitcode);
+ method->status = ams_notstarted;
+ result = RET_ERROR;
+ }
+ } else {
+ fprintf(stderr, "Method %s://%s exited unnormally!\n",
+ method->name, method->baseuri);
+ method->status = ams_notstarted;
+ result = RET_ERROR;
+ }
+ }
+ return result;
+}
+
+/* *workleft is always set, even when return indicated error.
+ * (workleft < 0 when critical)*/
+/* Do one select() round over all method pipes: read answers from and
+ * write pending commands to every method that wants it.
+ * *workleft is always set, even when return indicated error:
+ * the number of pending i/o interests (0 = nothing left to do,
+ * < 0 when critical, i.e. select itself failed). */
+static retvalue readwrite(struct aptmethodrun *run, /*@out@*/int *workleft) {
+ int maxfd, v;
+ fd_set readfds, writefds;
+ struct aptmethod *method;
+ retvalue result, r;
+
+ /* First calculate what to look at: */
+ FD_ZERO(&readfds);
+ FD_ZERO(&writefds);
+ maxfd = 0;
+ *workleft = 0;
+ for (method = run->methods ; method != NULL ; method = method->next) {
+ /* write when ready and a command is pending or queued: */
+ if (method->status == ams_ok &&
+ (method->command != NULL || method->nexttosend != NULL)) {
+ FD_SET(method->mstdin, &writefds);
+ if (method->mstdin > maxfd)
+ maxfd = method->mstdin;
+ (*workleft)++;
+ if (verbose > 19)
+ fprintf(stderr, "want to write to '%s'\n",
+ method->baseuri);
+ }
+ /* read while waiting for capabilities or for queued files: */
+ if (method->status == ams_waitforcapabilities ||
+ (method->status == ams_ok &&
+ method->tobedone != NULL)) {
+ FD_SET(method->mstdout, &readfds);
+ if (method->mstdout > maxfd)
+ maxfd = method->mstdout;
+ (*workleft)++;
+ if (verbose > 19)
+ fprintf(stderr, "want to read from '%s'\n",
+ method->baseuri);
+ }
+ }
+
+ if (*workleft == 0)
+ return RET_NOTHING;
+
+ // TODO: think about a timeout...
+ v = select(maxfd + 1, &readfds, &writefds, NULL, NULL);
+ if (v < 0) {
+ int e = errno;
+ //TODO: handle (e == EINTR) && interrupted() specially
+ fprintf(stderr, "Select returned error %d: %s\n",
+ e, strerror(e));
+ *workleft = -1;
+ // TODO: what to do here?
+ return RET_ERRNO(e);
+ }
+
+ result = RET_NOTHING;
+
+ maxfd = 0;
+ for (method = run->methods ; method != NULL ; method = method->next) {
+ if (method->mstdout != -1 &&
+ FD_ISSET(method->mstdout, &readfds)) {
+ r = receivedata(method);
+ RET_UPDATE(result, r);
+ }
+ if (method->mstdin != -1 &&
+ FD_ISSET(method->mstdin, &writefds)) {
+ r = senddata(method);
+ RET_UPDATE(result, r);
+ }
+ }
+ return result;
+}
+
+/* Start every method that has something queued and run the select
+ * loop until no method wants to read or write any more and no
+ * uncompressor child is still running. */
+retvalue aptmethod_download(struct aptmethodrun *run) {
+ struct aptmethod *method;
+ retvalue result, r;
+ int workleft;
+
+ result = RET_NOTHING;
+
+ /* fire up all methods, removing those that do not work: */
+ for (method = run->methods; method != NULL ; method = method->next) {
+ r = aptmethod_startup(method);
+ /* do not remove failed methods here any longer,
+ * and not remove methods having nothing to do,
+ * as this breaks when no index files are downloaded
+ * due to all already being in place... */
+ RET_UPDATE(result, r);
+ }
+ /* waiting for them to finish: */
+ do {
+ r = checkchilds(run);
+ RET_UPDATE(result, r);
+ r = readwrite(run, &workleft);
+ RET_UPDATE(result, r);
+ // TODO: check interrupted here...
+ } while (workleft > 0 || uncompress_running());
+
+ return result;
+}
+
diff --git a/aptmethod.h b/aptmethod.h
new file mode 100644
index 0000000..ab0cf8e
--- /dev/null
+++ b/aptmethod.h
@@ -0,0 +1,27 @@
+#ifndef REPREPRO_APTMETHOD_H
+#define REPREPRO_APTMETHOD_H
+
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_CHECKSUMS_H
+#include "checksums.h"
+#endif
+
+struct aptmethodrun;
+struct aptmethod;
+
+enum queue_action { qa_abort, qa_got, qa_error };
+
+typedef retvalue queue_callback(enum queue_action, void *, void *, const char * /*uri*/, const char * /*gotfilename*/, const char * /*wantedfilename*/, /*@null@*/const struct checksums *, const char * /*methodname*/);
+
+retvalue aptmethod_initialize_run(/*@out@*/struct aptmethodrun **);
+retvalue aptmethod_newmethod(struct aptmethodrun *, const char * /*uri*/, const char * /*fallbackuri*/, const struct strlist * /*config*/, /*@out@*/struct aptmethod **);
+
+retvalue aptmethod_enqueue(struct aptmethod *, const char * /*origfile*/, /*@only@*/char */*destfile*/, queue_callback *, void *, void *);
+retvalue aptmethod_enqueueindex(struct aptmethod *, const char * /*suite*/, const char * /*origfile*/, const char *, const char * /*destfile*/, const char *, queue_callback *, void *, void *);
+
+retvalue aptmethod_download(struct aptmethodrun *);
+retvalue aptmethod_shutdown(/*@only@*/struct aptmethodrun *);
+
+#endif
diff --git a/ar.c b/ar.c
new file mode 100644
index 0000000..02f0bfb
--- /dev/null
+++ b/ar.c
@@ -0,0 +1,299 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005,2006 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include <archive.h>
+
+#include "error.h"
+#include "uncompression.h"
+#include "ar.h"
+
+/* Arr, me matees, Arr */
+
+#define BLOCKSIZE 10240
+#define AR_MAGIC "!<arch>\n"
+#define AR_HEADERMAGIC "`\n"
+
+/* State for iterating over the members of an ar(5) archive (the
+ * outer container format of .deb files). */
+struct ar_archive {
+ char *filename;
+ /* descriptor of the opened ar file */
+ int fd;
+ /* raw ar member header: fixed-width ASCII fields,
+ * not NUL-terminated */
+ struct ar_header {
+ char ah_filename[16];
+ char ah_date[12];
+ char ah_uid[6];
+ char ah_gid[6];
+ char ah_mode[8];
+ char ah_size[10];
+ char ah_magictrailer[2];
+ } currentheader;
+ /* size of the current member's data and the file offset at
+ * which the next member header starts */
+ off_t member_size, next_position;
+ void *readbuffer;
+ /* open uncompressor for the current member, if any */
+ /*@null@*/struct compressedfile *member;
+ enum compression compression;
+};
+
+/* Read exactly count bytes from fd unless end of file is reached.
+ * Returns the number of bytes read (short only on EOF), or -1 with
+ * errno set on error or interruption. */
+static ssize_t readwait(int fd, /*@out@*/void *buf, size_t count) {
+ ssize_t totalread;
+ /* use a char * cursor: pointer arithmetic on void * is a GCC
+ * extension and undefined in ISO C */
+ char *p = buf;
+
+ totalread = 0;
+
+ while (count > 0) {
+ ssize_t s;
+
+ s = read(fd, p, count);
+ if (s < 0)
+ return s;
+ if (interrupted()) {
+ errno = EINTR;
+ return -1;
+ }
+ if ((size_t)s > count) {
+ /* kernel returned more than requested?! */
+ errno = EINVAL;
+ return -1;
+ }
+ if (s == 0)
+ break;
+ totalread += s;
+ p += s;
+ count -= s;
+ }
+ return totalread;
+}
+
+/* Open filename as an ar(5) archive: verify the global '!<arch>'
+ * magic and set up the iteration state.  On success *n must later
+ * be released with ar_close(). */
+retvalue ar_open(/*@out@*/struct ar_archive **n, const char *filename) {
+ struct ar_archive *ar;
+ char buffer[sizeof(AR_MAGIC)];
+ ssize_t bytesread;
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+ ar = zNEW(struct ar_archive);
+ if (FAILEDTOALLOC(ar))
+ return RET_ERROR_OOM;
+ ar->fd = open(filename, O_NOCTTY|O_RDONLY);
+ if (ar->fd < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d opening %s: %s\n",
+ e, filename, strerror(e));
+ free(ar);
+ return RET_ERRNO(e);
+ }
+
+ /* check the global archive magic: */
+ bytesread = readwait(ar->fd, buffer, sizeof(AR_MAGIC) - 1);
+ if (bytesread != sizeof(AR_MAGIC)-1) {
+ int e = errno;
+ (void)close(ar->fd);
+ free(ar);
+ if (bytesread < 0) {
+ fprintf(stderr, "Error %d reading from %s: %s\n",
+ e, filename, strerror(e));
+ return RET_ERRNO(e);
+ } else {
+ fprintf(stderr, "Premature end of reading from %s\n",
+ filename);
+ return RET_ERROR;
+ }
+ }
+ if (memcmp(buffer, AR_MAGIC, sizeof(AR_MAGIC)-1) != 0) {
+ (void)close(ar->fd);
+ free(ar);
+ fprintf(stderr,
+"Missing ar header '!<arch>' at the beginning of %s\n",
+ filename);
+ return RET_ERROR;
+ }
+ ar->filename = strdup(filename);
+ if (FAILEDTOALLOC(ar->filename)) {
+ close(ar->fd);
+ free(ar);
+ return RET_ERROR_OOM;
+ }
+ /* first member header follows directly after the magic: */
+ ar->next_position = sizeof(AR_MAGIC) - 1;
+
+ *n = ar;
+ return RET_OK;
+}
+
+/* Release an ar archive: close its descriptor (if still open) and
+ * free all associated memory.  A NULL argument is ignored. */
+void ar_close(/*@only@*/struct ar_archive *ar) {
+ if (ar == NULL)
+ return;
+ if (ar->fd >= 0)
+ (void)close(ar->fd);
+ free(ar->filename);
+ free(ar);
+}
+
+/* RET_OK = next is there, RET_NOTHING = eof, < 0 = error */
+retvalue ar_nextmember(struct ar_archive *ar, /*@out@*/char **filename) {
+ ssize_t bytesread;
+ char *p;
+ off_t s;
+
+ assert(ar->readbuffer == NULL);
+ assert(ar->fd >= 0);
+
+ /* seek over what is left from the last part: */
+ s = lseek(ar->fd, ar->next_position, SEEK_SET);
+ if (s == (off_t)-1) {
+ int e = errno;
+ fprintf(stderr,
+"Error %d seeking to next member in ar file %s: %s\n",
+ e, ar->filename, strerror(e));
+ return RET_ERRNO(e);
+ }
+ /* read the next header from the file */
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+
+ bytesread = readwait(ar->fd, &ar->currentheader,
+ sizeof(ar->currentheader));
+ ar->next_position += sizeof(ar->currentheader);
+ if (bytesread == 0)
+ return RET_NOTHING;
+ if (bytesread != sizeof(ar->currentheader)){
+ int e = errno;
+ if (bytesread < 0) {
+ fprintf(stderr,
+"Error %d reading from ar file %s: %s\n",
+ e, ar->filename, strerror(e));
+ return RET_ERRNO(e);
+ } else {
+ fprintf(stderr, "Premature end of ar file %s\n",
+ ar->filename);
+ return RET_ERROR;
+ }
+ }
+ if (memcmp(ar->currentheader.ah_magictrailer, AR_HEADERMAGIC, 2) != 0) {
+ fprintf(stderr, "Corrupt ar file %s\n", ar->filename);
+ return RET_ERROR;
+ }
+
+ /* calculate the length and mark possible fillers being needed */
+
+ /* make ah_size null-terminated by overwriting the following field */
+ assert (&ar->currentheader.ah_magictrailer[0]
+ == ar->currentheader.ah_size + 10);
+ ar->currentheader.ah_magictrailer[0] = '\0';
+
+ ar->member_size = strtoul(ar->currentheader.ah_size, &p, 10);
+ if (*p != '\0' && *p != ' ') {
+ fprintf(stderr,
+"Error calculating length field in ar file %s\n",
+ ar->filename);
+ return RET_ERROR;
+ }
+ ar->next_position += ar->member_size;
+ if ((ar->member_size & 1) != 0)
+ ar->next_position ++;
+
+ /* get the name of the file */
+ if (false) {
+ /* handle long filenames */
+ // TODO!
+ } else {
+ /* normal filenames */
+ int i = sizeof(ar->currentheader.ah_filename);
+ while (i > 0 && ar->currentheader.ah_filename[i-1] == ' ')
+ i--;
+ /* hop over GNU style filenames, though they should not
+ * be in a .deb file... */
+ if (i > 0 && ar->currentheader.ah_filename[i-1] == '/')
+ i--;
+ *filename = strndup(ar->currentheader.ah_filename, i);
+ }
+ ar->compression = c_none;
+ return RET_OK;
+}
+
+/* Set the compression used to read the current/next member. */
+void ar_archivemember_setcompression(struct ar_archive *ar, enum compression compression) {
+ ar->compression = compression;
+}
+
+/* libarchive read callback: hand out up to BLOCKSIZE uncompressed
+ * bytes of the current member via *p.  Returns 0 at end of member
+ * (or when no member is open), -1 on error (reported to libarchive
+ * via archive_set_error). */
+ssize_t ar_archivemember_read(struct archive *a, void *d, const void **p) {
+ struct ar_archive *ar = d;
+ ssize_t bytesread;
+
+ assert (ar->readbuffer != NULL);
+ if (ar->member == NULL)
+ return 0;
+
+ *p = ar->readbuffer;
+ bytesread = uncompress_read(ar->member, ar->readbuffer, BLOCKSIZE);
+ if (bytesread < 0) {
+ const char *msg;
+ int e;
+
+ // TODO: why _fdclose instead of _abort?
+ (void)uncompress_fdclose(ar->member, &e, &msg);
+ ar->member = NULL;
+ archive_set_error(a, e, "%s", msg);
+ return -1;
+ }
+ return bytesread;
+}
+
+/* libarchive open callback: allocate the read buffer and open the
+ * uncompressor for the current member at the current file offset
+ * (the fd is positioned right after the member header by
+ * ar_nextmember). */
+int ar_archivemember_open(struct archive *a, void *d) {
+ struct ar_archive *ar = d;
+ retvalue r;
+ const char *msg;
+ int e;
+
+ assert (uncompression_supported(ar->compression));
+
+ assert (ar->readbuffer == NULL);
+ ar->readbuffer = malloc(BLOCKSIZE);
+ if (FAILEDTOALLOC(ar->readbuffer)) {
+ archive_set_error(a, ENOMEM, "Out of memory");
+ return ARCHIVE_FATAL;
+ }
+ r = uncompress_fdopen(&ar->member, ar->fd, ar->member_size,
+ ar->compression, &e, &msg);
+ if (RET_IS_OK(r))
+ return ARCHIVE_OK;
+ archive_set_error(a, e, "%s", msg);
+ return ARCHIVE_FATAL;
+}
+
+/* libarchive close callback: release the read buffer and close the
+ * uncompressor of the current member, if one is still open. */
+int ar_archivemember_close(struct archive *a, void *d) {
+ struct ar_archive *ar = d;
+ const char *msg;
+ retvalue r;
+ int e;
+
+ free(ar->readbuffer);
+ ar->readbuffer = NULL;
+
+ if (ar->member == NULL)
+ return ARCHIVE_OK;
+
+ r = uncompress_fdclose(ar->member, &e, &msg);
+ ar->member = NULL;
+ if (!RET_IS_OK(r)) {
+ archive_set_error(a, e, "%s", msg);
+ return ARCHIVE_FATAL;
+ }
+ return ARCHIVE_OK;
+}
diff --git a/ar.h b/ar.h
new file mode 100644
index 0000000..fe5ff7c
--- /dev/null
+++ b/ar.h
@@ -0,0 +1,22 @@
+#ifndef DEBCOMP_AR_H
+#define DEBCOMP_AR_H
+
+struct ar_archive;
+
+retvalue ar_open(/*@out@*/struct ar_archive **, const char *);
+void ar_close(/*@only@*/struct ar_archive *);
+
+/* RET_OK = next is there, RET_NOTHING = eof, < 0 = error */
+retvalue ar_nextmember(struct ar_archive *, /*@out@*/char ** /*filename*/);
+
+/* set compression for the next member */
+void ar_archivemember_setcompression(struct ar_archive *, enum compression);
+
+/* the following can be used for libarchive to read an file in the ar
+ * after ar_nextmember returned successfully.
+ * All references get invalid after the ar_nextmember is called again. */
+int ar_archivemember_close(struct archive *, void *);
+int ar_archivemember_open(struct archive *, void *);
+ssize_t ar_archivemember_read(struct archive *, void *, const void **);
+
+#endif
diff --git a/archallflood.c b/archallflood.c
new file mode 100644
index 0000000..df20247
--- /dev/null
+++ b/archallflood.c
@@ -0,0 +1,714 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "error.h"
+#include "strlist.h"
+#include "indexfile.h"
+#include "dpkgversions.h"
+#include "target.h"
+#include "distribution.h"
+#include "tracking.h"
+#include "files.h"
+#include "package.h"
+#include "archallflood.h"
+
+/* node in an (unbalanced) binary search tree of source package names,
+ * ordered by strcmp of name */
+struct aa_source_name {
+	/*@null@*/struct aa_source_name *parent;
+	/*@null@*/struct aa_source_name *left_child;
+	/*@null@*/struct aa_source_name *right_child;
+
+	char *name;
+
+	/* versions of this source, singly linked, sorted by strcmp */
+	/*@null@*/struct aa_source_version *versions;
+};
+
+struct aa_source_version {
+	/*@null@*/struct aa_source_version *next;
+	struct aa_source_name *name;
+	char *version;
+
+	/* if true, it was already verified that there is no
+	 * binary package of the same source version already there,
+	 * so new architecture 'all' can be added without danger */
+	bool has_no_sibling;
+	/* if true, then there is a binary package of this source
+	 * package, so replacing an architecture all is only allowed
+	 * if there is already a binary for the new one */
+	bool has_sibling;
+};
+
+/* one architecture 'all' package being tracked, element of the sorted
+ * (by name) singly-linked list in struct floodlist */
+struct aa_package_data {
+	struct aa_package_data *next;
+	/* the name of the architecture all package: */
+	char *name;
+
+	/* NULL if does not exists/not yet known */
+	/*@null@*/char *old_version;
+	/*@null@*/struct aa_source_version *old_source;
+	/*@null@*/char *new_version;
+	/*@null@*/struct aa_source_version *new_source;
+	bool new_has_sibling;
+
+	/* install data of the candidate replacement package */
+	struct checksumsarray new_origfiles;
+	struct strlist new_filekeys;
+	char *new_control;
+};
+
+struct floodlist {
+	/*@dependent@*/struct target *target;
+	struct aa_source_name *sources;
+	struct aa_package_data *list;
+	/* package the next package will most probably be after.
+	 * (NULL=before start of list) */
+	/*@null@*//*@dependent@*/struct aa_package_data *last;
+};
+
+/* free one list entry together with everything it owns; NULL is a no-op */
+static void aa_package_data_free(/*@only@*/struct aa_package_data *data){
+	if (data == NULL)
+		return;
+	free(data->name);
+	free(data->old_version);
+	free(data->new_version);
+	free(data->new_control);
+	strlist_done(&data->new_filekeys);
+	checksumsarray_done(&data->new_origfiles);
+	free(data);
+}
+
+/* free a floodlist: the package list, then the whole source-name tree
+ * (iteratively, without recursion: descend to a leaf, free it, continue
+ * at its parent) including each name's version list. NULL is a no-op. */
+static void floodlist_free(struct floodlist *list) {
+	struct aa_source_name *s;
+	struct aa_package_data *l;
+
+	if (list == NULL)
+		return;
+
+	l = list->list;
+	while (l != NULL) {
+		struct aa_package_data *n = l->next;
+		aa_package_data_free(l);
+		l = n;
+	}
+	s = list->sources;
+	while (s != NULL) {
+		struct aa_source_name *n;
+
+		/* walk down to a leaf, detaching children on the way so
+		 * each node is visited only after its subtrees are gone */
+		while (s->left_child != NULL || s->right_child != NULL) {
+			if (s->left_child != NULL) {
+				n = s->left_child;
+				s->left_child = NULL;
+				s = n;
+			} else {
+				n = s->right_child;
+				s->right_child = NULL;
+				s = n;
+			}
+		}
+
+		while (s->versions != NULL) {
+			struct aa_source_version *nv;
+			nv = s->versions->next;
+			free(s->versions->version);
+			free(s->versions);
+			s->versions = nv;
+		}
+		/* continue at the parent (its link to s was already cut) */
+		n = s->parent;
+		free(s->name);
+		free(s);
+		s = n;
+	}
+	free(list);
+	return;
+}
+
+/* look up pkg->source in the source-name tree, creating a new node
+ * (with empty version list) if not yet present.
+ * On success sets *src_p and returns RET_OK; RET_ERROR_OOM otherwise. */
+static retvalue find_or_add_sourcename(struct floodlist *list, struct package *pkg, /*@out@*/struct aa_source_name **src_p) {
+	struct aa_source_name *parent, **p, *n;
+	int c;
+
+	parent = NULL;
+	p = &list->sources;
+
+	/* if this gets too slow, make it a balanced tree,
+	 * but it seems fast enough even as simple tree */
+
+	while (*p != NULL) {
+		c = strcmp(pkg->source, (*p)->name);
+		if (c == 0)
+			break;
+		parent = *p;
+		if (c > 0)
+			p = &parent->right_child;
+		else
+			p = &parent->left_child;
+	}
+	if (*p == NULL) {
+		/* there is not even something with this name */
+		n = zNEW(struct aa_source_name);
+		if (FAILEDTOALLOC(n)) {
+			return RET_ERROR_OOM;
+		}
+		n->name = strdup(pkg->source);
+		if (FAILEDTOALLOC(n->name)) {
+			free(n);
+			return RET_ERROR_OOM;
+		}
+		n->parent = parent;
+		*p = n;
+		*src_p = n;
+		return RET_OK;
+	}
+	*src_p = *p;
+	return RET_OK;
+}
+
+/* look up (pkg->source, pkg->sourceversion), creating name node and/or
+ * version entry as needed; the version list stays sorted by strcmp.
+ * On success sets *src_p and returns RET_OK. */
+static retvalue find_or_add_source(struct floodlist *list, struct package *pkg, /*@out@*/struct aa_source_version **src_p) {
+	retvalue r;
+	struct aa_source_name *sn;
+	struct aa_source_version **p, *n;
+	int c;
+
+	r = find_or_add_sourcename(list, pkg, &sn);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* source name found (or created), now look for version: */
+
+	/* c starts at -1 so an empty version list falls through to
+	 * creating a new entry below */
+	p = &sn->versions;
+	c = -1;
+	while (*p != NULL && (c = strcmp(pkg->sourceversion,
+					(*p)->version)) > 0) {
+		p = &(*p)->next;
+	}
+	if (c == 0) {
+		assert (*p != NULL);
+		*src_p = *p;
+		return RET_OK;
+	}
+	/* insert new version entry before *p (keeps list sorted) */
+	n = zNEW(struct aa_source_version);
+	if (FAILEDTOALLOC(n)) {
+		return RET_ERROR_OOM;
+	}
+	n->name = sn;
+	n->version = strdup(pkg->sourceversion);
+	if (FAILEDTOALLOC(n->version)) {
+		free(n);
+		return RET_ERROR_OOM;
+	}
+	n->next = *p;
+	*p = n;
+	*src_p = n;
+	return RET_OK;
+}
+
+/* read-only lookup of (source, sourceversion); returns NULL if either
+ * the name or that exact version is not recorded. */
+static struct aa_source_version *find_source(struct floodlist *list, const char *source, const char *sourceversion) {
+	struct aa_source_name *p;
+	struct aa_source_version *v;
+	int c = -1;
+
+	p = list->sources;
+
+	while (p != NULL) {
+		c = strcmp(source, p->name);
+		if (c == 0)
+			break;
+		if (c > 0)
+			p = p->right_child;
+		else
+			p = p->left_child;
+	}
+	if (p == NULL)
+		return NULL;
+	v = p->versions;
+	while (v != NULL && (c = strcmp(sourceversion, v->version)) > 0)
+		v = v->next;
+	/* c < 0: passed the insertion point, version not present;
+	 * otherwise v is either the match or NULL (end of list) */
+	if (c < 0)
+		return NULL;
+	else
+		return v;
+}
+
+/* Before anything else is done the current state of one target is read into
+ * the list: list->list points to the first in the sorted list,
+ * list->last to the last one inserted */
+/* record one existing package of the target: register its source
+ * version (marking has_sibling for non-'all' binaries), and append
+ * architecture 'all' packages to the (name-sorted) package list.
+ * Relies on the database iterating in sorted order; aborts otherwise. */
+static retvalue save_package_version(struct floodlist *list, struct package *pkg) {
+	struct aa_source_version *src;
+	retvalue r;
+	struct aa_package_data *package;
+
+	r = package_getarchitecture(pkg);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = package_getsource(pkg);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = find_or_add_source(list, pkg, &src);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	if (pkg->architecture != architecture_all) {
+		/* a real binary of this source exists here */
+		src->has_sibling = true;
+		return RET_NOTHING;
+	}
+
+	r = package_getversion(pkg);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	package = zNEW(struct aa_package_data);
+	if (FAILEDTOALLOC(package)) {
+		return RET_ERROR_OOM;
+	}
+
+	package->name = strdup(pkg->name);
+	if (FAILEDTOALLOC(package->name)) {
+		free(package);
+		return RET_ERROR_OOM;
+	}
+	package->old_version = package_dupversion(pkg);
+	if (FAILEDTOALLOC(package->old_version)) {
+		free(package->name);
+		free(package);
+		return RET_ERROR_OOM;
+	}
+	package->old_source = src;
+
+	if (list->list == NULL) {
+		/* first chunk to add: */
+		list->list = package;
+		list->last = package;
+	} else {
+		if (strcmp(pkg->name, list->last->name) > 0) {
+			list->last->next = package;
+			list->last = package;
+		} else {
+			/* this should only happen if the underlying
+			 * database-method get changed, so just throwing
+			 * out here */
+			fprintf(stderr,
+"INTERNAL ERROR: Package database is not sorted!!!\n");
+			assert(false);
+			exit(EXIT_FAILURE);
+		}
+	}
+	return RET_OK;
+}
+
+/* create a floodlist for target t, pre-populated with the packages
+ * currently in t (via save_package_version). On success stores the
+ * list in *fl; on error everything allocated so far is freed. */
+static retvalue floodlist_initialize(struct floodlist **fl, struct target *t) {
+	struct floodlist *list;
+	retvalue r, r2;
+	struct package_cursor iterator;
+
+	list = zNEW(struct floodlist);
+	if (FAILEDTOALLOC(list))
+		return RET_ERROR_OOM;
+
+	list->target = t;
+
+	/* Begin with the packages currently in the archive */
+
+	r = package_openiterator(t, READONLY, true, &iterator);
+	if (RET_WAS_ERROR(r)) {
+		floodlist_free(list);
+		return r;
+	}
+	while (package_next(&iterator)) {
+		r2 = save_package_version(list, &iterator.current);
+		RET_UPDATE(r, r2);
+		if (RET_WAS_ERROR(r2))
+			break;
+	}
+	r2 = package_closeiterator(&iterator);
+	RET_UPDATE(r, r2);
+
+	if (RET_WAS_ERROR(r)) {
+		floodlist_free(list);
+		return r;
+	}
+	/* reset the insertion hint before packages get pulled in */
+	list->last = NULL;
+	*fl = list;
+	return RET_OK;
+}
+
+/* offer one architecture 'all' package (from some other architecture's
+ * target) to the list: either insert it as a new candidate or decide
+ * whether it may replace the current old/new candidate of the same
+ * name, based on version comparison and sibling rules.
+ * Returns RET_OK when taken, RET_NOTHING when discarded, else error. */
+static retvalue floodlist_trypackage(struct floodlist *list, struct package *package) {
+	retvalue r;
+	struct aa_package_data *current, *insertafter;
+
+	r = package_getversion(package);
+	if (!RET_IS_OK(r))
+		return r;
+	r = package_getsource(package);
+	if (!RET_IS_OK(r))
+		return r;
+
+	/* insertafter = NULL will mean insert before list */
+	insertafter = list->last;
+	/* the next one to test, current = NULL will mean not found */
+	if (insertafter != NULL)
+		current = insertafter->next;
+	else
+		current = list->list;
+
+	/* the algorithm assumes almost all packages are fed in
+	 * alphabetically, so it searches forward from the last
+	 * insertion point and only restarts from the list head
+	 * when a package arrives out of order. */
+
+	while (true) {
+		int cmp;
+
+		assert (insertafter == NULL || insertafter->next == current);
+		assert (insertafter != NULL || current == list->list);
+
+		if (current == NULL)
+			cmp = -1; /* every package is before the end of list */
+		else
+			cmp = strcmp(package->name, current->name);
+
+		if (cmp == 0)
+			break;
+
+		if (cmp < 0) {
+			int precmp;
+
+			if (insertafter == NULL) {
+				/* if we are before the first
+				 * package, add us there...*/
+				current = NULL;
+				break;
+			}
+			precmp = strcmp(package->name, insertafter->name);
+			if (precmp == 0) {
+				current = insertafter;
+				break;
+			} else if (precmp < 0) {
+				/* restart at the beginning: */
+				current = list->list;
+				insertafter = NULL;
+				continue;
+			} else { // precmp > 0
+				/* insert after insertafter: */
+				current = NULL;
+				break;
+			}
+			assert ("This is not reached" == NULL);
+		}
+		/* cmp > 0 : may come later... */
+		assert (current != NULL);
+		insertafter = current;
+		current = current->next;
+		if (current == NULL) {
+			/* add behind insertafter at end of list */
+			break;
+		}
+		/* otherwise repeat until place found */
+	}
+	if (current == NULL) {
+		/* adding a package not yet known */
+		struct aa_package_data *new;
+		struct aa_source_version *src;
+
+		/* src may be NULL if the source is unknown here */
+		src = find_source(list, package->source, package->sourceversion);
+		new = zNEW(struct aa_package_data);
+		if (FAILEDTOALLOC(new)) {
+			return RET_ERROR_OOM;
+		}
+		new->new_source = src;
+		new->new_version = package_dupversion(package);
+		if (FAILEDTOALLOC(new->new_version)) {
+			aa_package_data_free(new);
+			return RET_ERROR_OOM;
+		}
+		new->name = strdup(package->name);
+		if (FAILEDTOALLOC(new->name)) {
+			aa_package_data_free(new);
+			return RET_ERROR_OOM;
+		}
+		r = list->target->getinstalldata(list->target,
+				package,
+				&new->new_control, &new->new_filekeys,
+				&new->new_origfiles);
+		if (RET_WAS_ERROR(r)) {
+			aa_package_data_free(new);
+			return r;
+		}
+		if (insertafter != NULL) {
+			new->next = insertafter->next;
+			insertafter->next = new;
+		} else {
+			new->next = list->list;
+			list->list = new;
+		}
+		list->last = new;
+	} else {
+		/* The package already exists: */
+		char *control;
+		struct strlist files;
+		struct checksumsarray origfiles;
+		struct aa_source_version *src;
+		int versioncmp;
+
+		list->last = current;
+
+		if (current->new_has_sibling) {
+			/* it has a new and that has a binary sibling,
+			 * which means this becomes the new version
+			 * exactly when it is newer than the old newest */
+			r = dpkgversions_cmp(package->version,
+					current->new_version,
+					&versioncmp);
+			if (RET_WAS_ERROR(r)) {
+				return r;
+			}
+			if (versioncmp <= 0) {
+				return RET_NOTHING;
+			}
+		} else if (current->old_version != NULL) {
+			/* if it is older than the old one, we will
+			 * always discard it */
+			r = dpkgversions_cmp(package->version,
+					current->old_version,
+					&versioncmp);
+			if (RET_WAS_ERROR(r)) {
+				return r;
+			}
+			if (versioncmp <= 0) {
+				return RET_NOTHING;
+			}
+		}
+		/* we need to get the source to know more */
+
+		src = find_source(list, package->source, package->sourceversion);
+		if (src == NULL || !src->has_sibling) {
+			/* the new one has no sibling, only allowed
+			 * to override those that have: */
+			if (current->new_version == NULL) {
+				if (current->old_source->has_sibling)
+					return RET_NOTHING;
+			} else if (current->new_has_sibling) {
+				return RET_NOTHING;
+			} else {
+				/* the new one has no sibling and the old one
+				 * has not too, take the newer one: */
+				r = dpkgversions_cmp(package->version,
+						current->new_version,
+						&versioncmp);
+				if (RET_WAS_ERROR(r)) {
+					return r;
+				}
+				if (versioncmp <= 0) {
+					return RET_NOTHING;
+				}
+			}
+		}
+		char *new_version = package_dupversion(package);
+		if (FAILEDTOALLOC(new_version))
+			return RET_ERROR_OOM;
+
+		r = list->target->getinstalldata(list->target,
+				package,
+				&control, &files, &origfiles);
+		if (RET_WAS_ERROR(r)) {
+			free(new_version);
+			return r;
+		}
+		/* replace the previous candidate's data */
+		free(current->new_version);
+		current->new_version = new_version;
+		current->new_source = src;
+		current->new_has_sibling = src != NULL && src->has_sibling;
+		strlist_done(&current->new_filekeys);
+		strlist_move(&current->new_filekeys, &files);
+		checksumsarray_done(&current->new_origfiles);
+		checksumsarray_move(&current->new_origfiles, &origfiles);
+		free(current->new_control);
+		current->new_control = control;
+	}
+	return RET_OK;
+}
+
+/* iterate over all packages of another target and offer every
+ * architecture 'all' package to the list via floodlist_trypackage.
+ * Honors user interrupts; aggregates per-package results. */
+static retvalue floodlist_pull(struct floodlist *list, struct target *source) {
+	retvalue result, r;
+	struct package_cursor iterator;
+
+	/* packages of the source target come in their own sorted order,
+	 * so restart the insertion hint */
+	list->last = NULL;
+	r = package_openiterator(source, READONLY, true, &iterator);
+	if (RET_WAS_ERROR(r))
+		return r;
+	result = RET_NOTHING;
+	while (package_next(&iterator)) {
+		r = package_getarchitecture(&iterator.current);
+		if (r == RET_NOTHING)
+			continue;
+		if (!RET_IS_OK(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+		if (iterator.current.architecture != architecture_all)
+			continue;
+
+		r = floodlist_trypackage(list, &iterator.current);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+		if (interrupted()) {
+			result = RET_ERROR_INTERRUPTED;
+			break;
+		}
+	}
+	r = package_closeiterator(&iterator);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* add every selected candidate (entries with new_version set) to the
+ * target's package database, after verifying the referenced files are
+ * present; optionally updates source tracking via td. */
+static retvalue floodlist_install(struct floodlist *list, struct logger *logger, /*@null@*/struct trackingdata *td) {
+	struct aa_package_data *pkg;
+	retvalue result, r;
+
+	if (list->list == NULL)
+		return RET_NOTHING;
+
+	result = target_initpackagesdb(list->target, READWRITE);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = RET_NOTHING;
+	for (pkg = list->list ; pkg != NULL ; pkg = pkg->next) {
+		if (pkg->new_version != NULL) {
+			r = files_expectfiles(&pkg->new_filekeys,
+					pkg->new_origfiles.checksums);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				continue;
+			if (interrupted()) {
+				r = RET_ERROR_INTERRUPTED;
+				break;
+			}
+			if (td != NULL) {
+				if (pkg->new_source != NULL) {
+					r = trackingdata_switch(td,
+						pkg->new_source->name->name,
+						pkg->new_source->version);
+				} else {
+					/* source unknown to the flood list:
+					 * extract it from the control data */
+					char *source, *sourceversion;
+
+					r = list->target->getsourceandversion(
+							pkg->new_control,
+							pkg->name,
+							&source,
+							&sourceversion);
+					assert (r != RET_NOTHING);
+					if (RET_WAS_ERROR(r)) {
+						RET_UPDATE(result, r);
+						break;
+					}
+					r = trackingdata_switch(td,
+							source, sourceversion);
+					free(source);
+					free(sourceversion);
+				}
+				if (RET_WAS_ERROR(r)) {
+					RET_UPDATE(result, r);
+					break;
+				}
+			}
+			r = target_addpackage(list->target,
+					logger, pkg->name, pkg->new_version,
+					pkg->new_control, &pkg->new_filekeys,
+					false, td, architecture_all,
+					NULL, NULL);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+	}
+	r = target_closepackagesdb(list->target);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* "flood" architecture 'all' packages within distribution d: for every
+ * deb/udeb target matching the given limits, pull 'all' packages from
+ * the sibling targets of all other architectures (same component and
+ * package type) and install the chosen candidates. If tracks is not
+ * NULL, source tracking data is maintained alongside. */
+retvalue flood(struct distribution *d, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, architecture_t architecture, trackingdb tracks) {
+	struct target *t, *s;
+	retvalue result = RET_NOTHING, r;
+	struct trackingdata trackingdata;
+
+	if (tracks != NULL) {
+		r = trackingdata_new(tracks, &trackingdata);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		struct floodlist *fl = NULL;
+
+		/* an explicit architecture overrides the architecture list */
+		if (atom_defined(architecture)) {
+			if (architecture != t->architecture)
+				continue;
+		} else if (limitations_missed(architectures,
+					t->architecture))
+			continue;
+		if (limitations_missed(components, t->component))
+			continue;
+		if (limitations_missed(packagetypes, t->packagetype))
+			continue;
+		if (t->packagetype != pt_deb && t->packagetype != pt_udeb)
+			continue;
+
+		r = floodlist_initialize(&fl, t);
+		if (RET_WAS_ERROR(r)) {
+			if (tracks != NULL)
+				trackingdata_done(&trackingdata);
+			return r;
+		}
+
+		for (s = d->targets ; s != NULL ; s = s->next) {
+			if (s->component != t->component)
+				continue;
+			if (s->packagetype != t->packagetype)
+				continue;
+			/* no need to copy things from myself: */
+			if (s->architecture == t->architecture)
+				continue;
+			if (limitations_missed(architectures,
+						s->architecture))
+				continue;
+			r = floodlist_pull(fl, s);
+			RET_UPDATE(d->status, r);
+			if (RET_WAS_ERROR(r)) {
+				if (tracks != NULL)
+					trackingdata_done(&trackingdata);
+				floodlist_free(fl);
+				return r;
+			}
+		}
+		r = floodlist_install(fl, d->logger,
+				(tracks != NULL)?&trackingdata:NULL);
+		RET_UPDATE(result, r);
+		floodlist_free(fl);
+		if (RET_WAS_ERROR(r)) {
+			if (tracks != NULL)
+				trackingdata_done(&trackingdata);
+			return r;
+		}
+	}
+	if (tracks != NULL) {
+		r = trackingdata_finish(tracks, &trackingdata);
+		RET_ENDUPDATE(result, r);
+	}
+	return result;
+}
diff --git a/archallflood.h b/archallflood.h
new file mode 100644
index 0000000..7d5bb04
--- /dev/null
+++ b/archallflood.h
@@ -0,0 +1,6 @@
+#ifndef REPREPRO_ARCHALLFLOOD_H
+#define REPREPRO_ARCHALLFLOOD_H
+
+retvalue flood(struct distribution *, /*@null@*/const struct atomlist * /*components*/, /*@NULL@*/const struct atomlist * /*architectures*/, /*@NULL@*/const struct atomlist * /*packagetypes*/, architecture_t, trackingdb);
+
+#endif
diff --git a/atoms.c b/atoms.c
new file mode 100644
index 0000000..c6df795
--- /dev/null
+++ b/atoms.c
@@ -0,0 +1,398 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "atoms.h"
+
+const char **atoms_architectures;
+const char **atoms_components;
+static const char * const packagetypes[5] = { "!!NONE!!", "dsc", "deb", "udeb", "ddeb" };
+const char **atoms_packagetypes = (const char **)&packagetypes;
+const char **atoms_commands;
+static int command_count;
+static const char * const types[4] = {
+ "architecture", "component", "packagetype", "command"
+};
+const char **atomtypes = (const char **)types;
+
+/* trivial implementation for now, perhaps make it more complicated later */
+static struct strlist architectures, components;
+
+/* initialize the atom tables: architecture and component lists get a
+ * 0th placeholder entry (so atom value 0 means "uninitialized"), plus
+ * the predefined "source"/"all" architectures and the fallback
+ * "strange" component. Allocates the (zeroed) command table for
+ * 'count' commands; callers are expected to fill it in afterwards. */
+retvalue atoms_init(int count) {
+	retvalue r;
+	strlist_init(&architectures);
+	strlist_init(&components);
+
+	/* add a 0th entry to all, so 0 means uninitialized */
+
+	r = strlist_add_dup(&architectures, "!!NONE!!");
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = strlist_add_dup(&architectures, "source");
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = strlist_add_dup(&architectures, "all");
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = strlist_add_dup(&components, "!!NONE!!");
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* a fallback component to put things without a component in */
+	r = strlist_add_dup(&components, "strange");
+	if (RET_WAS_ERROR(r))
+		return r;
+	atoms_components = (const char**)components.values;
+	atoms_architectures = (const char**)architectures.values;
+	command_count = count;
+	if (command_count > 0) {
+		atoms_commands = nzNEW(command_count + 1, const char*);
+		if (FAILEDTOALLOC(atoms_commands))
+			return RET_ERROR_OOM;
+	}
+	return RET_OK;
+}
+
+/* return the atom for an architecture name, interning it if new.
+ * atoms_architectures is refreshed as strlist_add_dup may reallocate. */
+retvalue architecture_intern(const char *value, architecture_t *atom_p) {
+	retvalue r;
+	int i;
+
+	i = strlist_ofs(&architectures, value);
+	if (i >= 0) {
+		*atom_p = (architecture_t)i;
+		return RET_OK;
+	}
+	i = architectures.count;
+	r = strlist_add_dup(&architectures, value);
+	atoms_architectures = (const char**)architectures.values;
+	if (RET_IS_OK(r)) {
+		*atom_p = (architecture_t)i;
+		return RET_OK;
+	} else
+		return r;
+}
+/* return the atom for a component name, interning it if new.
+ * atoms_components is refreshed as strlist_add_dup may reallocate. */
+retvalue component_intern(const char *value, component_t *atom_p) {
+	retvalue r;
+	int i;
+
+	i = strlist_ofs(&components, value);
+	if (i >= 0) {
+		*atom_p = (component_t)i;
+		return RET_OK;
+	}
+	i = components.count;
+	r = strlist_add_dup(&components, value);
+	atoms_components = (const char**)components.values;
+	if (RET_IS_OK(r)) {
+		*atom_p = (component_t)i;
+		return RET_OK;
+	} else
+		return r;
+}
+
+/* look up an interned architecture; atom_unknown if not interned yet */
+architecture_t architecture_find(const char *value) {
+	int i = strlist_ofs(&architectures, value);
+	if (i < 0)
+		return atom_unknown;
+	else
+		return (architecture_t)i;
+}
+
+/* like architecture_find, but for a length-delimited (not necessarily
+ * NUL-terminated) name; index 0 is the "!!NONE!!" placeholder and is
+ * deliberately skipped */
+architecture_t architecture_find_l(const char *value, size_t l) {
+	architecture_t a;
+
+	for (a = architectures.count - 1 ; a > 0 ; a--) {
+		const char *name = atoms_architectures[a];
+		size_t len = strlen(name);
+
+		if (len == l && memcmp(name, value, len) == 0)
+			return a;
+	}
+	return atom_unknown;
+}
+
+// TODO: this might be called a lot, perhaps optimize it...
+/* like component_find, but for a length-delimited name; skips the
+ * index-0 placeholder */
+component_t component_find_l(const char *value, size_t l) {
+	component_t a;
+
+	for (a = components.count - 1 ; a > 0 ; a--) {
+		const char *name = atoms_components[a];
+		size_t len = strlen(name);
+
+		if (len == l && memcmp(name, value, len) == 0)
+			return a;
+	}
+	return atom_unknown;
+}
+
+/* look up an interned component; atom_unknown if not interned yet */
+component_t component_find(const char *value) {
+	int i = strlist_ofs(&components, value);
+	if (i < 0)
+		return atom_unknown;
+	else
+		/* was mistakenly cast to architecture_t (copy-paste from
+		 * architecture_find); harmless while both typedef to
+		 * atom_t, but the intended type is component_t */
+		return (component_t)i;
+}
+
+/* map a package-type name to its fixed atom (dsc/deb/udeb/ddeb);
+ * atom_unknown for anything else */
+packagetype_t packagetype_find(const char *value) {
+	if (strcmp(value, "dsc") == 0)
+		return pt_dsc;
+	else if (strcmp(value, "deb") == 0)
+		return pt_deb;
+	else if (strcmp(value, "udeb") == 0)
+		return pt_udeb;
+	else if (strcmp(value, "ddeb") == 0)
+		return pt_ddeb;
+	else
+		return atom_unknown;
+}
+
+/* like packagetype_find, but for a length-delimited name; candidates
+ * are pre-filtered by length (3: dsc/deb, 4: udeb/ddeb) */
+packagetype_t packagetype_find_l(const char *value, size_t len) {
+	if (len == 3) {
+		if (strncmp(value, "dsc", 3) == 0)
+			return pt_dsc;
+		else if (strncmp(value, "deb", 3) == 0)
+			return pt_deb;
+	} else if (len == 4) {
+		if (strncmp(value, "udeb", 4) == 0)
+			return pt_udeb;
+		else if (strncmp(value, "ddeb", 4) == 0)
+			return pt_ddeb;
+	}
+	return atom_unknown;
+}
+
+/* look up a command atom; assumes entries 1..command_count of
+ * atoms_commands were filled in by the caller of atoms_init
+ * (NOTE(review): a NULL entry here would crash strcmp — confirm all
+ * slots are always populated before this is called) */
+static inline command_t command_find(const char *value) {
+	command_t c;
+
+	for (c = command_count ; c > 0 ; c--) {
+		if (strcmp(atoms_commands[c], value) == 0)
+			return c;
+	}
+	return atom_unknown;
+}
+
+/* generic lookup dispatching on the atom type; atom_unknown for
+ * unknown values or types */
+atom_t atom_find(enum atom_type type, const char *value) {
+	switch (type) {
+		case at_packagetype:
+			return packagetype_find(value);
+		case at_architecture:
+			return architecture_find(value);
+		case at_component:
+			return component_find(value);
+		case at_command:
+			return command_find(value);
+		default:
+			return atom_unknown;
+	}
+}
+
+/* generic intern; only architectures and components can be interned
+ * at runtime (package types and commands are fixed tables) */
+retvalue atom_intern(enum atom_type type, const char *value, atom_t *atom_p) {
+	assert (type == at_architecture || type == at_component);
+	switch (type) {
+		case at_architecture:
+			return architecture_intern(value, atom_p);
+		case at_component:
+			return component_intern(value, atom_p);
+		default:
+			return RET_ERROR;
+	}
+}
+
+/* initialize an empty atom list (no allocation yet) */
+void atomlist_init(struct atomlist *list) {
+	list->count = 0; list->size = 0;
+	list->atoms = NULL;
+}
+
+/* release an atom list's storage. atoms is reset but size is kept on
+ * purpose: a second atomlist_done (or other reuse) then hits the
+ * assert below, making use-after-done detectable in debug builds. */
+void atomlist_done(struct atomlist *list) {
+	if (list->size > 0) {
+		assert (list->atoms != NULL);
+		free(list->atoms);
+	}
+	/* reset atoms but not size, so reuse can be caught */
+	list->atoms = NULL;
+}
+
+/* add an atom uniquely (unsorted), RET_NOTHING when already there;
+ * grows the array in steps of 8 */
+retvalue atomlist_add_uniq(struct atomlist *list, atom_t atom) {
+	int i;
+	atom_t *n;
+
+	assert (atom_defined(atom));
+
+	for (i = 0 ; i < list->count ; i++) {
+		if (list->atoms[i] == atom)
+			return RET_NOTHING;
+	}
+	if (list->size <= list->count) {
+		n = realloc(list->atoms, (sizeof(atom_t))*(list->count + 8));
+		if (FAILEDTOALLOC(n))
+			return RET_ERROR_OOM;
+		list->size = list->count + 8;
+		list->atoms = n;
+	}
+	list->atoms[list->count++] = atom;
+	return RET_OK;
+}
+
+/* append an atom unconditionally (duplicates allowed); grows the
+ * array in steps of 8 */
+retvalue atomlist_add(struct atomlist *list, atom_t atom) {
+	atom_t *n;
+
+	assert (atom_defined(atom));
+
+	if (list->size <= list->count) {
+		n = realloc(list->atoms, (sizeof(atom_t))*(list->count + 8));
+		if (FAILEDTOALLOC(n))
+			return RET_ERROR_OOM;
+		list->size = list->count + 8;
+		list->atoms = n;
+	}
+	list->atoms[list->count++] = atom;
+	return RET_OK;
+}
+
+/* replace the contents of dest with those from orig, which get emptied;
+ * dest takes ownership of orig's array (orig must not be used again
+ * except for atomlist_done, which will then assert — see there) */
+void atomlist_move(struct atomlist *dest, struct atomlist *orig) {
+	dest->atoms = orig->atoms;
+	dest->count = orig->count;
+	dest->size = orig->size;
+	/* reset atoms but not size, so reuse can be caught */
+	orig->atoms = NULL;
+}
+
+/* true iff the list contains at least one atom different from 'atom' */
+bool atomlist_hasexcept(const struct atomlist *list, atom_t atom) {
+	int i;
+
+	for (i = 0 ; i < list->count ; i++) {
+		if (list->atoms[i] != atom)
+			return true;
+	}
+	return false;
+}
+
+/* membership test: true iff 'atom' occurs in the list */
+bool atomlist_in(const struct atomlist *list, atom_t atom) {
+	int i;
+
+	for (i = 0 ; i < list->count ; i++) {
+		if (list->atoms[i] == atom)
+			return true;
+	}
+	return false;
+}
+/* index of the first occurrence of 'atom', or -1 if not contained */
+int atomlist_ofs(const struct atomlist *list, atom_t atom) {
+	int i;
+
+	for (i = 0 ; i < list->count ; i++) {
+		if (list->atoms[i] == atom)
+			return i;
+	}
+	return -1;
+}
+
+/* true iff every atom of 'subset' occurs in 'list'; otherwise false,
+ * with the first missing atom stored in *missing (if non-NULL) */
+bool atomlist_subset(const struct atomlist *list, const struct atomlist *subset, atom_t *missing) {
+	int i, j;
+
+	for (j = 0 ; j < subset->count ; j++) {
+		atom_t atom = subset->atoms[j];
+
+		for (i = 0 ; i < list->count ; i++) {
+			if (list->atoms[i] == atom)
+				break;
+		}
+		if (i >= list->count) {
+			if (missing != NULL)
+				*missing = atom;
+			return false;
+		}
+	}
+	return true;
+}
+
+/* print the list's atom names space-separated to 'file'; returns
+ * RET_ERROR if any write failed (but still attempts all of them) */
+retvalue atomlist_fprint(FILE *file, enum atom_type type, const struct atomlist *list) {
+	const char **atoms = NULL;
+	int c;
+	atom_t *p;
+	retvalue result;
+
+	assert(list != NULL);
+	assert(file != NULL);
+
+	/* pick the name table matching the atom type */
+	switch (type) {
+		case at_architecture:
+			atoms = atoms_architectures;
+			break;
+		case at_component:
+			atoms = atoms_components;
+			break;
+		case at_packagetype:
+			atoms = atoms_packagetypes;
+			break;
+		case at_command:
+			atoms = atoms_commands;
+			break;
+	}
+	assert(atoms != NULL);
+
+	c = list->count;
+	p = list->atoms;
+	result = RET_OK;
+	while (c > 0) {
+		if (fputs(atoms[*(p++)], file) == EOF)
+			result = RET_ERROR;
+		if (--c > 0 && fputc(' ', file) == EOF)
+			result = RET_ERROR;
+	}
+	return result;
+}
+
+/* number of interned components, including the 0th placeholder */
+component_t components_count(void) {
+	return components.count;
+}
+
+/* parse a '|'-separated list of atom names into *list. 'string' is
+ * modified in place (separators overwritten with NUL). If a name is
+ * not a known atom, returns RET_NOTHING with *missing pointing at the
+ * offending (NUL-terminated) name inside 'string'. */
+retvalue atomlist_filllist(enum atom_type type, struct atomlist *list, char *string, const char **missing) {
+	struct atomlist l;
+	char *e;
+	retvalue r;
+	atom_t a;
+
+	atomlist_init(&l);
+	while (*string != '\0') {
+		e = strchr(string, '|');
+		if (e == NULL)
+			e = strchr(string, '\0');
+		else
+			*(e++) = '\0';
+		a = atom_find(type, string);
+		if (!atom_defined(a)) {
+			atomlist_done(&l);
+			*missing = string;
+			return RET_NOTHING;
+		}
+		r = atomlist_add(&l, a);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&l);
+			return r;
+		}
+		string = e;
+	}
+	atomlist_move(list, &l);
+	return RET_OK;
+}
diff --git a/atoms.h b/atoms.h
new file mode 100644
index 0000000..660adb4
--- /dev/null
+++ b/atoms.h
@@ -0,0 +1,75 @@
+#ifndef REPREPRO_ATOMS_H
+#define REPREPRO_ATOMS_H
+
+typedef int atom_t;
+typedef atom_t architecture_t;
+typedef atom_t component_t;
+typedef atom_t packagetype_t;
+typedef atom_t command_t;
+
+enum atom_type { at_architecture, at_component, at_packagetype, at_command };
+
+#define atom_unknown ((atom_t)0)
+
+#define architecture_source ((architecture_t)1)
+#define architecture_all ((architecture_t)2)
+
+#define component_strange ((component_t)1)
+
+#define pt_dsc ((packagetype_t)1)
+#define pt_deb ((packagetype_t)2)
+#define pt_udeb ((packagetype_t)3)
+#define pt_ddeb ((packagetype_t)4)
+
+#define atom_defined(a) ((a) > (atom_t)0)
+
+extern const char **atomtypes, **atoms_architectures, **atoms_components, **atoms_packagetypes, **atoms_commands;
+
+retvalue atoms_init(int command_count);
+
+retvalue architecture_intern(const char *, /*@out@*/architecture_t *);
+architecture_t architecture_find(const char *);
+architecture_t architecture_find_l(const char *, size_t);
+retvalue component_intern(const char *, /*@out@*/component_t *);
+component_t component_find(const char *);
+component_t component_find_l(const char *, size_t);
+component_t components_count(void);
+packagetype_t packagetype_find(const char *);
+packagetype_t packagetype_find_l(const char *, size_t);
+
+atom_t atom_find(enum atom_type, const char *);
+retvalue atom_intern(enum atom_type, const char *, /*@out@*/atom_t *);
+
+#define limitation_missed(a, b) ((atom_defined(a) && (a) != (b)))
+#define limitations_missed(a, b) ((a) != NULL && !atomlist_in(a, b))
+
+
+struct atomlist {
+ atom_t *atoms;
+ int count, size;
+};
+
+void atomlist_init(/*@out@*/struct atomlist *);
+void atomlist_done(/*@special@*/struct atomlist *atomlist) /*@releases atomlist->values @*/;
+
+/* add an atom uniquely (not sorted, component guessing might not like it),
+ * RET_NOTHING when already there */
+retvalue atomlist_add_uniq(struct atomlist *, atom_t);
+/* always add to the end */
+retvalue atomlist_add(struct atomlist *, atom_t);
+
+/* replace the contents of dest with those from orig, which get emptied */
+void atomlist_move(/*@out@*/struct atomlist *, /*@special@*/struct atomlist *orig) /*@releases orig->values @*/;
+
+bool atomlist_hasexcept(const struct atomlist *, atom_t);
+bool atomlist_in(const struct atomlist *, atom_t);
+int atomlist_ofs(const struct atomlist *, atom_t);
+
+/* if missing != NULL and subset is not a subset of the atomlist, set *missing to the first missing one */
+bool atomlist_subset(const struct atomlist *, const struct atomlist * /*subset*/, /*@null@*/atom_t * /*missing*/ );
+
+/* print a space separated list of elements */
+retvalue atomlist_fprint(FILE *, enum atom_type, const struct atomlist *);
+
+retvalue atomlist_filllist(enum atom_type, /*@out@*/struct atomlist *, char * /*string*/, /*@out@*/const char ** /*missing*/);
+#endif
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..1c2e51a
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Regenerate the autotools build system (aclocal, autoheader, automake,
+# autoconf). With "--configure [builddir] [configure-args...]" it
+# additionally runs configure with maintainer warnings enabled.
+set -e
+
+mkdir -p ac
+aclocal
+autoheader
+automake -a -c
+autoconf
+# the cache is only a speed-up; ignore failures removing it
+rm -rf autom4te.cache || true
+
+if [ $# -lt 1 ] ; then
+	exit 0
+fi
+
+if [ "x$1" = "x--configure" ] ; then
+	shift
+	repreprodir="`pwd`"
+	# optional first argument: out-of-tree build directory
+	if [ $# -gt 0 ] ; then
+		mkdir -p -- "$1"
+		cd "$1" || exit 1
+		shift
+	fi
+	"$repreprodir"/configure --enable-maintainer-mode CFLAGS="-Wall -O2 -g -Wmissing-prototypes -Wstrict-prototypes -Wshadow -Wunused-parameter -Wsign-compare" CPPFLAGS="" "$@"
+else
+	echo "unsupported option $1" >&2
+	exit 1
+fi
diff --git a/binaries.c b/binaries.c
new file mode 100644
index 0000000..9465dfc
--- /dev/null
+++ b/binaries.c
@@ -0,0 +1,784 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "chunks.h"
+#include "sources.h"
+#include "binaries.h"
+#include "names.h"
+#include "dpkgversions.h"
+#include "log.h"
+#include "override.h"
+#include "tracking.h"
+#include "debfile.h"
+#include "package.h"
+
+/* control-file field name for each checksum type, indexed by enum checksumtype */
+static const char * const deb_checksum_headers[cs_COUNT] = {
+	"MD5sum", "SHA1", "SHA256", "SHA512", "Size"};
+
+/* build the pool file name "name_version_arch.deb/.udeb";
+ * returns a malloc'd string or NULL on allocation failure */
+static char *calc_binary_basename(const char *name, const char *version, architecture_t arch, packagetype_t packagetype) {
+	const char *v;
+	assert (name != NULL);
+	assert (version != NULL);
+	assert (atom_defined(arch));
+	assert (atom_defined(packagetype));
+	/* strip a leading epoch ("1:2.3" -> "2.3"); epochs are not part of file names */
+	v = strchr(version, ':');
+	if (v != NULL)
+		v++;
+	else
+		v = version;
+	return mprintf("%s_%s_%s.%s", name, v, atoms_architectures[arch],
+			atoms_packagetypes[packagetype]);
+}
+
+
+/* Parse all known hash fields (MD5sum/SHA1/SHA256/SHA512) and Size out of
+ * a "Packages" control chunk into a struct checksums.
+ * At least one hash field and the Size field are required. */
+static retvalue binaries_parse_checksums(const char *chunk, /*@out@*/struct checksums **checksums_p) {
+	retvalue result, r;
+	char *checksums[cs_COUNT];	/* one slot per deb_checksum_headers entry */
+	enum checksumtype type;
+	bool gothash = false;	/* saw at least one real hash (not just Size) */
+
+	result = RET_NOTHING;
+
+	for (type = 0 ; type < cs_COUNT ; type++) {
+		checksums[type] = NULL;
+		r = chunk_getvalue(chunk, deb_checksum_headers[type],
+				&checksums[type]);
+		if (type != cs_length && RET_IS_OK(r))
+			gothash = true;
+		RET_UPDATE(result, r);
+	}
+	if (!gothash) {
+		fprintf(stderr,
+"No checksums found in binary control chunk:\n '%s'\n",
+			chunk);
+		RET_UPDATE(result, RET_ERROR_MISSING);
+	}
+	if (checksums[cs_length] == NULL) {
+		fprintf(stderr,
+"Missing 'Size' line in binary control chunk:\n '%s'\n",
+			chunk);
+		RET_UPDATE(result, RET_ERROR_MISSING);
+	}
+	if (RET_WAS_ERROR(result)) {
+		for (type = 0 ; type < cs_COUNT ; type++)
+			free(checksums[type]);
+		return result;
+	}
+	/* checksums_init takes over the strings in checksums[] */
+	return checksums_init(checksums_p, checksums);
+}
+
+/* Extract the Architecture: field of a control chunk and translate it
+ * into an already-interned architecture atom; errors out if the field
+ * is missing or names an unknown architecture. */
+retvalue binaries_getarchitecture(const char *chunk, architecture_t *architecture_p) {
+	char *parch;
+	retvalue r;
+
+	r = chunk_getvalue(chunk, "Architecture", &parch);
+	if (r == RET_NOTHING) {
+		fprintf(stderr,
+"Internal Error: Missing Architecture: header in '%s'!\n",
+			chunk);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* find only looks up known atoms, it does not intern new ones */
+	*architecture_p = architecture_find(parch);
+	free(parch);
+
+	if (!atom_defined(*architecture_p)) {
+		fprintf(stderr,
+"Internal Error: Unexpected Architecture: header in '%s'!\n",
+			chunk);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Compute the pool basename ("name_version_arch.deb") of a package after
+ * verifying that name and version only contain safe characters.
+ * Returns RET_OK on success, error otherwise. */
+static retvalue binaries_calc_basename(struct package *package, /*@out@*/char **basename_p) {
+	retvalue r;
+	char *mybasename;
+
+	assert (package->name != NULL);
+	assert (package->version != NULL);
+
+	r = properpackagename(package->name);
+	if (!RET_WAS_ERROR(r))
+		r = properversion(package->version);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	mybasename = calc_binary_basename(package->name,
+			package->version,
+			package->architecture,
+			package->target->packagetype);
+	if (FAILEDTOALLOC(mybasename)) {
+		return RET_ERROR_OOM;
+	}
+
+	*basename_p = mybasename;
+	return RET_OK;
+}
+
+/* get files out of a "Packages.gz"-chunk:
+ * a binary package has exactly one file, named by its Filename: field */
+retvalue binaries_getfilekeys(const char *chunk, struct strlist *files) {
+	retvalue r;
+	char *filename;
+
+	/* Read the filename given there */
+	r = chunk_getvalue(chunk, "Filename", &filename);
+	if (!RET_IS_OK(r)) {
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Data does not look like binary control: '%s'\n",
+				chunk);
+			r = RET_ERROR;
+		}
+		return r;
+	}
+	/* the strlist takes ownership of filename */
+	r = strlist_init_singleton(filename, files);
+	return r;
+}
+
+/* build the single filekey "pool/<component>/<prefix>/<source>/<basefilename>"
+ * for a package, after checking the source name is proper */
+static retvalue calcfilekeys(component_t component, const char *sourcename, const char *basefilename, struct strlist *filekeys) {
+	char *filekey;
+	retvalue r;
+
+	r = propersourcename(sourcename);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	filekey = calc_filekey(component, sourcename, basefilename);
+	if (FAILEDTOALLOC(filekey))
+		return RET_ERROR_OOM;
+	r = strlist_init_singleton(filekey, filekeys);
+	return r;
+}
+
+/* Rewrite a control chunk for inclusion in this repository: normalize the
+ * Package: field, compute the pool filekey and substitute it into the
+ * Filename: field. On success *newchunk and filekeys are set. */
+static inline retvalue calcnewcontrol(const char *chunk, const char *packagename, const char *sourcename, const char *basefilename, component_t component, struct strlist *filekeys, char **newchunk) {
+	retvalue r;
+	char *n;
+
+
+	n = chunk_normalize(chunk, "Package", packagename);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	r = calcfilekeys(component, sourcename, basefilename, filekeys);
+	if (RET_WAS_ERROR(r)) {
+		free(n);
+		return r;
+	}
+
+	/* binary packages have exactly one file */
+	assert (filekeys->count == 1);
+	*newchunk = chunk_replacefield(n, "Filename",
+			filekeys->values[0], false);
+	free(n);
+	if (FAILEDTOALLOC(*newchunk)) {
+		strlist_done(filekeys);
+		return RET_ERROR_OOM;
+	}
+	return RET_OK;
+}
+
+/* read the mandatory Version: field of a control chunk */
+retvalue binaries_getversion(const char *control, char **version) {
+	retvalue r;
+
+	r = chunk_getvalue(control, "Version", version);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING) {
+		fprintf(stderr,
+"Missing 'Version' field in chunk:'%s'\n",
+			control);
+		return RET_ERROR;
+	}
+	return r;
+}
+
+/* Prepare everything needed to install a binary package into target t:
+ * the rewritten control chunk (pool Filename), the filekeys it will be
+ * stored under and the checksums parsed from the original chunk. */
+retvalue binaries_getinstalldata(const struct target *t, struct package *package, char **control, struct strlist *filekeys, struct checksumsarray *origfiles) {
+	char *basefilename;
+	struct checksumsarray origfilekeys;
+	retvalue r;
+	const char *chunk = package->control;
+
+	assert (t->packagetype == package->target->packagetype);
+
+	/* make sure package->source is available for pool placement */
+	r = package_getsource(package);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = binaries_calc_basename(package, &basefilename);
+	if (RET_WAS_ERROR(r))
+		/* was 'return RET_ERROR': propagate the specific error
+		 * (e.g. RET_ERROR_OOM), like every other path here */
+		return r;
+	r = binaries_getchecksums(chunk, &origfilekeys);
+	if (RET_WAS_ERROR(r)) {
+		free(basefilename);
+		return r;
+	}
+
+	r = calcnewcontrol(chunk, package->name, package->source,
+			basefilename,
+			t->component, filekeys, control);
+	if (RET_WAS_ERROR(r)) {
+		checksumsarray_done(&origfilekeys);
+	} else {
+		assert (r != RET_NOTHING);
+		checksumsarray_move(origfiles, &origfilekeys);
+	}
+	free(basefilename);
+	return r;
+}
+
+/* get the (single) file of a binary control chunk together with its
+ * parsed checksums as a checksumsarray */
+retvalue binaries_getchecksums(const char *chunk, struct checksumsarray *filekeys) {
+	retvalue r;
+	struct checksumsarray a;
+
+	r = binaries_getfilekeys(chunk, &a.names);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (a.names.count == 1);
+	a.checksums = NEW(struct checksums *);
+	if (FAILEDTOALLOC(a.checksums)) {
+		strlist_done(&a.names);
+		return RET_ERROR_OOM;
+	}
+	r = binaries_parse_checksums(chunk, a.checksums);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		free(a.checksums);
+		strlist_done(&a.names);
+		return r;
+	}
+	checksumsarray_move(filekeys, &a);
+	return RET_OK;
+}
+
+/* Re-apply the .deb override file to a control chunk; RET_NOTHING means
+ * no override entry exists and the chunk is left alone. */
+retvalue binaries_doreoverride(const struct target *target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) {
+	const struct overridedata *o;
+	struct fieldtoadd *fields;
+	char *newchunk;
+	retvalue r;
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	o = override_search(target->distribution->overrides.deb, packagename);
+	if (o == NULL)
+		return RET_NOTHING;
+
+	r = override_allreplacefields(o, &fields);
+	if (!RET_IS_OK(r))
+		return r;
+	newchunk = chunk_replacefields(controlchunk, fields, "Filename", false);
+	addfield_free(fields);
+	if (FAILEDTOALLOC(newchunk))
+		return RET_ERROR_OOM;
+	*newcontrolchunk = newchunk;
+	return RET_OK;
+}
+
+/* same as binaries_doreoverride, but using the .udeb override file
+ * (and the udeb variant of chunk_replacefields) */
+retvalue ubinaries_doreoverride(const struct target *target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) {
+	const struct overridedata *o;
+	struct fieldtoadd *fields;
+	char *newchunk;
+	retvalue r;
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	o = override_search(target->distribution->overrides.udeb, packagename);
+	if (o == NULL)
+		return RET_NOTHING;
+
+	r = override_allreplacefields(o, &fields);
+	if (!RET_IS_OK(r))
+		return r;
+	newchunk = chunk_replacefields(controlchunk, fields, "Filename", true);
+	addfield_free(fields);
+	if (FAILEDTOALLOC(newchunk))
+		return RET_ERROR_OOM;
+	*newcontrolchunk = newchunk;
+	return RET_OK;
+}
+
+/* Register a binary package's pool file with its source package in the
+ * tracking database ('Architecture: all' files as ft_ALL_BINARY, others
+ * as ft_ARCH_BINARY). */
+retvalue binaries_retrack(const char *packagename, const char *chunk, trackingdb tracks) {
+	retvalue r;
+	const char *sourcename;
+	char *fsourcename, *sourceversion, *arch, *filekey;
+	enum filetype filetype;
+	struct trackedpackage *pkg;
+
+	//TODO: eliminate duplicate code!
+	assert(packagename!=NULL);
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	/* is there a sourcename */
+	r = chunk_getnameandversion(chunk, "Source",
+			&fsourcename, &sourceversion);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING) {
+		/* no Source: field -- source shares the binary's name */
+		sourceversion = NULL;
+		sourcename = packagename;
+		fsourcename = NULL;
+	} else {
+		sourcename = fsourcename;
+	}
+	if (sourceversion == NULL) {
+		/* no "(version)" in Source: -- use the binary's Version */
+		// Think about binNMUs, can something be done here?
+		r = chunk_getvalue(chunk, "Version", &sourceversion);
+		if (RET_WAS_ERROR(r)) {
+			free(fsourcename);
+			return r;
+		}
+		if (r == RET_NOTHING) {
+			free(fsourcename);
+			fprintf(stderr,
+"Missing 'Version' field in chunk:'%s'\n",
+				chunk);
+			return RET_ERROR;
+		}
+	}
+
+	r = chunk_getvalue(chunk, "Architecture", &arch);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "No Architecture field in chunk:'%s'\n",
+				chunk);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(sourceversion);
+		free(fsourcename);
+		return r;
+	}
+	if (strcmp(arch, "all") == 0) {
+		filetype = ft_ALL_BINARY;
+	} else {
+		filetype = ft_ARCH_BINARY;
+	}
+	free(arch);
+
+	r = chunk_getvalue(chunk, "Filename", &filekey);
+	if (!RET_IS_OK(r)) {
+		if (r == RET_NOTHING) {
+			fprintf(stderr, "No Filename field in chunk: '%s'\n",
+					chunk);
+			r = RET_ERROR;
+		}
+		free(sourceversion);
+		free(fsourcename);
+		return r;
+	}
+	/* look up (or create) the tracking entry of the source package */
+	r = tracking_getornew(tracks, sourcename, sourceversion, &pkg);
+	free(fsourcename);
+	free(sourceversion);
+	if (RET_WAS_ERROR(r)) {
+		free(filekey);
+		return r;
+	}
+	assert (r != RET_NOTHING);
+	/* filekey is not freed here -- presumably addfilekey takes
+	 * ownership; confirm against trackedpackage_addfilekey */
+	r = trackedpackage_addfilekey(tracks, pkg, filetype, filekey, true);
+	if (RET_WAS_ERROR(r)) {
+		trackedpackage_free(pkg);
+		return r;
+	}
+	return tracking_save(tracks, pkg);
+}
+
+/* Determine the source package name and version a binary package was
+ * built from: its Source: field (possibly with "(version)"), falling
+ * back to the binary's own name and Version. Both results are malloc'd. */
+retvalue binaries_getsourceandversion(const char *chunk, const char *packagename, char **source, char **version) {
+	retvalue r;
+	char *sourcename, *sourceversion;
+
+	//TODO: eliminate duplicate code!
+	assert(packagename!=NULL);
+
+	/* is there a sourcename */
+	r = chunk_getnameandversion(chunk, "Source",
+			&sourcename, &sourceversion);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING) {
+		sourceversion = NULL;
+		sourcename = strdup(packagename);
+		if (FAILEDTOALLOC(sourcename))
+			return RET_ERROR_OOM;
+	}
+	if (sourceversion == NULL) {
+		r = chunk_getvalue(chunk, "Version", &sourceversion);
+		if (RET_WAS_ERROR(r)) {
+			free(sourcename);
+			return r;
+		}
+		if (r == RET_NOTHING) {
+			free(sourcename);
+			fprintf(stderr, "No Version field in chunk:'%s'\n",
+					chunk);
+			return RET_ERROR;
+		}
+	}
+	*source = sourcename;
+	*version = sourceversion;
+	return RET_OK;
+}
+
+/* like chunk_getvalue, but a missing field is an error (with message) */
+static inline retvalue getvalue(const char *filename, const char *chunk, const char *field, char **value) {
+	retvalue r;
+
+	r = chunk_getvalue(chunk, field, value);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "No %s field in %s's control file!\n",
+				field, filename);
+		r = RET_ERROR;
+	}
+	return r;
+}
+
+/* only check a field is present, without extracting its value */
+static inline retvalue checkvalue(const char *filename, const char *chunk, const char *field) {
+	retvalue r;
+
+	r = chunk_checkfield(chunk, field);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "No %s field in %s's control file!\n",
+				field, filename);
+		r = RET_ERROR;
+	}
+	return r;
+}
+
+/* like chunk_getvalue, but a missing field yields *value == NULL */
+static inline retvalue getvalue_n(const char *chunk, const char *field, char **value) {
+	retvalue r;
+
+	r = chunk_getvalue(chunk, field, value);
+	if (r == RET_NOTHING) {
+		*value = NULL;
+	}
+	return r;
+}
+
+/* release all strings owned by a struct deb_headers */
+void binaries_debdone(struct deb_headers *pkg) {
+	free(pkg->name);free(pkg->version);
+	free(pkg->source);free(pkg->sourceversion);
+	free(pkg->control);
+	free(pkg->section);
+	free(pkg->priority);
+}
+
+/* Read the control information of a .deb file into *deb.
+ * Package, Version and Architecture are required (Maintainer and
+ * Description only checked for presence); Source defaults to the package
+ * name and sourceversion to the binary Version. On error some fields
+ * may already be set (see header: caller must still call debdone). */
+retvalue binaries_readdeb(struct deb_headers *deb, const char *filename) {
+	retvalue r;
+	char *architecture;
+
+	r = extractcontrol(&deb->control, filename);
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* first look for fields that should be there */
+
+	r = chunk_getname(deb->control, "Package", &deb->name, false);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Package' field in %s!\n", filename);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = checkvalue(filename, deb->control, "Maintainer");
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = checkvalue(filename, deb->control, "Description");
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = getvalue(filename, deb->control, "Version", &deb->version);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = getvalue(filename, deb->control, "Architecture", &architecture);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = properfilenamepart(architecture);
+	if (RET_WAS_ERROR(r)) {
+		free(architecture);
+		return r;
+	}
+	/* intern registers architectures not seen before */
+	r = architecture_intern(architecture, &deb->architecture);
+	free(architecture);
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* can be there, otherwise we also know what it is */
+	r = chunk_getnameandversion(deb->control, "Source",
+			&deb->source, &deb->sourceversion);
+	if (r == RET_NOTHING) {
+		deb->source = strdup(deb->name);
+		if (FAILEDTOALLOC(deb->source))
+			r = RET_ERROR_OOM;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (deb->sourceversion == NULL) {
+		deb->sourceversion = strdup(deb->version);
+		if (FAILEDTOALLOC(deb->sourceversion))
+			return RET_ERROR_OOM;
+	}
+
+	/* normally there, but optional: */
+
+	r = getvalue_n(deb->control, PRIORITY_FIELDNAME, &deb->priority);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = getvalue_n(deb->control, SECTION_FIELDNAME, &deb->section);
+	if (RET_WAS_ERROR(r))
+		return r;
+	return RET_OK;
+}
+
+/* do overwrites, add Filename and Checksums to the control-item:
+ * builds the final control chunk that will be stored in the index */
+retvalue binaries_complete(const struct deb_headers *pkg, const char *filekey, const struct checksums *checksums, const struct overridedata *override, const char *section, const char *priority, char **newcontrol) {
+	struct fieldtoadd *replace;
+	char *normalchunk, *newchunk;
+	enum checksumtype type;
+
+	assert (section != NULL && priority != NULL);
+	assert (filekey != NULL && checksums != NULL);
+
+	/* collect all replacement fields as a linked list */
+	replace = NULL;
+	for (type = 0 ; type < cs_COUNT ; type++) {
+		const char *start;
+		size_t len;
+		if (checksums_getpart(checksums, type, &start, &len)) {
+			replace = addfield_newn(deb_checksum_headers[type],
+					start, len, replace);
+			if (FAILEDTOALLOC(replace))
+				return RET_ERROR_OOM;
+		}
+	}
+	replace = addfield_new("Filename", filekey, replace);
+	if (FAILEDTOALLOC(replace))
+		return RET_ERROR_OOM;
+	replace = addfield_new(SECTION_FIELDNAME, section, replace);
+	if (FAILEDTOALLOC(replace))
+		return RET_ERROR_OOM;
+	replace = addfield_new(PRIORITY_FIELDNAME, priority, replace);
+	if (FAILEDTOALLOC(replace))
+		return RET_ERROR_OOM;
+
+	/* override entries win over the values computed above */
+	replace = override_addreplacefields(override, replace);
+	if (FAILEDTOALLOC(replace))
+		return RET_ERROR_OOM;
+
+	normalchunk = chunk_normalize(pkg->control,
+			"Package", pkg->name);
+	if (FAILEDTOALLOC(normalchunk))
+		newchunk = NULL;
+	else
+		newchunk = chunk_replacefields(normalchunk, replace,
+				"Description", true);
+	free(normalchunk);
+	addfield_free(replace);
+	if (FAILEDTOALLOC(newchunk)) {
+		return RET_ERROR_OOM;
+	}
+
+	*newcontrol = newchunk;
+
+	return RET_OK;
+}
+
+/* update Checksums: replace only the hash/size fields of an existing
+ * control chunk (e.g. after checksum data was improved) */
+retvalue binaries_complete_checksums(const char *chunk, const struct strlist *filekeys, struct checksums **c, char **out) {
+	struct fieldtoadd *replace;
+	char *newchunk;
+	enum checksumtype type;
+	const struct checksums *checksums;
+
+	assert (filekeys->count == 1);
+	checksums = c[0];
+
+	replace = NULL;
+	for (type = 0 ; type < cs_COUNT ; type++) {
+		const char *start;
+		size_t len;
+		if (checksums_getpart(checksums, type, &start, &len)) {
+			replace = addfield_newn(deb_checksum_headers[type],
+					start, len, replace);
+			if (FAILEDTOALLOC(replace))
+				return RET_ERROR_OOM;
+		}
+	}
+	newchunk = chunk_replacefields(chunk, replace,
+			"Description", true);
+	addfield_free(replace);
+	if (FAILEDTOALLOC(newchunk))
+		return RET_ERROR_OOM;
+	*out = newchunk;
+	return RET_OK;
+}
+
+/* Add a prepared binary package (control chunk and filekeys already
+ * computed) to a distribution: into its native architecture, or -- for
+ * 'Architecture: all' -- into all requested non-source architectures. */
+retvalue binaries_adddeb(const struct deb_headers *deb, const struct atomlist *forcearchitectures, packagetype_t packagetype, struct distribution *distribution, struct trackingdata *trackingdata, component_t component, const struct strlist *filekeys, const char *control) {
+	retvalue r, result;
+	int i;
+
+	assert (logger_isprepared(distribution->logger));
+
+	/* finally put it into one or more architectures of the distribution */
+
+	result = RET_NOTHING;
+
+	if (deb->architecture != architecture_all) {
+		struct target *t = distribution_getpart(distribution,
+				component, deb->architecture,
+				packagetype);
+		r = target_initpackagesdb(t, READWRITE);
+		if (!RET_WAS_ERROR(r)) {
+			retvalue r2;
+			if (interrupted())
+				r = RET_ERROR_INTERRUPTED;
+			else
+				r = target_addpackage(t, distribution->logger,
+						deb->name, deb->version,
+						control,
+						filekeys,
+						false,
+						trackingdata,
+						deb->architecture,
+						NULL, NULL);
+			r2 = target_closepackagesdb(t);
+			RET_ENDUPDATE(r, r2);
+		}
+		RET_UPDATE(result, r);
+		RET_UPDATE(distribution->status, result);
+		return result;
+	}
+	/* It's an architecture all package */
+
+	/* if -A includes all, it is added everywhere, as if no -A was
+	 * given. (as it behaved this way when there was only one -A possible,
+	 * and to allow incoming to force it into architecture 'all' )
+	 * */
+	if (forcearchitectures != NULL &&
+			atomlist_in(forcearchitectures, architecture_all))
+		forcearchitectures = NULL;
+
+	for (i = 0 ; i < distribution->architectures.count ; i++) {
+		/*@dependent@*/struct target *t;
+		architecture_t a = distribution->architectures.atoms[i];
+
+		if (a == architecture_source)
+			continue;
+		if (forcearchitectures != NULL &&
+				!atomlist_in(forcearchitectures, a))
+			continue;
+		t = distribution_getpart(distribution,
+				component, a, packagetype);
+		r = target_initpackagesdb(t, READWRITE);
+		if (!RET_WAS_ERROR(r)) {
+			retvalue r2;
+			if (interrupted())
+				r = RET_ERROR_INTERRUPTED;
+			else
+				r = target_addpackage(t, distribution->logger,
+						deb->name, deb->version,
+						control,
+						filekeys,
+						false,
+						trackingdata,
+						deb->architecture,
+						NULL, NULL);
+			r2 = target_closepackagesdb(t);
+			RET_ENDUPDATE(r, r2);
+		}
+		RET_UPDATE(result, r);
+	}
+	RET_UPDATE(distribution->status, result);
+	return result;
+}
+
+/* dry-run check whether a package could be added to one target
+ * (opens the packages db read-only) */
+static inline retvalue checkadddeb(struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype, bool tracking, const struct deb_headers *deb, bool permitnewerold) {
+	retvalue r;
+	struct target *t;
+
+	t = distribution_getpart(distribution,
+			component, architecture, packagetype);
+	assert (t != NULL);
+	r = target_initpackagesdb(t, READONLY);
+	if (!RET_WAS_ERROR(r)) {
+		retvalue r2;
+		if (interrupted())
+			r = RET_ERROR_INTERRUPTED;
+		else
+			r = target_checkaddpackage(t,
+					deb->name, deb->version,
+					tracking,
+					permitnewerold);
+		r2 = target_closepackagesdb(t);
+		RET_ENDUPDATE(r, r2);
+	}
+	return r;
+}
+
+/* dry-run variant of binaries_adddeb: check every architecture the
+ * package would be added to, without modifying anything */
+retvalue binaries_checkadddeb(const struct deb_headers *deb, architecture_t forcearchitecture, packagetype_t packagetype, struct distribution *distribution, bool tracking, component_t component, bool permitnewerold) {
+	retvalue r, result;
+	int i;
+
+	/* finally put it into one or more architectures of the distribution */
+	result = RET_NOTHING;
+
+	if (deb->architecture != architecture_all) {
+		r = checkadddeb(distribution,
+				component, deb->architecture, packagetype,
+				tracking, deb, permitnewerold);
+		RET_UPDATE(result, r);
+	} else if (atom_defined(forcearchitecture)
+			&& forcearchitecture != architecture_all) {
+		r = checkadddeb(distribution,
+				component, forcearchitecture, packagetype,
+				tracking, deb, permitnewerold);
+		RET_UPDATE(result, r);
+	} else for (i = 0 ; i < distribution->architectures.count ; i++) {
+		architecture_t a = distribution->architectures.atoms[i];
+		if (a == architecture_source)
+			continue;
+		r = checkadddeb(distribution,
+				component, a, packagetype,
+				tracking, deb, permitnewerold);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* compute the filekeys a .deb described by *deb will be stored under */
+retvalue binaries_calcfilekeys(component_t component, const struct deb_headers *deb, packagetype_t packagetype, struct strlist *filekeys) {
+	retvalue r;
+	char *basefilename;
+
+	basefilename = calc_binary_basename(deb->name, deb->version,
+			deb->architecture, packagetype);
+	if (FAILEDTOALLOC(basefilename))
+		return RET_ERROR_OOM;
+
+	r = calcfilekeys(component, deb->source, basefilename, filekeys);
+	free(basefilename);
+	return r;
+}
diff --git a/binaries.h b/binaries.h
new file mode 100644
index 0000000..dcae019
--- /dev/null
+++ b/binaries.h
@@ -0,0 +1,57 @@
+#ifndef REPREPRO_BINARIES_H
+#define REPREPRO_BINARIES_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_TARGET_H
+#include "target.h"
+#endif
+#ifndef REPREPRO_CHECKSUMS_H
+#include "checksums.h"
+#endif
+
+
+/* Functions for the target.h-stuff: */
+get_version binaries_getversion;
+get_installdata binaries_getinstalldata;
+get_architecture binaries_getarchitecture;
+get_filekeys binaries_getfilekeys;
+get_checksums binaries_getchecksums;
+do_reoverride binaries_doreoverride;
+do_reoverride ubinaries_doreoverride;
+do_retrack binaries_retrack;
+get_sourceandversion binaries_getsourceandversion;
+complete_checksums binaries_complete_checksums;
+
+/* Functions for checkindeb.c and incoming.c: */
+
+/* control information extracted from a .deb file */
+struct deb_headers {
+	char *name, *version;
+	char *source;
+	architecture_t architecture;
+	char *control;
+	/* only extracted when requested: */
+	/*@null@*/char *sourceversion;
+	/* optional fields: */
+	/*@null@*/char *section;
+	/*@null@*/char *priority;
+};
+
+/* read contents of filename into deb_headers.
+ * - does not follow retvalue conventions, some fields may be set even when
+ *   error returned
+ * - no checks for sanity of values, left to the caller */
+
+retvalue binaries_readdeb(struct deb_headers *, const char *filename);
+void binaries_debdone(struct deb_headers *);
+
+retvalue binaries_calcfilekeys(component_t, const struct deb_headers *, packagetype_t, /*@out@*/struct strlist *);
+
+struct overridedata;
+retvalue binaries_complete(const struct deb_headers *, const char * /*filekey*/, const struct checksums *, const struct overridedata *, const char * /*section*/, const char * /*priority*/, char **/*newcontrol_p*/);
+
+retvalue binaries_adddeb(const struct deb_headers *, const struct atomlist */*forcedarchitectures*/, packagetype_t, struct distribution *, /*@null@*/struct trackingdata *, component_t, const struct strlist */*filekeys*/, const char */*control*/);
+retvalue binaries_checkadddeb(const struct deb_headers *, architecture_t /*forcearchitecture*/, packagetype_t, struct distribution *, bool tracking, component_t, bool /*permitnewerold*/);
+#endif
diff --git a/byhandhook.c b/byhandhook.c
new file mode 100644
index 0000000..c4f18fd
--- /dev/null
+++ b/byhandhook.c
@@ -0,0 +1,241 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "error.h"
+#include "filecntl.h"
+#include "names.h"
+#include "configparser.h"
+#include "globmatch.h"
+#include "hooks.h"
+#include "byhandhook.h"
+
+/* one configured byhand hook: three globs selecting the files it applies
+ * to, and the script to run for matching files */
+struct byhandhook {
+	/*@null@*/struct byhandhook *next;
+	char *sectionglob;
+	char *priorityglob;
+	char *filenameglob;
+	char *script;
+};
+
+/* free a whole list of hooks including all their strings */
+void byhandhooks_free(struct byhandhook *l) {
+	while (l != NULL) {
+		/*@null@*/struct byhandhook *n = l->next;
+
+		free(l->sectionglob);
+		free(l->priorityglob);
+		free(l->filenameglob);
+		free(l->script);
+		free(l);
+		l = n;
+	}
+}
+
+/* Parse the lines of a byhand-hook config field. Each hook is one line
+ * of exactly four words: section glob, priority glob, filename glob and
+ * the script to run (expanded via configfile_expandname). */
+retvalue byhandhooks_parse(struct configiterator *iter, struct byhandhook **hooks_p) {
+	retvalue r;
+	char *v;
+	struct byhandhook *h, *hooks = NULL, **nexthook_p = &hooks;
+
+	/* nothing may follow the field name on the same line */
+	r = config_getwordinline(iter, &v);
+	if (RET_IS_OK(r)) {
+		fprintf(stderr,
+"Error parsing %s, line %u, column %u: unexpected input '%s'"
+" (each hook must be in its own line)!\n",
+			config_filename(iter),
+			config_markerline(iter),
+			config_markercolumn(iter),
+			v);
+		free(v);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	while (config_nextline(iter)) {
+		r = config_getwordinline(iter, &v);
+		if (r == RET_NOTHING)
+			continue;
+		if (RET_WAS_ERROR(r))
+			break;
+		h = zNEW(struct byhandhook);
+		if (FAILEDTOALLOC(h)) {
+			r = RET_ERROR_OOM;
+			break;
+		}
+		/* link first, so error cleanup can free it via hooks */
+		*nexthook_p = h;
+		nexthook_p = &h->next;
+		h->sectionglob = v;
+		r = config_getwordinline(iter, &v);
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 1!\n",
+				config_filename(iter),
+				config_markerline(iter),
+				config_markercolumn(iter));
+			r = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		h->priorityglob = v;
+		r = config_getwordinline(iter, &v);
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 2!\n",
+				config_filename(iter),
+				config_markerline(iter),
+				config_markercolumn(iter));
+			r = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		h->filenameglob = v;
+		r = config_getwordinline(iter, &v);
+		if (r == RET_NOTHING) {
+			/* fixed: this message wrongly said "found only 2" --
+			 * the third argument was already parsed above */
+			fprintf(stderr,
+"Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 3!\n",
+				config_filename(iter),
+				config_markerline(iter),
+				config_markercolumn(iter));
+			r = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		/* fixed: removed a stray line-continuation backslash here */
+		assert (v != NULL && v[0] != '\0');
+		h->script = configfile_expandname(v, v);
+		if (FAILEDTOALLOC(h->script)) {
+			r = RET_ERROR_OOM;
+			break;
+		}
+		r = config_getwordinline(iter, &v);
+		if (RET_IS_OK(r)) {
+			/* fixed: message had an unbalanced parenthesis */
+			fprintf(stderr,
+"Error parsing %s, line %u, column %u: each byhand hooks needs exactly 4 arguments, but there are more (first unexpected: '%s')!\n",
+				config_filename(iter),
+				config_markerline(iter),
+				config_markercolumn(iter), v);
+			free(v);
+			r = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	if (RET_WAS_ERROR(r)) {
+		byhandhooks_free(hooks);
+		return r;
+	}
+	*hooks_p = hooks;
+	return RET_OK;
+}
+
+/* Find the first hook whose three globs match section, priority and
+ * filename; *touse starts as NULL and receives the match. Returns false
+ * once a hook was already chosen (only one hook fires per file). */
+bool byhandhooks_matched(const struct byhandhook *list, const struct byhandhook **touse, const char *section, const char *priority, const char *filename) {
+	const struct byhandhook *h;
+
+	/* for each file the first matching hook is called
+	 * it might later be extended to allow multiple with some keywords */
+	if (*touse != NULL)
+		/* if ((*touse)->nonexclusive) list = (*touse)->next ; else */
+		return false;
+	for (h = list ; h != NULL ; h = h->next) {
+		if (!globmatch(section, h->sectionglob))
+			continue;
+		if (!globmatch(priority, h->priorityglob))
+			continue;
+		if (!globmatch(filename, h->filenameglob))
+			continue;
+		*touse = h;
+		return true;
+	}
+	return false;
+}
+
+/* Run a byhand hook script with arguments
+ * (codename, section, priority, name, fullfilename), wait for it and
+ * translate its exit status into a retvalue. */
+retvalue byhandhook_call(const struct byhandhook *h, const char *codename, const char *section, const char *priority, const char *name, const char *fullfilename) {
+	pid_t child;
+
+	child = fork();
+	if (child == 0) {
+		/* Try to close all open fd but 0,1,2 */
+		closefrom(3);
+		sethookenvironment(causingfile, NULL, NULL, NULL);
+		(void)execl(h->script, h->script, codename,
+				section, priority, name,
+				fullfilename, (char*)NULL);
+		/* only reached when execl itself failed */
+		{
+			int e = errno;
+			fprintf(stderr, "Error %d executing '%s': %s\n",
+					e, h->script,
+					strerror(e));
+		}
+		_exit(255);
+	}
+	if (child < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d forking: %s!\n", e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	/* wait for the child, retrying on EINTR */
+	while (true) {
+		int status;
+		pid_t pid;
+
+		pid = waitpid(child, &status, 0);
+		if (pid == child) {
+			if (WIFEXITED(status)) {
+				if (WEXITSTATUS(status) == 0) {
+					return RET_OK;
+				}
+				fprintf(stderr,
+"Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' failed with exit code %d!\n",
+						h->script, codename,
+						section, priority, name,
+						fullfilename,
+						(int)(WEXITSTATUS(status)));
+			} else if (WIFSIGNALED(status)) {
+				fprintf(stderr,
+"Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' killed by signal %d!\n",
+						h->script, codename,
+						section, priority, name,
+						fullfilename,
+						(int)(WTERMSIG(status)));
+			} else {
+				fprintf(stderr,
+"Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' failed!\n",
+						h->script, codename,
+						section, priority, name,
+						fullfilename);
+			}
+			return RET_ERROR;
+		} else if (pid == (pid_t)-1) {
+			int e = errno;
+
+			if (e == EINTR)
+				continue;
+			fprintf(stderr,
+"Error %d calling waitpid on byhandhook child: %s\n",
+				e, strerror(e));
+			return RET_ERRNO(e);
+		}
+	}
+	/* NOT REACHED */
+}
+
diff --git a/byhandhook.h b/byhandhook.h
new file mode 100644
index 0000000..dd72e02
--- /dev/null
+++ b/byhandhook.h
@@ -0,0 +1,26 @@
+#ifndef REPREPRO_BYHANDHOOK_H
+#define REPREPRO_BYHANDHOOK_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+
+/* opaque config parser state; full definition in configparser.h
+ * (replaces a duplicated REPREPRO_ERROR_H block that could never trigger) */
+struct configiterator;
+
+struct byhandhook;
+
+retvalue byhandhooks_parse(struct configiterator *, /*@out@*/struct byhandhook **);
+
+/* 2nd argument starts as NULL, returns true as long as there are hooks */
+bool byhandhooks_matched(const struct byhandhook *, const struct byhandhook **, const char * /*section*/, const char * /*priority*/, const char * /*name*/);
+
+retvalue byhandhook_call(const struct byhandhook *, const char * /*codename*/, const char * /*section*/, const char * /*priority*/, const char * /*basename*/, const char * /*fullfilename*/);
+
+void byhandhooks_free(/*@null@*//*@only@*/struct byhandhook *);
+
+#endif
+
diff --git a/changes.c b/changes.c
new file mode 100644
index 0000000..8b4b5c8
--- /dev/null
+++ b/changes.c
@@ -0,0 +1,335 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include "error.h"
+#include "names.h"
+#include "uncompression.h"
+#include "checksums.h"
+#include "changes.h"
+
+/* Parse one line of a .changes 'Files:' field, of the form
+ *   <md5sum> <size> <section> <priority> <filename>
+ * classifying the file by its name suffix (.deb/.udeb/.ddeb/.dsc/.tar/
+ * .diff/.log/...), and extracting name and architecture from it.
+ * hash_p and size_p are set to point INTO fileline (not copied);
+ * section, priority, basename and name are newly allocated (caller
+ * frees).  Returns RET_OK, RET_ERROR on malformed input, or
+ * RET_ERROR_OOM. */
+retvalue changes_parsefileline(const char *fileline, /*@out@*/filetype *result_type, /*@out@*/char **result_basename, /*@out@*/struct hash_data *hash_p, /*@out@*/struct hash_data *size_p, /*@out@*/char **result_section, /*@out@*/char **result_priority, /*@out@*/architecture_t *result_architecture, /*@out@*/char **result_name) {
+
+ const char *p, *md5start, *md5end;
+ const char *sizestart, *sizeend;
+ const char *sectionstart, *sectionend;
+ const char *priostart, *prioend;
+ const char *filestart, *nameend, *fileend;
+ const char *archstart, *archend;
+ const char *versionstart;
+ filetype type;
+ char *section, *priority, *basefilename, *name;
+ architecture_t architecture;
+ size_t l;
+ bool checkfilename = false;
+
+ /* scan the five whitespace-separated fields */
+ p = fileline;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ md5start = p;
+ while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f'))
+ p++;
+ if (*p == '\0') {
+ fprintf(stderr, "Missing md5sum in '%s'!\n", fileline);
+ return RET_ERROR;
+ }
+ if (!xisspace(*p)) {
+ fprintf(stderr, "Malformed md5 hash in '%s'!\n", fileline);
+ return RET_ERROR;
+ }
+ md5end = p;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ /* skip leading zeros of the size field */
+ while (*p == '0' && p[1] >= '0' && p[1] <= '9')
+ p++;
+ sizestart = p;
+ while (*p >= '0' && *p <= '9')
+ p++;
+ if (*p == '\0') {
+ fprintf(stderr,
+"Missing size (second argument) in '%s'!\n", fileline);
+ return RET_ERROR;
+ }
+ if (!xisspace(*p)) {
+ fprintf(stderr,
+"Malformed size (second argument) in '%s'!\n", fileline);
+ return RET_ERROR;
+ }
+ sizeend = p;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ sectionstart = p;
+ while (*p !='\0' && !xisspace(*p))
+ p++;
+ sectionend = p;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ priostart = p;
+ while (*p !='\0' && !xisspace(*p))
+ p++;
+ prioend = p;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ filestart = p;
+ while (*p !='\0' && !xisspace(*p))
+ p++;
+ fileend = p;
+ while (*p !='\0' && xisspace(*p))
+ p++;
+ if (*p != '\0') {
+ fprintf(stderr,
+"Unexpected sixth argument in '%s'!\n", fileline);
+ return RET_ERROR;
+ }
+ if (*md5start == '\0' || *sizestart == '\0' || *sectionstart == '\0'
+ || *priostart == '\0' || *filestart == '\0') {
+ fprintf(stderr,
+"Wrong number of arguments in '%s' (5 expected)!\n",
+ fileline);
+ return RET_ERROR;
+ }
+ /* section "byhand" or "raw-*" marks a byhand file: no name or
+ * architecture is parsed out of the filename in that case */
+ if ((sectionend - sectionstart == 6 &&
+ strncmp(sectionstart, "byhand", 6) == 0) ||
+ (sectionend - sectionstart > 4 &&
+ strncmp(sectionstart, "raw-", 4) == 0)) {
+ section = strndup(sectionstart, sectionend - sectionstart);
+ priority = strndup(priostart, prioend - priostart);
+ basefilename = strndup(filestart, fileend - filestart);
+ if (FAILEDTOALLOC(section) || FAILEDTOALLOC(priority) ||
+ FAILEDTOALLOC(basefilename)) {
+ free(section); free(priority);
+ free(basefilename);
+ return RET_ERROR_OOM;
+ }
+ hash_p->start = md5start;
+ hash_p->len = md5end - md5start;
+ size_p->start = sizestart;
+ size_p->len = sizeend - sizestart;
+ *result_section = section;
+ *result_priority = priority;
+ *result_basename = basefilename;
+ *result_architecture = atom_unknown;
+ *result_name = NULL;
+ *result_type = fe_BYHAND;
+ return RET_OK;
+ }
+
+ /* the package name is everything up to the first underscore */
+ p = filestart;
+ while (*p != '\0' && *p != '_' && !xisspace(*p))
+ p++;
+ if (*p != '_') {
+ if (*p == '\0')
+ fprintf(stderr,
+"No underscore found in file name in '%s'!\n",
+ fileline);
+ else
+ fprintf(stderr,
+"Unexpected character '%c' in file name in '%s'!\n",
+ *p, fileline);
+ return RET_ERROR;
+ }
+ nameend = p;
+ p++;
+ versionstart = p;
+
+ /* changing 3.0 format to now also allow _ in source files
+ * makes this parsing quite more ugly... */
+
+ while (*p !='\0' && !xisspace(*p))
+ p++;
+ l = p - versionstart;
+
+ /* identify the binary types (they have no compression
+ * and will need a _ */
+
+ if (l >= 4 && memcmp(p-4, ".deb", 4) == 0)
+ type = fe_DEB;
+ else if (l >= 5 && memcmp(p-5, ".ddeb", 5) == 0)
+ type = fe_DDEB;
+ else if (l >= 5 && memcmp(p-5, ".udeb", 5) == 0)
+ type = fe_UDEB;
+ else
+ type = fe_UNKNOWN;
+
+ if (type != fe_UNKNOWN) {
+ /* a _ should separate the version from the rest */
+ p = versionstart;
+ names_overversion(&p, true);
+ if (*p != '\0' && *p != '_') {
+ fprintf(stderr,
+"Unexpected character '%c' in file name within '%s'!\n", *p, fileline);
+ return RET_ERROR;
+ }
+ if (*p != '_') {
+ fprintf(stderr,
+"Cannot cope with .[u]deb filename not containing an underscore (in '%s')!",
+ fileline);
+ return RET_ERROR;
+ }
+ p++;
+ archstart = p;
+ if (type == fe_DEB)
+ archend = versionstart + l - 4;
+ else {
+ assert (type == fe_DDEB || type == fe_UDEB);
+ archend = versionstart + l - 5;
+ }
+ if (archend - archstart == 6 &&
+ strncmp(archstart, "source", 6) == 0) {
+ fprintf(stderr,
+"Architecture 'source' not allowed for .[u]debs ('%s')!\n", filestart);
+ return RET_ERROR;
+ }
+ } else {
+ enum compression c;
+ const char *eoi;
+ bool issignature = false;
+
+ /* without those, it gets more complicated.
+ * It's not .deb, .ddeb or .udeb, so most likely a
+ * source file (or perhaps a log (reprepro extension)) */
+
+ /* if it uses a known compression, things are easy,
+ * so try this first: */
+
+ if (l > 4 && memcmp(versionstart + l - 4, ".asc", 4) == 0 ) {
+ issignature = true;
+ l -= 4;
+ }
+ c = compression_by_suffix(versionstart, &l);
+ p = versionstart + l;
+
+ archstart = "source";
+ archend = archstart + 6;
+ if (l > 9 && strncmp(p-9, ".orig.tar", 9) == 0) {
+ type = fe_ORIG;
+ eoi = p - 9;
+ } else if (l > 4 && strncmp(p-4, ".tar", 4) == 0) {
+ type = fe_TAR;
+ eoi = p - 4;
+ } else if (issignature) {
+ /* only .tar.* files are allowed to have .asc files: */
+ issignature = false;
+ } else if (l > 5 && strncmp(p-5, ".diff", 5) == 0) {
+ type = fe_DIFF;
+ eoi = p - 5;
+ } else if (l > 4 && strncmp(p-4, ".dsc", 4) == 0
+ && c == c_none) {
+ type = fe_DSC;
+ eoi = p - 4;
+ } else if (l > 4 && strncmp(p-4, ".git", 4) == 0
+ && c == c_none) {
+ type = fe_ALTSRC;
+ eoi = p - 4;
+ } else if (l > 4 && strncmp(p-4, ".log", 4) == 0) {
+ type = fe_LOG;
+ eoi = p - 4;
+ } else if (l > 6 && strncmp(p-6, ".build", 6) == 0) {
+ type = fe_LOG;
+ eoi = p - 6;
+ } else if (l > 10 && strncmp(p-10, ".buildinfo", 10) == 0) {
+ type = fe_BUILDINFO;
+ eoi = p - 10;
+ }
+ if (type != fe_UNKNOWN) {
+ /* check for a proper version */
+ p = versionstart;
+ names_overversion(&p, true);
+ if (p >= eoi) {
+ /* all well */
+ } else if (type == fe_TAR) {
+ /* a tar might be a component with ugly
+ * data between .orig- and the .tar.c */
+ const char *o = strstr(versionstart, ".orig-");
+ if (o == NULL || o > eoi) {
+ fprintf(stderr,
+"Unexpected character '%c' in file name within '%s'!\n",
+ *p, fileline);
+ return RET_ERROR;
+ }
+ checkfilename = true;
+ } else if (type == fe_LOG || type == fe_BUILDINFO) {
+ if (*p == '_') {
+ archstart = p + 1;
+ archend = eoi;
+ checkfilename = true;
+ } else {
+ fprintf(stderr,
+"Unexpected character '%c' in file name within '%s'!\n",
+ *p, fileline);
+ /* NOTE(review): unlike the sibling
+ * branches there is no
+ * return RET_ERROR here, so parsing
+ * continues with architecture
+ * "source" — confirm this is
+ * intended */
+ }
+ } else {
+ fprintf(stderr,
+"Unexpected character '%c' in file name within '%s'!\n",
+ *p, fileline);
+ return RET_ERROR;
+
+ }
+ if (issignature)
+ type = fe_SIG;
+ } else {
+ /* everything else is assumed to be source */
+ checkfilename = true;
+ fprintf(stderr,
+"Unknown file type: '%s', assuming source format...\n", fileline);
+ }
+ }
+ section = strndup(sectionstart, sectionend - sectionstart);
+ priority = strndup(priostart, prioend - priostart);
+ basefilename = strndup(filestart, fileend - filestart);
+ // TODO: this does not make much sense for log files, as they might
+ // list multiple..
+ architecture = architecture_find_l(archstart, archend - archstart);
+ name = strndup(filestart, nameend - filestart);
+ if (FAILEDTOALLOC(section) || FAILEDTOALLOC(priority) ||
+ FAILEDTOALLOC(basefilename) || FAILEDTOALLOC(name)) {
+ free(section); free(priority);
+ free(basefilename); free(name);
+ return RET_ERROR_OOM;
+ }
+ if (checkfilename || !atom_defined(architecture)) {
+ retvalue r;
+
+ /* as we no longer run properversion over the whole
+ * rest of the string, at least make sure nothing evil
+ * is in this name */
+ r = properfilename(basefilename);
+ if (!RET_IS_OK(r)) {
+ assert (r != RET_NOTHING);
+ free(section); free(priority);
+ free(basefilename); free(name);
+ return r;
+ }
+ }
+ hash_p->start = md5start;
+ hash_p->len = md5end - md5start;
+ size_p->start = sizestart;
+ size_p->len = sizeend - sizestart;
+ *result_section = section;
+ *result_priority = priority;
+ *result_basename = basefilename;
+ *result_architecture = architecture;
+ *result_name = name;
+ *result_type = type;
+ return RET_OK;
+}
diff --git a/changes.h b/changes.h
new file mode 100644
index 0000000..d046e18
--- /dev/null
+++ b/changes.h
@@ -0,0 +1,24 @@
+#ifndef REPREPRO_CHANGES_H
+#define REPREPRO_CHANGES_H
+
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+/* classification of a file listed in a .changes 'Files:' field,
+ * as determined by changes_parsefileline() */
+typedef enum {
+ fe_UNKNOWN=0,
+ fe_DEB, fe_UDEB, fe_DDEB,
+ fe_DSC, fe_DIFF, fe_ORIG, fe_TAR,
+ fe_SIG,
+ fe_ALTSRC,
+ fe_BYHAND, fe_LOG, fe_CHANGES,
+ fe_BUILDINFO
+} filetype;
+
+/* files that represent a package to be added to an index */
+#define FE_PACKAGE(ft) ((ft) == fe_DEB || (ft) == fe_UDEB || (ft) == fe_DSC || (ft) == fe_DDEB)
+/* binary package files */
+#define FE_BINARY(ft) ((ft) == fe_DEB || (ft) == fe_DDEB || (ft) == fe_UDEB)
+/* source package files (fe_UNKNOWN is assumed to be source) */
+#define FE_SOURCE(ft) ((ft) == fe_DIFF || (ft) == fe_ORIG || (ft) == fe_TAR || (ft) == fe_DSC || (ft) == fe_UNKNOWN || (ft) == fe_ALTSRC || (ft) == fe_SIG)
+
+struct hash_data;
+retvalue changes_parsefileline(const char * /*fileline*/, /*@out@*/filetype *, /*@out@*/char ** /*result_basename*/, /*@out@*/struct hash_data *, /*@out@*/struct hash_data *, /*@out@*/char ** /*result_section*/, /*@out@*/char ** /*result_priority*/, /*@out@*/architecture_t *, /*@out@*/char ** /*result_name*/);
+#endif
diff --git a/checkin.c b/checkin.c
new file mode 100644
index 0000000..b8321a9
--- /dev/null
+++ b/checkin.c
@@ -0,0 +1,1657 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <ctype.h>
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "mprintf.h"
+#include "atoms.h"
+#include "checksums.h"
+#include "names.h"
+#include "filecntl.h"
+#include "dirs.h"
+#include "chunks.h"
+#include "reference.h"
+#include "signature.h"
+#include "sources.h"
+#include "files.h"
+#include "tracking.h"
+#include "guesscomponent.h"
+#include "override.h"
+#include "checkindsc.h"
+#include "checkindeb.h"
+#include "checkin.h"
+#include "uploaderslist.h"
+#include "log.h"
+#include "dpkgversions.h"
+#include "changes.h"
+
+/* Things to do when including a .changes-file:
+ * - Read in the chunk of the possible signed file.
+ * (In later versions possibly checking the signature)
+ * - Parse it, extracting:
+ * + Distribution
+ * + Source
+ * + Architecture
+ * + Binary
+ * + Version
+ * + ...
+ * + Files
+ * - Calculate what files are expectable...
+ * - Compare supplied filed with files expected.
+ * - (perhaps: write what was done and changes to some logfile)
+ * - add supplied files to the pool and register them in files.db
+ * - add the .dsc-files via checkindsc.c
+ * - add the .deb-filed via checkindeb.c
+ *
+ */
+
+/* one file listed in the Files: field of a .changes file */
+struct fileentry {
+ struct fileentry *next;
+ char *basename;
+ filetype type;
+ struct checksums *checksums;
+ char *section;
+ char *priority;
+ /* architecture parsed from the file name (may stay
+ * architecture_all or undefined for logs) */
+ architecture_t architecture_into;
+ char *name;
+ /* this might be different for different files,
+ * (though this is only allowed in rare cases),
+ * will be set by _fixfields */
+ component_t component;
+ /* only set after changes_includefiles */
+ char *filekey;
+ /* was already found in the pool before */
+ bool wasalreadythere;
+ /* set between checkpkg and includepkg */
+ struct strlist needed_filekeys;
+ /* payload depends on type: deb for FE_BINARY, dsc for fe_DSC */
+ union { struct dsc_headers dsc;
+ struct debpackage *deb;} pkg;
+ /* only valid while parsing: */
+ struct hashes hashes;
+};
+
+/* in-memory representation of one parsed .changes file */
+struct changes {
+ /* Things read by changes_read: */
+ char *source, *sourceversion, *changesversion;
+ struct strlist distributions,
+ architectures,
+ binaries;
+ struct fileentry *files;
+ char *control;
+ struct signatures *signatures;
+ /* Things to be set by changes_fixfields: */
+ /* the component source files are put into */
+ component_t srccomponent;
+ /* != NULL if changesfile was put into pool/ */
+ /*@null@*/ char *changesfilekey;
+ /* the directory where source files are put into */
+ char *srcdirectory;
+ /* (only to warn if multiple are used) */
+ component_t firstcomponent;
+ /* the directory the .changes file resides in */
+ char *incomingdirectory;
+ /* the Version: and the version in Source: differ */
+ bool isbinnmu;
+};
+
+/* free a whole linked list of fileentry, including the per-type
+ * union payload (deb for binaries, dsc headers for fe_DSC) */
+static void freeentries(/*@only@*/struct fileentry *entry) {
+ struct fileentry *h;
+
+ while (entry != NULL) {
+ h = entry->next;
+ free(entry->filekey);
+ free(entry->basename);
+ checksums_free(entry->checksums);
+ free(entry->section);
+ free(entry->priority);
+ free(entry->name);
+ /* pkg is a union: only one member is valid, chosen by type */
+ if (FE_BINARY(entry->type))
+ deb_free(entry->pkg.deb);
+ else if (entry->type == fe_DSC) {
+ strlist_done(&entry->needed_filekeys);
+ sources_done(&entry->pkg.dsc);
+ }
+ free(entry);
+ entry = h;
+ }
+}
+
+/* release a struct changes and everything owned by it;
+ * accepts NULL (then a no-op apart from free(NULL)) */
+static void changes_free(/*@only@*/struct changes *changes) {
+ if (changes != NULL) {
+ free(changes->source);
+ free(changes->sourceversion);
+ free(changes->changesversion);
+ strlist_done(&changes->architectures);
+ strlist_done(&changes->binaries);
+ freeentries(changes->files);
+ strlist_done(&changes->distributions);
+ free(changes->control);
+ free(changes->srcdirectory);
+ free(changes->changesfilekey);
+// trackedpackage_free(changes->trackedpkg);
+ free(changes->incomingdirectory);
+ signatures_free(changes->signatures);
+ }
+ free(changes);
+}
+
+
+/* parse one 'Files:' line into a new fileentry and prepend it to
+ * *entry.  Returns RET_NOTHING (and sets *ignoredlines_p) when the
+ * file is filtered out by package type, architecture restriction or
+ * the include{byhand,logs,buildinfos} options; RET_OK when added. */
+static retvalue newentry(struct fileentry **entry, const char *fileline, const struct atomlist *packagetypes, const struct atomlist *forcearchitectures, const char *sourcename, bool includebyhand, bool includelogs, bool includebuildinfos, bool *ignoredlines_p, bool skip_binaries) {
+ struct fileentry *e;
+ retvalue r;
+
+ e = zNEW(struct fileentry);
+ if (FAILEDTOALLOC(e))
+ return RET_ERROR_OOM;
+
+ r = changes_parsefileline(fileline, &e->type, &e->basename,
+ &e->hashes.hashes[cs_md5sum],
+ &e->hashes.hashes[cs_length],
+ &e->section, &e->priority, &e->architecture_into,
+ &e->name);
+ if (RET_WAS_ERROR(r)) {
+ free(e);
+ return r;
+ }
+ assert (RET_IS_OK(r));
+ if (e->type == fe_BYHAND) {
+ if (!includebyhand) {
+ // TODO: at least check them and fail if wrong?
+ fprintf(stderr, "Ignoring byhand file: '%s'!\n",
+ e->basename);
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ e->next = *entry;
+ *entry = e;
+ return RET_OK;
+ }
+ /* drop files whose package type is excluded by -T */
+ if (FE_SOURCE(e->type) && limitations_missed(packagetypes, pt_dsc)) {
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ if (e->type == fe_DEB && limitations_missed(packagetypes, pt_deb)) {
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ if (e->type == fe_UDEB && limitations_missed(packagetypes, pt_udeb)) {
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ if (e->type == fe_DDEB && limitations_missed(packagetypes, pt_ddeb)) {
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ if (e->type != fe_LOG && e->type != fe_BUILDINFO &&
+ e->architecture_into == architecture_source &&
+ strcmp(e->name, sourcename) != 0) {
+ fprintf(stderr,
+"Warning: File '%s' looks like source but does not start with '%s_'!\n",
+ e->basename, sourcename);
+ } else if (e->type == fe_BUILDINFO) {
+ if (strcmp(e->name, sourcename) != 0) {
+ fprintf(stderr,
+"Warning: File '%s' looks like buildinfo but does not start with '%s_'!\n",
+ e->basename, sourcename);
+ }
+ if (!includebuildinfos) {
+ if (verbose > 2)
+ fprintf(stderr,
+"Ignoring buildinfo file: '%s'!\n", e->basename);
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ /* otherwise the normal rules like for .deb, see below */
+ }
+ if (e->type == fe_LOG) {
+ if (strcmp(e->name, sourcename) != 0) {
+ fprintf(stderr,
+"Warning: File '%s' looks like log but does not start with '%s_'!\n",
+ e->basename, sourcename);
+ }
+ if (!includelogs) {
+ // TODO: at least check them and fail if wrong?
+ fprintf(stderr, "Ignoring log file: '%s'!\n",
+ e->basename);
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ /* a log file without parseable architecture (atom undefined)
+ * might still belong to an forced architecture (as it might
+ * list multiples), so cannot be excluded here: */
+ if (atom_defined(e->architecture_into) &&
+ limitations_missed(forcearchitectures,
+ e->architecture_into)) {
+ if (verbose > 1)
+ fprintf(stderr,
+"Skipping '%s' as not for architecture ",
+ e->basename);
+ atomlist_fprint(stderr, at_architecture,
+ forcearchitectures);
+ fputs(".\n", stderr);
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ } else if (forcearchitectures != NULL) {
+ if (e->architecture_into == architecture_all &&
+ !skip_binaries) {
+ if (verbose > 2) {
+ fprintf(stderr,
+"Limiting '%s' to architectures ",
+ e->basename);
+ atomlist_fprint(stderr, at_architecture,
+ forcearchitectures);
+ fputs(" as requested.\n", stderr);
+ }
+ /* keep e->architecture_into to all, this wil
+ * be restricted to forcearchitectures when added */
+ } else if (!atomlist_in(forcearchitectures,
+ e->architecture_into)) {
+ if (verbose > 1) {
+ if (atom_defined(e->architecture_into))
+ fprintf(stderr,
+"Skipping '%s' as architecture '%s' is not in the requested set.\n",
+ e->basename,
+ atoms_architectures[
+ e->architecture_into]);
+ else
+ fprintf(stderr,
+"Skipping '%s' as architecture is not in the requested set.\n",
+ e->basename);
+ }
+ freeentries(e);
+ *ignoredlines_p = true;
+ return RET_NOTHING;
+ }
+ }
+
+ e->next = *entry;
+ *entry = e;
+ return RET_OK;
+}
+
+/* Parse the Files-header to see what kind of files we carry around */
+static retvalue changes_parsefilelines(const char *filename, struct changes *changes, const struct strlist *filelines, const struct atomlist *packagetypes, const struct atomlist *forcearchitectures, bool includebyhand, bool includelogs, bool includebuildinfos, bool *ignoredlines_p, bool skip_binaries) {
+ retvalue result, r;
+ int i;
+
+ assert (changes->files == NULL);
+ result = RET_NOTHING;
+
+ for (i = 0 ; i < filelines->count ; i++) {
+ const char *fileline = filelines->values[i];
+
+ r = newentry(&changes->files, fileline,
+ packagetypes, forcearchitectures,
+ changes->source,
+ includebyhand, includelogs, includebuildinfos,
+ ignoredlines_p, skip_binaries);
+ RET_UPDATE(result, r);
+ if (r == RET_ERROR)
+ return r;
+ }
+ if (result == RET_NOTHING) {
+ fprintf(stderr,
+"%s: Not enough files in .changes!\n", filename);
+ return RET_ERROR;
+ }
+ return result;
+}
+
+/* merge one extended checksum field (e.g. Checksums-Sha1) into the
+ * fileentries already built from 'Files:'.  Each line must match an
+ * existing entry by basename and agree on the size; the hash is then
+ * stored into e->hashes for checksums_initialize() later.  Unknown
+ * basenames are an error unless ignoresomefiles (files may have been
+ * filtered out when parsing 'Files:'). */
+static retvalue changes_addhashes(const char *filename, struct changes *changes, enum checksumtype cs, struct strlist *filelines, bool ignoresomefiles) {
+ int i;
+ retvalue r;
+
+ for (i = 0 ; i < filelines->count ; i++) {
+ struct hash_data data, size;
+ const char *fileline = filelines->values[i];
+ struct fileentry *e;
+ const char *basefilename;
+
+ r = hashline_parse(filename, fileline, cs, &basefilename,
+ &data, &size);
+ if (r == RET_NOTHING)
+ continue;
+ if (RET_WAS_ERROR(r))
+ return r;
+ /* linear search for the entry with this basename */
+ e = changes->files;
+ while (e != NULL && strcmp(e->basename, basefilename) != 0)
+ e = e->next;
+ if (e == NULL) {
+ if (ignoresomefiles)
+ /* we might already have ignored files when
+ * creating changes->files, so we cannot say
+ * if this is an error. */
+ continue;
+ fprintf(stderr,
+"In '%s': file '%s' listed in '%s' but not in 'Files'\n",
+ filename, basefilename,
+ changes_checksum_names[cs]);
+ return RET_ERROR;
+ }
+ /* the size must be consistent between all checksum fields */
+ if (e->hashes.hashes[cs_length].len != size.len ||
+ memcmp(e->hashes.hashes[cs_length].start,
+ size.start, size.len) != 0) {
+ fprintf(stderr,
+"In '%s': file '%s' listed in '%s' with different size than in 'Files'\n",
+ filename, basefilename,
+ changes_checksum_names[cs]);
+ return RET_ERROR;
+ }
+ e->hashes.hashes[cs] = data;
+ }
+ return RET_OK;
+}
+
+/* turn the raw hash_data collected for each file into a proper
+ * struct checksums object; aborts on the first error */
+static retvalue changes_finishhashes(struct changes *changes) {
+	struct fileentry *file = changes->files;
+
+	while (file != NULL) {
+		retvalue result;
+
+		result = checksums_initialize(&file->checksums,
+				file->hashes.hashes);
+		if (RET_WAS_ERROR(result))
+			return result;
+		file = file->next;
+	}
+	return RET_OK;
+}
+
+
+/* ensure the control chunk contains the given field; a missing field
+ * is an error unless suppressed via --ignore=missingfield */
+static retvalue check(const char *filename, struct changes *changes, const char *field) {
+	retvalue result;
+
+	result = chunk_checkfield(changes->control, field);
+	if (result != RET_NOTHING)
+		return result;
+	if (IGNORING(missingfield,
+"In '%s': Missing '%s' field!\n", filename, field))
+		return RET_OK;
+	return RET_ERROR;
+}
+
+/* read and parse a .changes file: verify its signature chunk, check
+ * mandatory fields, parse Source/Version/Architecture/Distribution
+ * and the Files/Checksums-* fields into a freshly allocated struct
+ * changes (*changes on success, caller frees via changes_free).
+ * (review fix: error message said "Malforce Version number") */
+static retvalue changes_read(const char *filename, /*@out@*/struct changes **changes, const struct atomlist *packagetypes, const struct atomlist *forcearchitectures, bool includebyhand, bool includelogs, bool includebuildinfos) {
+ retvalue r;
+ struct changes *c;
+ struct strlist filelines[cs_hashCOUNT];
+ enum checksumtype cs;
+ bool broken, ignoredlines;
+ int versioncmp;
+ bool skip_binaries;
+
+/* E: treat RET_NOTHING as an error with message err; both macros
+ * free c and return on error */
+#define E(err) { \
+ if (r == RET_NOTHING) { \
+ fprintf(stderr, "In '%s': " err "\n", filename); \
+ r = RET_ERROR; \
+ } \
+ if (RET_WAS_ERROR(r)) { \
+ changes_free(c); \
+ return r; \
+ } \
+ }
+#define R { \
+ if (RET_WAS_ERROR(r)) { \
+ changes_free(c); \
+ return r; \
+ } \
+ }
+
+
+ c = zNEW(struct changes);
+ if (FAILEDTOALLOC(c))
+ return RET_ERROR_OOM;
+ r = signature_readsignedchunk(filename, filename,
+ &c->control, &c->signatures, &broken);
+ R;
+ if (broken && !IGNORING(brokensignatures,
+"'%s' contains only broken signatures.\n"
+"This most likely means the file was damaged or edited improperly.\n",
+ filename)) {
+ r = RET_ERROR;
+ R;
+ }
+ r = check(filename, c, "Format");
+ R;
+ r = check(filename, c, "Date");
+ R;
+ r = chunk_getnameandversion(c->control, "Source",
+ &c->source, &c->sourceversion);
+ E("Missing 'Source' field");
+ r = propersourcename(c->source);
+ R;
+ if (c->sourceversion != NULL) {
+ r = properversion(c->sourceversion);
+ R;
+ }
+ r = chunk_getwordlist(c->control, "Architecture", &c->architectures);
+ E("Missing 'Architecture' field");
+ r = chunk_getwordlist(c->control, "Binary", &c->binaries);
+ if (r == RET_NOTHING) {
+ /* this could print a warning if architectures
+ * contains anything but 'source', but the .deb
+ * files are checked anyway... */
+ strlist_init(&c->binaries);
+ }
+ R;
+ r = chunk_getvalue(c->control, "Version", &c->changesversion);
+ E("Missing 'Version' field");
+ r = properversion(c->changesversion);
+ E("Malformed Version number");
+ if (c->sourceversion == NULL) {
+ c->sourceversion = strdup(c->changesversion);
+ if (FAILEDTOALLOC(c->sourceversion)) {
+ changes_free(c);
+ return RET_ERROR_OOM;
+ }
+ c->isbinnmu = false;
+ } else {
+ r = dpkgversions_cmp(c->sourceversion, c->changesversion,
+ &versioncmp);
+ E("Error comparing versions. (That should have been caught earlier, why now?)");
+ c->isbinnmu = versioncmp != 0;
+ }
+ r = chunk_getwordlist(c->control, "Distribution", &c->distributions);
+ E("Missing 'Distribution' field");
+ r = check(filename, c, "Maintainer");
+ R;
+ r = chunk_getextralinelist(c->control,
+ changes_checksum_names[cs_md5sum],
+ &filelines[cs_md5sum]);
+ E("Missing 'Files' field!");
+ ignoredlines = false;
+ /* check if forcearchitectures allows non-source binaries,
+ * (used to check if Architecture all are skipped) */
+ if (forcearchitectures == NULL) {
+ skip_binaries = false;
+ } else {
+ skip_binaries = !atomlist_hasexcept(forcearchitectures,
+ architecture_source);
+ }
+ r = changes_parsefilelines(filename, c, &filelines[cs_md5sum],
+ packagetypes, forcearchitectures,
+ includebyhand, includelogs, includebuildinfos,
+ &ignoredlines, skip_binaries);
+ if (RET_WAS_ERROR(r)) {
+ strlist_done(&filelines[cs_md5sum]);
+ changes_free(c);
+ return r;
+ }
+ /* merge the extended checksum fields (sha1, sha256, ...) */
+ for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) {
+ r = chunk_getextralinelist(c->control,
+ changes_checksum_names[cs], &filelines[cs]);
+ if (RET_IS_OK(r))
+ r = changes_addhashes(filename, c, cs, &filelines[cs],
+ ignoredlines);
+ else
+ strlist_init(&filelines[cs]);
+ if (RET_WAS_ERROR(r)) {
+ while (cs-- > cs_md5sum)
+ strlist_done(&filelines[cs]);
+ changes_free(c);
+ return r;
+ }
+ }
+ r = changes_finishhashes(c);
+ for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++)
+ strlist_done(&filelines[cs]);
+ R;
+ r = dirs_getdirectory(filename, &c->incomingdirectory);
+ R;
+
+ *changes = c;
+ return RET_OK;
+#undef E
+#undef R
+}
+
+/* finalize section, priority and component of every file: apply
+ * override files and forced values, guess the component, and compute
+ * changes->srcdirectory.  Also verifies a .dsc is present when other
+ * source files are, and that udebs go into a UDebComponent. */
+static retvalue changes_fixfields(const struct distribution *distribution, const char *filename, struct changes *changes, component_t forcecomponent, /*@null@*/const char *forcesection, /*@null@*/const char *forcepriority) {
+ struct fileentry *e;
+ retvalue r;
+ bool needsourcedir = false;
+ struct fileentry *needs_source_package = NULL;
+ bool has_source_package = false;
+
+ r = propersourcename(changes->source);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ e = changes->files;
+ if (e == NULL) {
+ fprintf(stderr, "No files given in '%s'!\n", filename);
+ return RET_ERROR;
+ }
+
+ for (; e != NULL ; e = e->next) {
+ const struct overridedata *oinfo = NULL;
+ const char *force = NULL;
+
+ /* byhand/buildinfo/log files only need the source dir */
+ if (e->type == fe_BYHAND ||
+ e->type == fe_BUILDINFO ||
+ e->type == fe_LOG) {
+ needsourcedir = true;
+ continue;
+ }
+
+ /* section and priority are only needed for the dsc,
+ * not for the other source files */
+ if (FE_SOURCE(e->type) && !FE_PACKAGE(e->type)) {
+ needs_source_package = e;
+ continue;
+ }
+
+ /* look up the override entry matching this package type */
+ if (forcesection == NULL || forcepriority == NULL) {
+ oinfo = override_search(
+ FE_BINARY(e->type)?(e->type==fe_UDEB?
+ distribution->overrides.udeb
+ :distribution->overrides.deb)
+ :distribution->overrides.dsc,
+ e->name);
+ }
+
+ if (forcesection != NULL)
+ force = forcesection;
+ else
+ force = override_get(oinfo, SECTION_FIELDNAME);
+ if (force != NULL) {
+ free(e->section);
+ e->section = strdup(force);
+ if (FAILEDTOALLOC(e->section))
+ return RET_ERROR_OOM;
+ }
+ if (strcmp(e->section, "unknown") == 0 && verbose >= 0) {
+ fprintf(stderr, "Warning: '%s' contains strange section '%s'!\n",
+ filename, e->section);
+ }
+ if (strcmp(e->section, "-") == 0) {
+ fprintf(stderr,
+"No section specified for '%s' in '%s'!\n", e->basename, filename);
+ return RET_ERROR;
+ }
+ if (forcepriority != NULL)
+ force = forcepriority;
+ else
+ force = override_get(oinfo, PRIORITY_FIELDNAME);
+ if (force != NULL) {
+ free(e->priority);
+ e->priority = strdup(force);
+ if (FAILEDTOALLOC(e->priority))
+ return RET_ERROR_OOM;
+ }
+ if (strcmp(e->priority, "-") == 0) {
+ fprintf(stderr,
+"No priority specified for '%s'!\n", filename);
+ return RET_ERROR;
+ }
+ /* a $Component override may force the component */
+ if (!atom_defined(forcecomponent)) {
+ const char *fc;
+
+ fc = override_get(oinfo, "$Component");
+ if (fc != NULL) {
+ forcecomponent = component_find(fc);
+ if (!atom_defined(forcecomponent)) {
+ fprintf(stderr,
+"Unparseable component '%s' in $Component override of '%s'\n",
+ fc, e->name);
+ return RET_ERROR;
+ }
+ }
+ }
+
+ // I'm undecided here. If this is a udeb, one could also use
+ // distribution->udebcomponents. Though this might result
+ // in not really predictable guesses for the section.
+ r = guess_component(distribution->codename,
+ &distribution->components, changes->source,
+ e->section, forcecomponent,
+ &e->component);
+ if (RET_WAS_ERROR(r))
+ return r;
+ assert(atom_defined(e->component));
+
+ if (!atom_defined(changes->firstcomponent)) {
+ changes->firstcomponent = e->component;
+ } else if (changes->firstcomponent != e->component) {
+ fprintf(stderr,
+"Warning: %s contains files guessed to be in different components ('%s' vs '%s)!\n",
+ filename,
+ atoms_components[e->component],
+ atoms_components[changes->firstcomponent]);
+ }
+
+ if (FE_SOURCE(e->type)) {
+ assert (FE_PACKAGE(e->type));
+ has_source_package = true;
+ if (strcmp(changes->source, e->name) != 0) {
+ r = propersourcename(e->name);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ /* all source files must agree on one component */
+ if (!atom_defined(changes->srccomponent)) {
+ changes->srccomponent = e->component;
+ } else if (changes->srccomponent != e->component) {
+ fprintf(stderr,
+"%s contains source files guessed to be in different components ('%s' vs '%s)!\n",
+ filename,
+ atoms_components[e->component],
+ atoms_components[changes->srccomponent]);
+ return RET_ERROR;
+ }
+ } else if (FE_BINARY(e->type)) {
+ r = properpackagename(e->name);
+ if (RET_WAS_ERROR(r))
+ return r;
+ // Let's just check here, perhaps
+ if (e->type == fe_UDEB &&
+ !atomlist_in(
+ &distribution->udebcomponents,
+ e->component)) {
+ fprintf(stderr,
+"Cannot put file '%s' into component '%s', as it is not listed in UDebComponents!\n",
+ e->basename,
+ atoms_components[e->component]);
+ return RET_ERROR;
+ }
+ } else {
+ assert (FE_SOURCE(e->type) || FE_BINARY(e->type));
+ fprintf(stderr, "Internal Error!\n");
+ return RET_ERROR;
+ }
+ }
+
+ if (needs_source_package != NULL && !has_source_package) {
+ fprintf(stderr,
+"'%s' looks like part of an source package, but no dsc file listed in the .changes file!\n",
+ needs_source_package->basename);
+ return RET_ERROR;
+ }
+ if (atom_defined(changes->srccomponent)) {
+ changes->srcdirectory = calc_sourcedir(changes->srccomponent,
+ changes->source);
+ if (FAILEDTOALLOC(changes->srcdirectory))
+ return RET_ERROR_OOM;
+ } else if (distribution->trackingoptions.includechanges ||
+ needsourcedir) {
+ /* no source package: borrow the component of some
+ * binary package (or the forced one) for byhand/log
+ * and .changes placement */
+ component_t component = forcecomponent;
+ if (!atom_defined(forcecomponent)) {
+ for (e = changes->files ; e != NULL ; e = e->next) {
+ if (FE_PACKAGE(e->type)){
+ component = e->component;
+ break;
+ }
+ }
+ }
+ if (!atom_defined(component)) {
+ fprintf(stderr,
+"No component found to place .changes or byhand files in. Aborting.\n");
+ return RET_ERROR;
+ }
+ changes->srcdirectory = calc_sourcedir(component,
+ changes->source);
+ if (FAILEDTOALLOC(changes->srcdirectory))
+ return RET_ERROR_OOM;
+ }
+
+ return RET_OK;
+}
+
+/* verify at least one file is destined for the given architecture;
+ * an undefined architecture atom is silently skipped.  Missing files
+ * are an error unless suppressed via --ignore=unusedarch */
+static inline retvalue checkforarchitecture(const struct fileentry *e, architecture_t architecture) {
+	const struct fileentry *file;
+
+	if (!atom_defined(architecture))
+		return RET_NOTHING;
+	for (file = e ; file != NULL ; file = file->next) {
+		if (file->architecture_into == architecture)
+			return RET_OK;
+	}
+	if (IGNORING(unusedarch,
+"Architecture header lists architecture '%s', but no files for it!\n",
+			atoms_architectures[architecture]))
+		return RET_OK;
+	return RET_ERROR;
+}
+
+/* Decide whether 'Architecture: all' packages may be added: either the
+ * distribution supports some binary architecture (no forced list given),
+ * or one of the forced architectures is a binary one it carries. */
+static bool can_add_all(const struct atomlist *forcearchitectures, const struct distribution *d) {
+	const struct atomlist *supported = &d->architectures;
+	int j;
+
+	if (forcearchitectures == NULL)
+		return atomlist_hasexcept(supported, architecture_source);
+
+	for (j = 0 ; j < forcearchitectures->count ; j++) {
+		architecture_t candidate = forcearchitectures->atoms[j];
+
+		if (candidate == architecture_source)
+			continue;
+		if (candidate == architecture_all)
+			return atomlist_hasexcept(supported,
+					architecture_source);
+		if (atomlist_in(supported, candidate))
+			return true;
+	}
+	return false;
+}
+
+/* Consistency-check the parsed .changes against the target distribution
+ * before anything is copied:
+ * - every architecture listed in the Architecture: header (or forced
+ *   on the command line) must have at least one file,
+ * - every file's architecture must be acceptable for the distribution
+ *   and listed in the header,
+ * - the source file set (.dsc/.orig/.tar/.diff) must be consistent,
+ * - every detached .asc signature must belong to a listed file.
+ * Returns RET_ERROR* on the first problem not ignored. */
+static retvalue changes_check(const struct distribution *distribution, const char *filename, struct changes *changes, const struct atomlist *forcearchitectures, const struct atomlist *packagetypes) {
+	int i;
+	struct fileentry *e;
+	retvalue r = RET_OK;
+	bool havedsc = false,
+		haveorig = false,
+		havetar = false,
+		havediff = false,
+		havealtsrc = false;
+
+	/* First check for each given architecture, if it has files: */
+	if (forcearchitectures != NULL) {
+		for (i = 0 ; i < forcearchitectures->count ; i++) {
+			architecture_t a = forcearchitectures->atoms[i];
+
+			if (!strlist_in(&changes->architectures,
+						atoms_architectures[a])) {
+				// TODO: check if this is sensible
+				if (!IGNORING(surprisingarch,
+"Architecture header does not list the"
+" architecture '%s' to be forced in!\n",
+						atoms_architectures[a]))
+					return RET_ERROR_MISSING;
+			}
+			r = checkforarchitecture(changes->files, a);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+	} else {
+		/* when a package-type limit (-T) is given, skip header
+		 * entries whose type cannot be included anyway */
+		bool limitedtosource = false;
+		bool limitedtononsource = false;
+
+		if (packagetypes != NULL) {
+			limitedtosource = true;
+			limitedtononsource = true;
+			for (i = 0 ; i < packagetypes->count ; i++) {
+				if (packagetypes->atoms[i] == pt_dsc)
+					limitedtononsource = false;
+				else
+					limitedtosource = false;
+			}
+		}
+
+		for (i = 0 ; i < changes->architectures.count ; i++) {
+			const char *architecture = changes->architectures.values[i];
+			if (strcmp(architecture, "source") == 0) {
+				if (limitedtononsource)
+					continue;
+			} else {
+				if (limitedtosource)
+					continue;
+			}
+			r = checkforarchitecture(changes->files,
+					architecture_find(architecture));
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+	}
+	/* Then check for each file, if its architecture is sensible
+	 * and listed. */
+	for (e = changes->files ; e != NULL ; e = e->next) {
+		if (e->type == fe_BYHAND || e->type == fe_LOG) {
+			/* don't insist on a single architecture for those */
+			continue;
+		}
+		if (atom_defined(e->architecture_into)) {
+			if (e->architecture_into == architecture_all) {
+				/* "all" can be added if at least one binary
+				 * architecture */
+				if (!can_add_all(forcearchitectures,
+							distribution))
+					e->architecture_into = atom_unknown;
+			} else if (!atomlist_in(&distribution->architectures,
+						e->architecture_into))
+				e->architecture_into = atom_unknown;
+		}
+		if (!atom_defined(e->architecture_into)) {
+			fprintf(stderr,
+"Error: '%s' has the wrong architecture to add it to %s!\n",
+					e->basename, distribution->codename);
+			return RET_ERROR;
+
+		}
+		if (!strlist_in(&changes->architectures,
+					atoms_architectures[e->architecture_into])) {
+			if (!IGNORING(surprisingarch,
+"'%s' looks like architecture '%s', but this is not listed in the Architecture-Header!\n",
+					e->basename,
+					atoms_architectures[e->architecture_into]))
+				return RET_ERROR;
+		}
+
+		if (e->type == fe_DSC) {
+			char *calculatedname;
+			if (havedsc) {
+				fprintf(stderr,
+"I don't know what to do with multiple .dsc files in '%s'!\n", filename);
+				return RET_ERROR;
+			}
+			havedsc = true;
+			/* the .dsc must be named <source>_<version>.dsc */
+			calculatedname = calc_source_basename(changes->source,
+					changes->sourceversion);
+			if (FAILEDTOALLOC(calculatedname))
+				return RET_ERROR_OOM;
+			if (strcmp(calculatedname, e->basename) != 0) {
+				fprintf(stderr,
+"dsc file name is '%s' instead of the expected '%s'!\n",
+						e->basename, calculatedname);
+				free(calculatedname);
+				return RET_ERROR;
+			}
+			free(calculatedname);
+		} else if (e->type == fe_DIFF) {
+			if (havediff) {
+				fprintf(stderr,
+"I don't know what to do with multiple .diff files in '%s'!\n", filename);
+				return RET_ERROR;
+			}
+			havediff = true;
+		} else if (e->type == fe_ORIG) {
+			if (haveorig) {
+				fprintf(stderr,
+"I don't know what to do with multiple .orig.tar.gz files in '%s'!\n", filename);
+				return RET_ERROR;
+			}
+			haveorig = true;
+		} else if (e->type == fe_TAR) {
+			havetar = true;
+		} else if (e->type == fe_ALTSRC) {
+			havealtsrc = true;
+		}
+	}
+	/* sanity checks on the combination of source files found: */
+	if (havetar && !haveorig && havediff) {
+		fprintf(stderr,
+"I don't know what to do having a .tar.gz not being a .orig.tar.gz and a .diff.gz in '%s'!\n",
+				filename);
+		return RET_ERROR;
+	}
+	if (strlist_in(&changes->architectures, "source") && !havedsc &&
+			!limitations_missed(forcearchitectures, architecture_source) &&
+			!limitations_missed(packagetypes, pt_dsc)) {
+		fprintf(stderr,
+"I don't know what to do with a source-upload not containing a .dsc in '%s'!\n",
+				filename);
+		return RET_ERROR;
+	}
+	if (havedsc && !havediff && !haveorig && !havetar && !havealtsrc) {
+		fprintf(stderr,
+"I don't know what to do having a .dsc without a .diff.gz or .tar.gz in '%s'!\n",
+				filename);
+		return RET_ERROR;
+	}
+
+	/* check if signatures match files signed: */
+	for (e = changes->files ; e != NULL ; e = e->next) {
+		size_t el;
+		struct fileentry *f;
+
+		if (e->type != fe_SIG)
+			continue;
+
+		el = strlen(e->basename);
+
+		/* only detached '.asc' signatures are matched here */
+		if (el <= 4 || memcmp(e->basename + el - 4, ".asc", 4) != 0)
+			continue;
+
+		/* find the listed file this signature belongs to,
+		 * i.e. one whose basename plus ".asc" equals ours */
+		for (f = changes->files ; f != NULL ; f = f->next) {
+			size_t fl = strlen(f->basename);
+
+			if (el != fl + 4)
+				continue;
+			if (memcmp(e->basename, f->basename, fl) != 0)
+				continue;
+			break;
+		}
+		if (f == NULL) {
+			fprintf(stderr,
+"Signature file without file to be signed: '%s'!\n",
+					e->basename);
+			return RET_ERROR;
+		}
+	}
+
+	return r;
+}
+
+/* Compute the pool filekey for every file listed in the .changes and
+ * check (via files_canadd) whether it could enter the pool without
+ * conflicting with a differing file of the same name.  Nothing is
+ * copied yet: entries already present with matching checksums are only
+ * marked (wasalreadythere); files still needed must exist in the
+ * incoming directory.  Conflicting arch-all files may be dropped from
+ * the list when --ignore=conflictingarchall is given. */
+static retvalue changes_checkfiles(const char *filename, struct changes *changes) {
+	struct fileentry *e, **pe;
+	retvalue r;
+
+	r = RET_NOTHING;
+
+	/* pe always points at the pointer to the current entry, so the
+	 * entry can be unlinked from the list in-place below */
+	pe = &changes->files;
+	while ((e = *pe) != NULL) {
+		//TODO: decide earlier which files to include
+		if (e->type == fe_BYHAND) {
+			/* byhand files might have the same name and not
+			 * contain the version, so store separately */
+			assert(changes->srcdirectory!=NULL);
+			e->filekey = mprintf("%s/%s_%s_byhand/%s",
+					changes->srcdirectory,
+					changes->source,
+					changes->changesversion,
+					e->basename);
+		} else if (FE_SOURCE(e->type) || e->type == fe_LOG
+				|| e->type == fe_BUILDINFO) {
+			assert(changes->srcdirectory!=NULL);
+			e->filekey = calc_dirconcat(changes->srcdirectory,
+					e->basename);
+		} else {
+			char *directory;
+
+			// TODO: make this in-situ?
+			/* as the directory depends on the sourcename, it can be
+			 * different for every file... */
+			directory = calc_sourcedir(e->component,
+					changes->source);
+			if (FAILEDTOALLOC(directory))
+				return RET_ERROR_OOM;
+			e->filekey = calc_dirconcat(directory, e->basename);
+			free(directory);
+		}
+
+		if (FAILEDTOALLOC(e->filekey))
+			return RET_ERROR_OOM;
+		/* do not copy yet, but only check if it could be included */
+		r = files_canadd(e->filekey, e->checksums);
+		if (r == RET_ERROR_WRONG_MD5 &&
+				e->architecture_into == architecture_all &&
+				IGNORABLE(conflictingarchall)) {
+			struct fileentry *removedentry;
+
+			fprintf(stderr,
+"Ignoring '%s' as --ignore=conflictingarchall given and there is already a file with different contents of that name.\n",
+					e->name);
+
+			/* unlink and free this entry; pe is left in place so
+			 * the next iteration sees the successor */
+			removedentry = e;
+			*pe = removedentry->next;
+			removedentry->next = NULL;
+			freeentries(removedentry);
+			continue;
+		}
+		if (RET_WAS_ERROR(r))
+			return r;
+		/* If is was already there, remember that */
+		if (r == RET_NOTHING) {
+			e->wasalreadythere = true;
+		} else {
+			/* and if it needs inclusion check if there is a file */
+			char *fullfilename;
+
+			assert(RET_IS_OK(r));
+			// TODO: add a --paranoid to also check md5sums before copying?
+
+			fullfilename = calc_dirconcat(
+					changes->incomingdirectory,
+					e->basename);
+			if (FAILEDTOALLOC(fullfilename))
+				return RET_ERROR_OOM;
+			if (!isregularfile(fullfilename)) {
+				fprintf(stderr,
+"Cannot find file '%s' needed by '%s'!\n", fullfilename, filename);
+				free(fullfilename);
+				return RET_ERROR_MISSING;
+			}
+			free(fullfilename);
+		}
+		pe = &e->next;
+	}
+
+	return RET_OK;
+}
+
+/* Copy every still-needed file into the pool.  Entries already present
+ * with complete checksums are skipped.  Returns the result of the last
+ * inclusion attempt, or RET_NOTHING when nothing had to be done. */
+static retvalue changes_includefiles(struct changes *changes) {
+	retvalue result = RET_NOTHING;
+	struct fileentry *entry;
+
+	for (entry = changes->files ; entry != NULL ; entry = entry->next) {
+		assert (entry->filekey != NULL);
+
+		/* nothing to do if the pool already has it completely */
+		if (entry->wasalreadythere &&
+				checksums_iscomplete(entry->checksums))
+			continue;
+
+		result = files_checkincludefile(changes->incomingdirectory,
+				entry->basename, entry->filekey,
+				&entry->checksums);
+		if (RET_WAS_ERROR(result))
+			return result;
+	}
+
+	return result;
+}
+
+/* Delete the leftover files from the incoming directory once they have
+ * been included (delete >= D_MOVE); with D_DELETE even files that were
+ * never given a filekey are removed.  Deletion errors are reported and
+ * collected, but do not abort the loop. */
+static retvalue changes_deleteleftoverfiles(struct changes *changes, int delete) {
+	struct fileentry *e;
+	retvalue result, r;
+
+	if (delete < D_MOVE)
+		return RET_OK;
+
+	result = RET_OK;
+	// TODO: we currently only see files included here, so D_DELETE
+	// only affects the .changes file.
+
+	for (e = changes->files; e != NULL ; e = e->next) {
+		char *fullorigfilename;
+
+		if (delete < D_DELETE && e->filekey == NULL)
+			continue;
+
+		fullorigfilename = calc_dirconcat(changes->incomingdirectory,
+				e->basename);
+		/* calc_dirconcat allocates; passing NULL on to unlink()
+		 * would be undefined behaviour, so bail out on failure */
+		if (FAILEDTOALLOC(fullorigfilename))
+			return RET_ERROR_OOM;
+
+		if (unlink(fullorigfilename) != 0) {
+			int err = errno;
+			fprintf(stderr, "Error deleting '%s': %d=%s\n",
+					fullorigfilename, err, strerror(err));
+			r = RET_ERRNO(err);
+			RET_UPDATE(result, r);
+		}
+		free(fullorigfilename);
+	}
+
+	return result;
+}
+
+/* Make sure a file listed in a .dsc is available in the pool with the
+ * expected checksums.  If it is not there and --ignore=missingfile is
+ * given, try to pick it up from the incoming directory instead;
+ * otherwise a missing file is an error. */
+static retvalue changes_check_sourcefile(struct changes *changes, struct fileentry *dsc, const char *basefilename, const char *filekey, struct checksums **checksums_p) {
+	retvalue r;
+
+	r = files_expect(filekey, *checksums_p, false);
+	if (RET_WAS_ERROR(r))
+		return r;
+	// TODO: get additionals checksum out of database, as future
+	// source file completion code might need them...
+	if (RET_IS_OK(r))
+		return RET_OK;
+	/* RET_NOTHING: the file is not in the pool yet */
+	if (!IGNORABLE(missingfile)) {
+		fprintf(stderr,
+"Unable to find %s needed by %s!\n"
+"Perhaps you forgot to give dpkg-buildpackage the -sa option,\n"
+" or you could try --ignore=missingfile to guess possible files to use.\n",
+				filekey, dsc->basename);
+		return RET_ERROR_MISSING;
+	}
+	fprintf(stderr,
+"Unable to find %s!\n"
+"Perhaps you forgot to give dpkg-buildpackage the -sa option.\n"
+"--ignore=missingfile was given, searching for file...\n", filekey);
+
+	return files_checkincludefile(changes->incomingdirectory,
+			basefilename, filekey, checksums_p);
+}
+
+/* Read and validate the .dsc file behind a .changes entry: verify
+ * name/version agree with the .changes, apply section/priority from the
+ * entry, register the .dsc itself among the source files, compute the
+ * needed filekeys, make sure all referenced files are available, and
+ * finally build the control chunk for the Sources index. */
+static retvalue dsc_prepare(struct changes *changes, struct fileentry *dsc, struct distribution *distribution, const char *dscfilename){
+	retvalue r;
+	const struct overridedata *oinfo;
+	char *dscbasename;
+	char *control;
+	int i;
+	bool broken;
+
+	assert (dsc->section != NULL);
+	assert (dsc->priority != NULL);
+	assert (atom_defined(changes->srccomponent));
+	assert (dsc->basename != NULL);
+	assert (dsc->checksums != NULL);
+	assert (changes->source != NULL);
+	assert (changes->sourceversion != NULL);
+
+	/* First make sure this distribution has a source section at all,
+	 * for which it has to be listed in the "Architectures:"-field ;-) */
+	if (!atomlist_in(&distribution->architectures, architecture_source)) {
+		fprintf(stderr,
+"Cannot put a source package into Distribution '%s' not having 'source' in its 'Architectures:'-field!\n",
+				distribution->codename);
+		/* nota bene: this cannot be forced or ignored, as no target has
+		   been created for this. */
+		return RET_ERROR;
+	}
+
+	/* Then take a closer look in the file: */
+	r = sources_readdsc(&dsc->pkg.dsc, dscfilename, dscfilename, &broken);
+	if (RET_IS_OK(r) && broken && !IGNORING(brokensignatures,
+"'%s' contains only broken signatures.\n"
+"This most likely means the file was damaged or edited improperly\n",
+			dscfilename))
+		r = RET_ERROR;
+	if (RET_IS_OK(r))
+		r = propersourcename(dsc->pkg.dsc.name);
+	if (RET_IS_OK(r))
+		r = properversion(dsc->pkg.dsc.version);
+	if (RET_IS_OK(r))
+		r = properfilenames(&dsc->pkg.dsc.files.names);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	if (strcmp(changes->source, dsc->pkg.dsc.name) != 0) {
+		/* This cannot be ignored, as too much depends on it yet */
+		fprintf(stderr,
+"'%s' says it is '%s', while .changes file said it is '%s'\n",
+				dsc->basename, dsc->pkg.dsc.name,
+				changes->source);
+		return RET_ERROR;
+	}
+	if (strcmp(changes->sourceversion, dsc->pkg.dsc.version) != 0 &&
+			!IGNORING(wrongversion,
+"'%s' says it is version '%s', while .changes file said it is '%s'\n",
+				dsc->basename, dsc->pkg.dsc.version,
+				changes->sourceversion)) {
+		return RET_ERROR;
+	}
+
+	oinfo = override_search(distribution->overrides.dsc, dsc->pkg.dsc.name);
+
+	/* section and priority are taken from the .changes entry (where
+	 * overrides were already applied), not from the .dsc itself */
+	free(dsc->pkg.dsc.section);
+	dsc->pkg.dsc.section = strdup(dsc->section);
+	if (FAILEDTOALLOC(dsc->pkg.dsc.section))
+		return RET_ERROR_OOM;
+	free(dsc->pkg.dsc.priority);
+	dsc->pkg.dsc.priority = strdup(dsc->priority);
+	if (FAILEDTOALLOC(dsc->pkg.dsc.priority))
+		return RET_ERROR_OOM;
+
+	assert (dsc->pkg.dsc.name != NULL && dsc->pkg.dsc.version != NULL);
+
+	/* Add the dsc file to the list of files in this source package: */
+	dscbasename = strdup(dsc->basename);
+	if (FAILEDTOALLOC(dscbasename))
+		r = RET_ERROR_OOM;
+	else
+		r = checksumsarray_include(&dsc->pkg.dsc.files,
+				dscbasename, dsc->checksums);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* Calculate the filekeys: */
+	r = calc_dirconcats(changes->srcdirectory,
+			&dsc->pkg.dsc.files.names, &dsc->needed_filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* no one else might have looked yet, if we have them: */
+
+	assert (dsc->pkg.dsc.files.names.count == dsc->needed_filekeys.count);
+	/* loop starts at 1: index 0 appears to be the .dsc file itself,
+	 * just added by checksumsarray_include above -- TODO(review):
+	 * confirm that checksumsarray_include inserts at the front */
+	for (i = 1 ; i < dsc->pkg.dsc.files.names.count ; i ++) {
+		if (!RET_WAS_ERROR(r)) {
+			r = changes_check_sourcefile(
+					changes, dsc,
+					dsc->pkg.dsc.files.names.values[i],
+					dsc->needed_filekeys.values[i],
+					&dsc->pkg.dsc.files.checksums[i]);
+		}
+	}
+
+	if (!RET_WAS_ERROR(r))
+		r = sources_complete(&dsc->pkg.dsc, changes->srcdirectory,
+				oinfo,
+				dsc->pkg.dsc.section, dsc->pkg.dsc.priority,
+				&control);
+	if (RET_IS_OK(r)) {
+		free(dsc->pkg.dsc.control);
+		dsc->pkg.dsc.control = control;
+	}
+	return r;
+}
+
+
+/* Run the per-type preparation for every package file of the .changes
+ * (.deb/.udeb/.ddeb via deb_prepare, .dsc via dsc_prepare), which reads
+ * each package, applies overrides and computes everything needed for
+ * inclusion.  Stops at the first error. */
+static retvalue changes_checkpkgs(struct distribution *distribution, struct changes *changes) {
+	struct fileentry *e;
+	retvalue r;
+
+	r = RET_NOTHING;
+
+	e = changes->files;
+	while (e != NULL) {
+		char *fullfilename;
+		if (!FE_PACKAGE(e->type)) {
+			e = e->next;
+			continue;
+		}
+		fullfilename = files_calcfullfilename(e->filekey);
+		if (FAILEDTOALLOC(fullfilename))
+			return RET_ERROR_OOM;
+		if (e->type == fe_DEB) {
+			r = deb_prepare(&e->pkg.deb,
+				e->component, e->architecture_into,
+				e->section, e->priority,
+				pt_deb,
+				distribution, fullfilename,
+				e->filekey, e->checksums,
+				&changes->binaries,
+				changes->source, changes->sourceversion);
+		} else if (e->type == fe_UDEB) {
+			r = deb_prepare(&e->pkg.deb,
+				e->component, e->architecture_into,
+				e->section, e->priority,
+				pt_udeb,
+				distribution, fullfilename,
+				e->filekey, e->checksums,
+				&changes->binaries,
+				changes->source, changes->sourceversion);
+		} else if (e->type == fe_DDEB) {
+			r = deb_prepare(&e->pkg.deb,
+				e->component, e->architecture_into,
+				e->section, e->priority,
+				pt_ddeb,
+				distribution, fullfilename,
+				e->filekey, e->checksums,
+				&changes->binaries,
+				changes->source, changes->sourceversion);
+		} else if (e->type == fe_DSC) {
+			/* a .dsc inside a binNMU-looking .changes (Source:
+			 * version and Version: differ) is refused unless
+			 * --ignore=dscinbinnmu is given */
+			if (!changes->isbinnmu || IGNORING(dscinbinnmu,
+"File '%s' looks like a source package, but this .changes looks like a binNMU\n"
+"(as '%s' (from Source:) and '%s' (From Version:) differ.)\n",
+				e->filekey, changes->sourceversion,
+				changes->changesversion)) {
+
+				assert (atom_defined(changes->srccomponent));
+				assert (changes->srcdirectory!=NULL);
+				r = dsc_prepare(changes, e,
+					distribution, fullfilename);
+			} else
+				r = RET_ERROR;
+		}
+
+		free(fullfilename);
+		if (RET_WAS_ERROR(r))
+			break;
+		e = e->next;
+	}
+
+	return r;
+}
+
+/* Add all prepared packages to the distribution (updating tracking data
+ * when given) and attach byhand/log/buildinfo files to the tracking
+ * record.  *missed_p is set to true when some package could not be
+ * added (RET_NOTHING), so the caller knows not to delete the .changes
+ * with a plain --delete. */
+static retvalue changes_includepkgs(struct distribution *distribution, struct changes *changes, /*@null@*/struct trackingdata *trackingdata, const struct atomlist *forcearchitectures, bool *missed_p) {
+	struct fileentry *e;
+	retvalue result, r;
+
+	*missed_p = false;
+	r = distribution_prepareforwriting(distribution);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	result = RET_NOTHING;
+
+	e = changes->files;
+	while (e != NULL) {
+		if (interrupted())
+			return RET_ERROR_INTERRUPTED;
+		if (e->type == fe_DEB) {
+			r = deb_addprepared(e->pkg.deb,
+				/* architecture all needs this, the rest is
+				 * already filtered out */
+				(e->architecture_into == architecture_all)?
+					forcearchitectures:NULL,
+				pt_deb, distribution, trackingdata);
+			if (r == RET_NOTHING)
+				*missed_p = true;
+		} else if (e->type == fe_DDEB) {
+			r = deb_addprepared(e->pkg.deb,
+				/* architecture all needs this, the rest is
+				 * already filtered out */
+				(e->architecture_into == architecture_all)?
+					forcearchitectures:NULL,
+				pt_ddeb, distribution, trackingdata);
+			if (r == RET_NOTHING)
+				*missed_p = true;
+		} else if (e->type == fe_UDEB) {
+			r = deb_addprepared(e->pkg.deb,
+				/* architecture all needs this, the rest is
+				 * already filtered out */
+				(e->architecture_into == architecture_all)?
+					forcearchitectures:NULL,
+				pt_udeb, distribution, trackingdata);
+			if (r == RET_NOTHING)
+				*missed_p = true;
+		} else if (e->type == fe_DSC) {
+			r = dsc_addprepared(&e->pkg.dsc,
+				changes->srccomponent,
+				&e->needed_filekeys,
+				distribution, trackingdata);
+			if (r == RET_NOTHING)
+				*missed_p = true;
+		} else if (e->type == fe_BUILDINFO && trackingdata != NULL) {
+			/* e->filekey is set to NULL afterwards, i.e. its
+			 * ownership is handed over to the tracking data */
+			r = trackedpackage_addfilekey(trackingdata->tracks,
+				trackingdata->pkg,
+				ft_BUILDINFO, e->filekey, false);
+			e->filekey = NULL;
+		} else if (e->type == fe_LOG && trackingdata != NULL) {
+			r = trackedpackage_addfilekey(trackingdata->tracks,
+				trackingdata->pkg,
+				ft_LOG, e->filekey, false);
+			e->filekey = NULL;
+		} else if (e->type == fe_BYHAND && trackingdata != NULL) {
+			r = trackedpackage_addfilekey(trackingdata->tracks,
+				trackingdata->pkg,
+				ft_XTRA_DATA, e->filekey, false);
+			e->filekey = NULL;
+		} else
+			r = RET_NOTHING;
+		RET_UPDATE(result, r);
+
+		if (RET_WAS_ERROR(r))
+			break;
+		e = e->next;
+	}
+
+	logger_wait();
+
+	return result;
+}
+
+/* Feed the architecture of every package file into the uploader
+ * condition checker, stopping as soon as it tells us to. */
+static void verifyarchitectures(const struct changes *changes, struct upload_conditions *conditions) {
+	const struct fileentry *file = changes->files;
+
+	while (file != NULL) {
+		if (FE_SOURCE(file->type)) {
+			if (!uploaders_verifyatom(conditions,
+						architecture_source))
+				return;
+		} else if (FE_BINARY(file->type)) {
+			if (!uploaders_verifyatom(conditions,
+						file->architecture_into))
+				return;
+		}
+		file = file->next;
+	}
+}
+/* Feed the section of every package file (source and binary alike)
+ * into the uploader condition checker, stopping when it says so. */
+static void verifysection(const struct changes *changes, struct upload_conditions *conditions) {
+	const struct fileentry *file;
+
+	for (file = changes->files ; file != NULL ; file = file->next) {
+		/* source and binary entries are handled identically here */
+		if (!FE_SOURCE(file->type) && !FE_BINARY(file->type))
+			continue;
+		if (!uploaders_verifystring(conditions, file->section))
+			break;
+	}
+}
+/* Feed the name of every binary package file into the uploader
+ * condition checker, stopping as soon as it tells us to. */
+static void verifybinary(const struct changes *changes, struct upload_conditions *conditions) {
+	const struct fileentry *file;
+
+	for (file = changes->files ; file != NULL ; file = file->next) {
+		if (!FE_BINARY(file->type))
+			continue;
+		if (!uploaders_verifystring(conditions, file->name))
+			return;
+	}
+}
+/* Feed the name of every byhand file into the uploader condition
+ * checker, stopping as soon as it tells us to. */
+static void verifybyhands(const struct changes *changes, struct upload_conditions *conditions) {
+	const struct fileentry *file;
+
+	for (file = changes->files ; file != NULL ; file = file->next) {
+		if (file->type != fe_BYHAND)
+			continue;
+		if (!uploaders_verifystring(conditions, file->name))
+			return;
+	}
+}
+
+/* Drive the uploader-conditions state machine: keep answering its
+ * queries (codename, source name, sections, binaries, byhand files,
+ * architectures) until it reaches a final accept or reject verdict. */
+static bool permissionssuffice(struct changes *changes, const struct distribution *into, struct upload_conditions *conditions) {
+	for (;;) {
+		switch (uploaders_nextcondition(conditions)) {
+			case uc_ACCEPTED:
+				return true;
+			case uc_REJECTED:
+				return false;
+			case uc_CODENAME:
+				(void)uploaders_verifystring(conditions,
+						into->codename);
+				break;
+			case uc_SOURCENAME:
+				assert (changes->source != NULL);
+				(void)uploaders_verifystring(conditions,
+						changes->source);
+				break;
+			case uc_SECTIONS:
+				verifysection(changes, conditions);
+				break;
+			case uc_BINARIES:
+				verifybinary(changes, conditions);
+				break;
+			case uc_BYHAND:
+				verifybyhands(changes, conditions);
+				break;
+			case uc_ARCHITECTURES:
+				verifyarchitectures(changes, conditions);
+				break;
+		}
+	}
+}
+
+/* insert the given .changes into the mirror in the <distribution>
+ * if forcecomponent, forcesection or forcepriority is NULL
+ * get it from the files or try to guess it.
+ *
+ * Top-level entry point: parses the .changes, checks distribution and
+ * uploader permissions, fixes/validates fields, copies the files into
+ * the pool, adds the packages (updating tracking data when <tracks> is
+ * given), runs the notify scripts and finally deletes the processed
+ * files according to <delete> (D_MOVE/D_DELETE). */
+retvalue changes_add(trackingdb const tracks, const struct atomlist *packagetypes, component_t forcecomponent, const struct atomlist *forcearchitectures, const char *forcesection, const char *forcepriority, struct distribution *distribution, const char *changesfilename, int delete) {
+	retvalue result, r;
+	struct changes *changes;
+	struct trackingdata trackingdata;
+	bool somethingwasmissed;
+
+	/* lets error messages of helpers name the file being processed */
+	causingfile = changesfilename;
+
+	r = changes_read(changesfilename, &changes,
+			packagetypes, forcearchitectures,
+			distribution->trackingoptions.includebyhand,
+			distribution->trackingoptions.includelogs,
+			distribution->trackingoptions.includebuildinfos);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* the Distribution: field must name this distribution by suite,
+	 * codename or AlsoAcceptFor entry, unless that error is ignored */
+	if ((distribution->suite == NULL ||
+		!strlist_in(&changes->distributions, distribution->suite)) &&
+	    !strlist_in(&changes->distributions, distribution->codename) &&
+	    !strlist_intersects(&changes->distributions,
+			&distribution->alsoaccept)) {
+		if (!IGNORING(wrongdistribution,
+".changes put in a distribution not listed within it!\n")) {
+			changes_free(changes);
+			return RET_ERROR;
+		}
+	}
+
+	/* make sure caller has called distribution_loaduploaders */
+	assert (distribution->uploaders == NULL
+			|| distribution->uploaderslist != NULL);
+	if (distribution->uploaderslist != NULL) {
+		struct upload_conditions *conditions;
+
+		r = uploaders_permissions(distribution->uploaderslist,
+				changes->signatures, &conditions);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			changes_free(changes);
+			return r;
+		}
+		if (!permissionssuffice(changes, distribution, conditions) &&
+				!IGNORING(uploaders,
+"No rule allowing this package in found in %s!\n",
+					distribution->uploaders)) {
+			free(conditions);
+			changes_free(changes);
+			return RET_ERROR;
+		}
+		free(conditions);
+	}
+
+	/*look for component, section and priority to be correct or guess them*/
+	r = changes_fixfields(distribution, changesfilename, changes,
+			forcecomponent, forcesection, forcepriority);
+
+	/* do some tests if values are sensible */
+	if (!RET_WAS_ERROR(r))
+		r = changes_check(distribution, changesfilename, changes,
+				forcearchitectures, packagetypes);
+
+	if (interrupted())
+		RET_UPDATE(r, RET_ERROR_INTERRUPTED);
+
+	if (!RET_WAS_ERROR(r))
+		r = changes_checkfiles(changesfilename, changes);
+
+	if (interrupted())
+		RET_UPDATE(r, RET_ERROR_INTERRUPTED);
+
+	/* add files in the pool */
+	if (!RET_WAS_ERROR(r))
+		r = changes_includefiles(changes);
+
+	if (!RET_WAS_ERROR(r))
+		r = changes_checkpkgs(distribution, changes);
+
+	if (RET_WAS_ERROR(r)) {
+		changes_free(changes);
+		return r;
+	}
+
+	if (tracks != NULL) {
+		r = trackingdata_summon(tracks, changes->source,
+				changes->sourceversion, &trackingdata);
+		if (RET_WAS_ERROR(r)) {
+			changes_free(changes);
+			return r;
+		}
+		if (distribution->trackingoptions.includechanges) {
+			/* the .changes file itself goes into the pool, too */
+			char *basefilename;
+			assert (changes->srcdirectory != NULL);
+
+			basefilename = calc_changes_basename(changes->source,
+					changes->changesversion,
+					&changes->architectures);
+			changes->changesfilekey =
+				calc_dirconcat(changes->srcdirectory,
+						basefilename);
+			free(basefilename);
+			if (FAILEDTOALLOC(changes->changesfilekey)) {
+				changes_free(changes);
+				trackingdata_done(&trackingdata);
+				return RET_ERROR_OOM;
+			}
+			if (interrupted())
+				r = RET_ERROR_INTERRUPTED;
+			else
+				r = files_preinclude(changesfilename,
+						changes->changesfilekey,
+						NULL);
+			if (RET_WAS_ERROR(r)) {
+				changes_free(changes);
+				trackingdata_done(&trackingdata);
+				return r;
+			}
+		}
+	}
+	if (interrupted()) {
+		if (tracks != NULL)
+			trackingdata_done(&trackingdata);
+		changes_free(changes);
+		return RET_ERROR_INTERRUPTED;
+	}
+
+	/* add the source and binary packages in the given distribution */
+	result = changes_includepkgs(distribution, changes,
+		(tracks!=NULL)?&trackingdata:NULL, forcearchitectures,
+		&somethingwasmissed);
+
+	if (RET_WAS_ERROR(result)) {
+		if (tracks != NULL) {
+			trackingdata_done(&trackingdata);
+		}
+		changes_free(changes);
+		return result;
+	}
+
+	if (tracks != NULL) {
+		if (changes->changesfilekey != NULL) {
+			/* the copy's ownership is handed over below */
+			char *changesfilekey = strdup(changes->changesfilekey);
+			assert (changes->srcdirectory != NULL);
+			if (FAILEDTOALLOC(changesfilekey)) {
+				trackingdata_done(&trackingdata);
+				changes_free(changes);
+				return RET_ERROR_OOM;
+			}
+
+			r = trackedpackage_addfilekey(tracks, trackingdata.pkg,
+					ft_CHANGES, changesfilekey, false);
+			RET_ENDUPDATE(result, r);
+		}
+		r = trackingdata_finish(tracks, &trackingdata);
+		RET_ENDUPDATE(result, r);
+		if (RET_WAS_ERROR(result)) {
+			changes_free(changes);
+			return result;
+		}
+	}
+
+	/* if something was included, call --changes notify scripts */
+	if (RET_IS_OK(result)) {
+		assert (logger_isprepared(distribution->logger));
+		logger_logchanges(distribution->logger, distribution->codename,
+				changes->source, changes->changesversion,
+				changesfilename, changes->changesfilekey);
+	}
+	/* wait for notify scripts (including those for the packages)
+	 * before deleting the .changes */
+	logger_wait();
+
+	if ((delete >= D_MOVE && changes->changesfilekey != NULL) ||
+			delete >= D_DELETE) {
+		if (somethingwasmissed && delete < D_DELETE) {
+			if (verbose >= 0) {
+				fprintf(stderr,
+"Not deleting '%s' as no package was added or some package was missed.\n"
+"(Use --delete --delete to delete anyway in such cases)\n",
+					changesfilename);
+			}
+		} else {
+			if (verbose >= 5) {
+				printf("Deleting '%s'.\n", changesfilename);
+			}
+			if (unlink(changesfilename) != 0) {
+				int e = errno;
+				fprintf(stderr, "Error %d deleting '%s': %s\n",
+						e, changesfilename,
+						strerror(e));
+			}
+		}
+	}
+	result = changes_deleteleftoverfiles(changes, delete);
+	(void)changes_free(changes);
+
+	return result;
+}
diff --git a/checkin.h b/checkin.h
new file mode 100644
index 0000000..4bbf6b3
--- /dev/null
+++ b/checkin.h
@@ -0,0 +1,26 @@
+#ifndef REPREPRO_CHECKIN_H
+#define REPREPRO_CHECKIN_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+/* insert the given .changes into the mirror in the <distribution>
+ * if forcecomponent, forcesection or forcepriority is NULL
+ * get it from the files or try to guess it.
+ * if tracks != NULL, update/add tracking information there... */
+retvalue changes_add(/*@null@*/trackingdb, const struct atomlist * /*packagetypes*/, component_t, const struct atomlist * /*forcearchitecture*/, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, struct distribution *, const char * /*changesfilename*/, int /*delete*/);
+
+#endif
+
diff --git a/checkindeb.c b/checkindeb.c
new file mode 100644
index 0000000..00c0de4
--- /dev/null
+++ b/checkindeb.c
@@ -0,0 +1,444 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <ctype.h>
+#include "error.h"
+#include "ignore.h"
+#include "filecntl.h"
+#include "strlist.h"
+#include "checksums.h"
+#include "names.h"
+#include "checkindeb.h"
+#include "reference.h"
+#include "binaries.h"
+#include "files.h"
+#include "guesscomponent.h"
+#include "tracking.h"
+#include "override.h"
+#include "hooks.h"
+
+/* This file includes the code to include binaries, i.e.
+ to create the chunk for the Packages.gz-file and
+ to put it in the various databases.
+
+Things to do with .deb's checkin by hand: (by comparison with apt-ftparchive)
+- extract the control file (that's the hard part -> extractcontrol.c )
+- check for Package, Version, Architecture, Maintainer, Description
+- apply overwrite if necessary (section, priority and perhaps maintainer).
+- add Size, MD5sum, Filename, Priority, Section
+- remove Status (warning if existent?)
+- check for Optional-field and reject then..
+*/
+
+/* per-.deb check-in state, filled in stages (deb_read, then
+ * deb_preparelocation); release with deb_free() */
+struct debpackage {
+	/* things to be set by deb_read: */
+	struct deb_headers deb;
+	/* things that will still be NULL then: */
+	component_t component;
+	/* with deb_calclocations: */
+	/* filekey aliases filekeys.values[0]; it is not owned separately */
+	const char *filekey;
+	struct strlist filekeys;
+};
+
+/* release a debpackage and everything it owns; a NULL argument
+ * is accepted and ignored */
+void deb_free(/*@only@*/struct debpackage *pkg) {
+	if (pkg == NULL)
+		return;
+	binaries_debdone(&pkg->deb);
+	/* filekey != NULL means deb_preparelocation filled filekeys */
+	if (pkg->filekey != NULL)
+		strlist_done(&pkg->filekeys);
+	free(pkg);
+}
+
+/* read the data from a .deb, make some checks and extract some data
+ * On success *pkg is freshly allocated and must be released with
+ * deb_free(); on error nothing is left allocated for the caller. */
+static retvalue deb_read(/*@out@*/struct debpackage **pkg, const char *filename) {
+	retvalue r;
+	struct debpackage *deb;
+
+	deb = zNEW(struct debpackage);
+	if (FAILEDTOALLOC(deb))
+		return RET_ERROR_OOM;
+
+	/* parse the control information out of the .deb, then validate
+	 * the names/versions; the checks are chained so the first
+	 * failure is the one reported */
+	r = binaries_readdeb(&deb->deb, filename);
+	if (RET_IS_OK(r))
+		r = properpackagename(deb->deb.name);
+	if (RET_IS_OK(r))
+		r = propersourcename(deb->deb.source);
+	if (RET_IS_OK(r))
+		r = properversion(deb->deb.sourceversion);
+	if (RET_IS_OK(r))
+		r = properversion(deb->deb.version);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(deb);
+		return r;
+	}
+	*pkg = deb;
+
+	return RET_OK;
+}
+
+/* Figure out where the binary package described by pkg belongs:
+ * apply override data, force or derive section and priority, guess or
+ * verify the component and compute the pool filekeys.
+ * On success pkg->component, pkg->filekeys and pkg->filekey are set
+ * and *oinfo_ptr points at the override data (may be NULL);
+ * on error nothing extra needs to be freed by the caller. */
+static retvalue deb_preparelocation(struct debpackage *pkg, component_t forcecomponent, const struct atomlist *forcearchitectures, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const struct overridedata **oinfo_ptr, const char *debfilename){
+	const struct atomlist *components;
+	const struct overridefile *binoverride;
+	const struct overridedata *oinfo;
+	retvalue r;
+
+	if (verbose >= 15) {
+		/* print "(null)" explicitly: passing NULL for %s is
+		 * undefined behaviour (glibc only prints it by luck) */
+		fprintf(stderr, "trace: deb_preparelocation(pkg={deb={name=%s}}, forcecomponent=%s, forcearchitectures=[",
+				pkg == NULL ? "(null)" : pkg->deb.name, atoms_components[forcecomponent]);
+		atomlist_fprint(stderr, at_architecture, forcearchitectures);
+		fprintf(stderr, "], forcesection=%s, packagetype=%s, distribution={codename=%s}, debfilename=%s) called.\n",
+				forcesection, atoms_packagetypes[packagetype], distribution->codename, debfilename);
+	}
+
+	/* select the override file and the set of allowed components
+	 * depending on the package type */
+	if (packagetype == pt_ddeb) {
+		/* ddebs don't have overrides */
+		forcesection = "debug";
+		forcepriority = "extra";
+		binoverride = NULL;
+		components = &distribution->ddebcomponents;
+	} else if (packagetype == pt_udeb) {
+		binoverride = distribution->overrides.udeb;
+		components = &distribution->udebcomponents;
+	} else {
+		binoverride = distribution->overrides.deb;
+		components = &distribution->components;
+	}
+
+	if (binoverride == NULL) {
+		oinfo = NULL;
+	} else {
+		oinfo = override_search(binoverride, pkg->deb.name);
+
+		/* explicit --section/--priority win over override data */
+		if (forcesection == NULL) {
+			forcesection = override_get(oinfo, SECTION_FIELDNAME);
+		}
+
+		if (forcepriority == NULL) {
+			forcepriority = override_get(oinfo, PRIORITY_FIELDNAME);
+		}
+	}
+	*oinfo_ptr = oinfo;
+
+	/* the override file may also force a component */
+	if (!atom_defined(forcecomponent)) {
+		const char *fc;
+
+		fc = override_get(oinfo, "$Component");
+		if (fc != NULL) {
+			forcecomponent = component_find(fc);
+			if (!atom_defined(forcecomponent)) {
+				fprintf(stderr,
+"Unparseable component '%s' in $Component override of '%s'\n",
+					fc, pkg->deb.name);
+				return RET_ERROR;
+			}
+		}
+	}
+
+	if (forcesection != NULL) {
+		free(pkg->deb.section);
+		pkg->deb.section = strdup(forcesection);
+		if (FAILEDTOALLOC(pkg->deb.section)) {
+			return RET_ERROR_OOM;
+		}
+	}
+	if (forcepriority != NULL) {
+		free(pkg->deb.priority);
+		pkg->deb.priority = strdup(forcepriority);
+		if (FAILEDTOALLOC(pkg->deb.priority)) {
+			return RET_ERROR_OOM;
+		}
+	}
+
+	if (pkg->deb.section == NULL) {
+		fprintf(stderr, "No section given for '%s', skipping.\n",
+				pkg->deb.name);
+		return RET_ERROR;
+	}
+	if (pkg->deb.priority == NULL) {
+		fprintf(stderr, "No priority given for '%s', skipping.\n",
+				pkg->deb.name);
+		return RET_ERROR;
+	}
+	if (strcmp(pkg->deb.section, "unknown") == 0 && verbose >= 0) {
+		fprintf(stderr, "Warning: strange section '%s'!\n",
+				pkg->deb.section);
+	}
+
+	/* decide where it has to go */
+
+	r = guess_component(distribution->codename, components,
+			pkg->deb.name, pkg->deb.section,
+			forcecomponent, &pkg->component);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (verbose > 0 && !atom_defined(forcecomponent)) {
+		fprintf(stderr, "%s: component guessed as '%s'\n", debfilename,
+				atoms_components[pkg->component]);
+	}
+
+	/* some sanity checks: */
+
+	if (forcearchitectures != NULL &&
+			pkg->deb.architecture != architecture_all &&
+			!atomlist_in(forcearchitectures,
+				pkg->deb.architecture)) {
+		fprintf(stderr,
+"Cannot add '%s', as it is architecture '%s' and you specified to only include ",
+				debfilename,
+				atoms_architectures[pkg->deb.architecture]);
+		atomlist_fprint(stderr, at_architecture, forcearchitectures);
+		fputs(".\n", stderr);
+		return RET_ERROR;
+	} else if (pkg->deb.architecture != architecture_all &&
+			!atomlist_in(&distribution->architectures,
+				pkg->deb.architecture)) {
+		(void)fprintf(stderr,
+"Error looking at '%s': '%s' is not one of the valid architectures: '",
+				debfilename,
+				atoms_architectures[pkg->deb.architecture]);
+		(void)atomlist_fprint(stderr, at_architecture,
+				&distribution->architectures);
+		(void)fputs("'\n", stderr);
+		return RET_ERROR;
+	}
+	if (!atomlist_in(components, pkg->component)) {
+		/* fixed: the message was missing the opening quote
+		 * around %s */
+		fprintf(stderr,
+"Error looking at '%s': Would be placed in unavailable component '%s'!\n",
+				debfilename,
+				atoms_components[pkg->component]);
+		/* this cannot be ignored
+		 * as there is no data structure available */
+		return RET_ERROR;
+	}
+
+	r = binaries_calcfilekeys(pkg->component, &pkg->deb,
+			packagetype, &pkg->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	pkg->filekey = pkg->filekeys.values[0];
+	return RET_OK;
+}
+
+
+/* First half of the two-step inclusion used by .changes processing:
+ * read and validate the .deb, check it against what the .changes
+ * announced (binary names, source package/version, filekey), and
+ * precompute the control chunk.  On success *deb must be passed to
+ * deb_addprepared() and then released with deb_free(). */
+retvalue deb_prepare(/*@out@*/struct debpackage **deb, component_t forcecomponent, architecture_t forcearchitecture, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const char *debfilename, const char * const givenfilekey, const struct checksums * checksums, const struct strlist *allowed_binaries, const char *expectedsourcepackage, const char *expectedsourceversion){
+	retvalue r;
+	struct debpackage *pkg;
+	const struct overridedata *oinfo;
+	char *control;
+	struct atomlist forcearchitectures;
+	const char *packagenametocheck;
+	char *base;
+	size_t l;
+
+	assert (givenfilekey != NULL);
+	assert (checksums != NULL);
+	assert (allowed_binaries != NULL);
+	assert (expectedsourcepackage != NULL);
+	assert (expectedsourceversion != NULL);
+
+	/* First taking a closer look in the file: */
+
+	r = deb_read(&pkg, debfilename);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+
+	/* -dbgsym packages are not listed in the Binary header, so look
+	 * for the base name instead.
+	 * Note: sizeof("dbgsym") == 7 == strlen("-dbgsym"); the missing
+	 * dash is compensated by the counted NUL terminator. */
+	packagenametocheck = pkg->deb.name;
+	l = strlen(pkg->deb.name);
+	if (l > sizeof("-dbgsym")-1 &&
+	    strcmp(pkg->deb.name + l - (sizeof("dbgsym")), "-dbgsym") == 0) {
+		base = strndup(pkg->deb.name, l - (sizeof("dbgsym")));
+		if (FAILEDTOALLOC(base)) {
+			deb_free(pkg);
+			return RET_ERROR_OOM;
+		}
+		packagenametocheck = base;
+	} else {
+		base = NULL;
+	}
+
+	if (packagetype == pt_ddeb) {
+		/* ddebs are allowed if they are an allowed
+		 * binary + "-dbgsym" */
+		int i;
+		bool found = false;
+
+		for (i = 0; i < allowed_binaries->count; i++) {
+			const char *s = allowed_binaries->values[i];
+			size_t len = strlen(s);
+
+			if (strncmp(s, pkg->deb.name, len) == 0 &&
+			    strcmp(pkg->deb.name + len, "-dbgsym") == 0) {
+				found = true;
+			}
+		}
+
+		if (!found && !IGNORING(surprisingbinary,
+				"'%s' has packagename '%s' not corresponding to a .deb listed in the .changes file!\n",
+				debfilename, pkg->deb.name)) {
+			/* fixed: base was leaked on this error path */
+			free(base);
+			deb_free(pkg);
+			return RET_ERROR;
+		}
+	} else if (!strlist_in(allowed_binaries, packagenametocheck) &&
+	    !IGNORING(surprisingbinary,
+"'%s' has packagename '%s' not listed in the .changes file!\n",
+			debfilename, packagenametocheck)) {
+		deb_free(pkg);
+		free(base);
+		return RET_ERROR;
+	}
+	free(base);
+	if (strcmp(pkg->deb.source, expectedsourcepackage) != 0) {
+		/* this cannot be ignored easily, as it determines
+		 * the directory this file is stored into */
+		fprintf(stderr,
+"'%s' lists source package '%s', but .changes says it is '%s'!\n",
+				debfilename, pkg->deb.source,
+				expectedsourcepackage);
+		deb_free(pkg);
+		return RET_ERROR;
+	}
+	if (strcmp(pkg->deb.sourceversion, expectedsourceversion) != 0 &&
+	    !IGNORING(wrongsourceversion,
+"'%s' lists source version '%s', but .changes says it is '%s'!\n",
+			debfilename, pkg->deb.sourceversion,
+			expectedsourceversion)) {
+		deb_free(pkg);
+		return RET_ERROR;
+	}
+
+	/* wrap the single forced architecture in a stack-allocated
+	 * one-element atomlist for deb_preparelocation */
+	forcearchitectures.count = 1;
+	forcearchitectures.size = 1;
+	forcearchitectures.atoms = &forcearchitecture;
+
+	r = deb_preparelocation(pkg, forcecomponent, &forcearchitectures,
+			forcesection, forcepriority, packagetype, distribution,
+			&oinfo, debfilename);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(pkg);
+		return r;
+	}
+
+	if (strcmp(givenfilekey, pkg->filekey) != 0) {
+		fprintf(stderr,
+"Name mismatch: .changes indicates '%s', but the file itself says '%s'!\n",
+				givenfilekey, pkg->filekey);
+		deb_free(pkg);
+		return RET_ERROR;
+	}
+	/* Prepare everything that can be prepared beforehand */
+	r = binaries_complete(&pkg->deb, pkg->filekey, checksums, oinfo,
+			pkg->deb.section, pkg->deb.priority, &control);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(pkg);
+		return r;
+	}
+	free(pkg->deb.control); pkg->deb.control = control;
+	*deb = pkg;
+	return RET_OK;
+}
+
+/* second half of the two-step inclusion: insert a package already
+ * prepared by deb_prepare() into the distribution */
+retvalue deb_addprepared(const struct debpackage *pkg, const struct atomlist *forcearchitectures, packagetype_t packagetype, struct distribution *distribution, struct trackingdata *trackingdata) {
+	retvalue result;
+
+	result = binaries_adddeb(&pkg->deb, forcearchitectures,
+			packagetype, distribution, trackingdata,
+			pkg->component, &pkg->filekeys, pkg->deb.control);
+	return result;
+}
+
+/* insert the given .deb into the mirror in <component> in the <distribution>
+ * putting things with architecture of "all" into <d->architectures> (and also
+ * causing error, if it is not one of them otherwise)
+ * if component is NULL, guessing it from the section. */
+retvalue deb_add(component_t forcecomponent, const struct atomlist *forcearchitectures, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const char *debfilename, int delete, /*@null@*/trackingdb tracks) {
+	struct debpackage *pkg;
+	retvalue r;
+	struct trackingdata trackingdata;
+	const struct overridedata *oinfo;
+	char *control;
+	struct checksums *checksums;
+
+	/* global used by error reporting to name the offending file */
+	causingfile = debfilename;
+
+	r = deb_read(&pkg, debfilename);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	r = deb_preparelocation(pkg, forcecomponent, forcearchitectures,
+			forcesection, forcepriority, packagetype, distribution,
+			&oinfo, debfilename);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(pkg);
+		return r;
+	}
+	/* copy (or verify) the file into the pool, getting its checksums */
+	r = files_preinclude(debfilename, pkg->filekey, &checksums);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(pkg);
+		return r;
+	}
+	/* Prepare everything that can be prepared beforehand */
+	r = binaries_complete(&pkg->deb, pkg->filekey, checksums, oinfo,
+			pkg->deb.section, pkg->deb.priority, &control);
+	checksums_free(checksums);
+	if (RET_WAS_ERROR(r)) {
+		deb_free(pkg);
+		return r;
+	}
+	free(pkg->deb.control); pkg->deb.control = control;
+
+	/* set up source-package tracking if this distribution uses it;
+	 * trackingdata must be finished below even on partial failure */
+	if (tracks != NULL) {
+		assert(pkg->deb.sourceversion != NULL);
+		r = trackingdata_summon(tracks,
+				pkg->deb.source, pkg->deb.sourceversion,
+				&trackingdata);
+		if (RET_WAS_ERROR(r)) {
+			deb_free(pkg);
+			return r;
+		}
+	}
+
+	r = binaries_adddeb(&pkg->deb, forcearchitectures,
+			packagetype, distribution,
+			(tracks!=NULL)?&trackingdata:NULL,
+			pkg->component, &pkg->filekeys,
+			pkg->deb.control);
+	RET_UPDATE(distribution->status, r);
+	deb_free(pkg);
+
+	if (tracks != NULL) {
+		retvalue r2;
+		r2 = trackingdata_finish(tracks, &trackingdata);
+		RET_ENDUPDATE(r, r2);
+	}
+
+	/* honour --delete: remove the input file once it was added
+	 * (or, with a doubled --delete, even if nothing was added) */
+	if (RET_IS_OK(r) && delete >= D_MOVE) {
+		deletefile(debfilename);
+	} else if (r == RET_NOTHING && delete >= D_DELETE)
+		deletefile(debfilename);
+
+	return r;
+}
diff --git a/checkindeb.h b/checkindeb.h
new file mode 100644
index 0000000..ebc2ec8
--- /dev/null
+++ b/checkindeb.h
@@ -0,0 +1,28 @@
+#ifndef REPREPRO_CHECKINDEB_H
+#define REPREPRO_CHECKINDEB_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+/* insert the given .deb into the mirror in <component> in the <distribution>
+ * putting things with architecture of "all" into <architectures> (and also
+ * causing error, if it is not one of them otherwise)
+ * if overwrite is not NULL, it will be searched for fields to reset for this
+ * package. (forcesection and forcepriority have higher priority than the
+ * information there.) */
+retvalue deb_add(component_t, const struct atomlist * /*forcearchitectures*/, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, packagetype_t, struct distribution *, const char * /*debfilename*/, int /*delete*/, /*@null@*/trackingdb);
+
+/* in two steps */
+struct debpackage;
+retvalue deb_addprepared(const struct debpackage *, const struct atomlist * /*forcearchitectures*/, packagetype_t, struct distribution *, struct trackingdata *);
+retvalue deb_prepare(/*@out@*/struct debpackage **, component_t, architecture_t /*forcearchitectures*/, const char * /*forcesection*/, const char * /*forcepriority*/, packagetype_t, struct distribution *, const char * /*debfilename*/, const char * const /*filekey*/, const struct checksums *, const struct strlist * /*allowed_binaries*/, const char * /*expectedsourcename*/, const char * /*expectedsourceversion*/);
+void deb_free(/*@only@*/struct debpackage *);
+#endif
diff --git a/checkindsc.c b/checkindsc.c
new file mode 100644
index 0000000..43fa5f2
--- /dev/null
+++ b/checkindsc.c
@@ -0,0 +1,435 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <ctype.h>
+#include "error.h"
+#include "filecntl.h"
+#include "strlist.h"
+#include "checksums.h"
+#include "names.h"
+#include "checksums.h"
+#include "dirs.h"
+#include "checkindsc.h"
+#include "reference.h"
+#include "sources.h"
+#include "files.h"
+#include "guesscomponent.h"
+#include "tracking.h"
+#include "ignore.h"
+#include "override.h"
+#include "log.h"
+#include "sourceextraction.h"
+
+/* This file includes the code to include sources, i.e.
+ to create the chunk for the Sources.gz-file and
+ to put it in the various databases.
+
+things to do with .dsc's checkin by hand: (by comparison with apt-ftparchive)
+* Get all from .dsc (search the chunk with
+ the Source:-field. end the chunk artificial
+ before the pgp-end-block.(in case someone
+ missed the newline there))
+
+* check to have source, version, maintainer,
+ standards-version, files. And also look
+ at binary, architecture and build*, as
+ described in policy 5.4
+
+* Get overwrite information, especially
+ the priority(if there is a binaries field,
+ check the one with the highest) and the section
+ (...what else...?)
+
+* Rename Source-Field to Package-Field
+
+* add dsc to files-list. (check other files md5sum and size)
+
+* add Directory-field
+
+* Add Priority and Status
+
+* apply possible maintainer-updates from the overwrite-file
+ or arbitrary tag changes from the extra-overwrite-file
+
+* keep rest (perhaps sort alphabetical)
+
+*/
+
+/* per-.dsc check-in state, filled in stages by dsc_read and dsc_add;
+ * release with dsc_free() */
+struct dscpackage {
+	/* things to be set by dsc_read: */
+	struct dsc_headers dsc;
+	/* things that will still be NULL then: */
+	component_t component;
+	/* Things that may be calculated by dsc_calclocations: */
+	struct strlist filekeys;
+};
+
+/* release a dscpackage and everything it owns; accepts NULL */
+static void dsc_free(/*@only@*/struct dscpackage *pkg) {
+	if (pkg == NULL)
+		return;
+	sources_done(&pkg->dsc);
+	strlist_done(&pkg->filekeys);
+	free(pkg);
+}
+
+/* read and validate a .dsc file; on success *pkg is freshly allocated
+ * (release with dsc_free()), on error nothing is left allocated */
+static retvalue dsc_read(/*@out@*/struct dscpackage **pkg, const char *filename) {
+	retvalue r;
+	struct dscpackage *dsc;
+	bool broken;
+
+
+	dsc = zNEW(struct dscpackage);
+	if (FAILEDTOALLOC(dsc))
+		return RET_ERROR_OOM;
+
+	/* a broken signature is only fatal unless explicitly ignored */
+	r = sources_readdsc(&dsc->dsc, filename, filename, &broken);
+	if (RET_IS_OK(r) && broken && !IGNORING(brokensignatures,
+"'%s' contains only broken signatures.\n"
+"This most likely means the file was damaged or edited improperly\n",
+				filename))
+		r = RET_ERROR;
+	/* validation steps are chained: first failure wins */
+	if (RET_IS_OK(r))
+		r = propersourcename(dsc->dsc.name);
+	if (RET_IS_OK(r))
+		r = properversion(dsc->dsc.version);
+	if (RET_IS_OK(r))
+		r = properfilenames(&dsc->dsc.files.names);
+	if (RET_WAS_ERROR(r)) {
+		dsc_free(dsc);
+		return r;
+	}
+	/* component is decided later (dsc_add/guess_component) */
+	dsc->component = atom_unknown;
+	*pkg = dsc;
+
+	return RET_OK;
+}
+
+/* Insert an already prepared source package into the 'source'/dsc
+ * target of the given component of <distribution>.
+ * The caller must have prepared the distribution's logger already. */
+retvalue dsc_addprepared(const struct dsc_headers *dsc, component_t component, const struct strlist *filekeys, struct distribution *distribution, struct trackingdata *trackingdata){
+	retvalue r;
+	struct target *t = distribution_getpart(distribution,
+			component, architecture_source, pt_dsc);
+
+	assert (logger_isprepared(distribution->logger));
+
+	/* finally put it into the source distribution */
+	r = target_initpackagesdb(t, READWRITE);
+	if (!RET_WAS_ERROR(r)) {
+		retvalue r2;
+		/* do not start writing if the user already interrupted */
+		if (interrupted())
+			r = RET_ERROR_INTERRUPTED;
+		else
+			r = target_addpackage(t, distribution->logger,
+					dsc->name, dsc->version,
+					dsc->control, filekeys,
+					false, trackingdata,
+					architecture_source,
+					NULL, NULL);
+		/* always close the db; keep the first error */
+		r2 = target_closepackagesdb(t);
+		RET_ENDUPDATE(r, r2);
+	}
+	RET_UPDATE(distribution->status, r);
+	return r;
+}
+
+/* insert the given .dsc into the mirror in <component> in the <distribution>
+ * if component is NULL, guessing it from the section.
+ * If basename, filekey and directory are != NULL, then they are used instead
+ * of being newly calculated.
+ * (And all files are expected to already be in the pool). */
+retvalue dsc_add(component_t forcecomponent, const char *forcesection, const char *forcepriority, struct distribution *distribution, const char *dscfilename, int delete, trackingdb tracks){
+	retvalue r;
+	struct dscpackage *pkg;
+	struct trackingdata trackingdata;
+	char *destdirectory, *origdirectory;
+	const struct overridedata *oinfo;
+	char *control;
+	int i;
+
+	/* global used by error reporting to name the offending file */
+	causingfile = dscfilename;
+
+	/* First make sure this distribution has a source section at all,
+	 * for which it has to be listed in the "Architectures:"-field ;-) */
+	if (!atomlist_in(&distribution->architectures, architecture_source)) {
+		fprintf(stderr,
+"Cannot put a source package into Distribution '%s' not having 'source' in its 'Architectures:'-field!\n",
+			distribution->codename);
+		/* nota bene: this cannot be forced or ignored, as no target has
+		   been created for this. */
+		return RET_ERROR;
+	}
+
+	r = dsc_read(&pkg, dscfilename);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+
+	/* explicit --section/--priority win over override data */
+	oinfo = override_search(distribution->overrides.dsc, pkg->dsc.name);
+	if (forcesection == NULL) {
+		forcesection = override_get(oinfo, SECTION_FIELDNAME);
+	}
+	if (forcepriority == NULL) {
+		forcepriority = override_get(oinfo, PRIORITY_FIELDNAME);
+	}
+
+	if (forcesection != NULL) {
+		free(pkg->dsc.section);
+		pkg->dsc.section = strdup(forcesection);
+		if (FAILEDTOALLOC(pkg->dsc.section)) {
+			dsc_free(pkg);
+			return RET_ERROR_OOM;
+		}
+	}
+	if (forcepriority != NULL) {
+		free(pkg->dsc.priority);
+		pkg->dsc.priority = strdup(forcepriority);
+		if (FAILEDTOALLOC(pkg->dsc.priority)) {
+			dsc_free(pkg);
+			return RET_ERROR_OOM;
+		}
+	}
+
+	r = dirs_getdirectory(dscfilename, &origdirectory);
+	if (RET_WAS_ERROR(r)) {
+		dsc_free(pkg);
+		return r;
+	}
+
+	/* if section or priority are still missing, try to extract them
+	 * from the source files themselves (debian/control etc.) */
+	if (pkg->dsc.section == NULL || pkg->dsc.priority == NULL) {
+		struct sourceextraction *extraction;
+
+		extraction = sourceextraction_init(
+			(pkg->dsc.section == NULL)?&pkg->dsc.section:NULL,
+			(pkg->dsc.priority == NULL)?&pkg->dsc.priority:NULL);
+		if (FAILEDTOALLOC(extraction)) {
+			free(origdirectory);
+			dsc_free(pkg);
+			return RET_ERROR_OOM;
+		}
+		for (i = 0 ; i < pkg->dsc.files.names.count ; i ++)
+			sourceextraction_setpart(extraction, i,
+					pkg->dsc.files.names.values[i]);
+		while (sourceextraction_needs(extraction, &i)) {
+			char *fullfilename = calc_dirconcat(origdirectory,
+					pkg->dsc.files.names.values[i]);
+			if (FAILEDTOALLOC(fullfilename)) {
+				free(origdirectory);
+				dsc_free(pkg);
+				return RET_ERROR_OOM;
+			}
+			/* while it would nice to try at the pool if we
+			 * do not have the file here, to know its location
+			 * in the pool we need to know the component. And
+			 * for the component we might need the section first */
+			// TODO: but if forcecomponent is set it might be possible.
+			r = sourceextraction_analyse(extraction, fullfilename);
+			free(fullfilename);
+			if (RET_WAS_ERROR(r)) {
+				free(origdirectory);
+				dsc_free(pkg);
+				sourceextraction_abort(extraction);
+				return r;
+			}
+		}
+		r = sourceextraction_finish(extraction);
+		if (RET_WAS_ERROR(r)) {
+			free(origdirectory);
+			dsc_free(pkg);
+			return r;
+		}
+	}
+
+	if (pkg->dsc.section == NULL && pkg->dsc.priority == NULL) {
+		fprintf(stderr,
+"No section and no priority for '%s', skipping.\n",
+				pkg->dsc.name);
+		free(origdirectory);
+		dsc_free(pkg);
+		return RET_ERROR;
+	}
+	if (pkg->dsc.section == NULL) {
+		fprintf(stderr, "No section for '%s', skipping.\n",
+				pkg->dsc.name);
+		free(origdirectory);
+		dsc_free(pkg);
+		return RET_ERROR;
+	}
+	if (pkg->dsc.priority == NULL) {
+		fprintf(stderr, "No priority for '%s', skipping.\n",
+				pkg->dsc.name);
+		free(origdirectory);
+		dsc_free(pkg);
+		return RET_ERROR;
+	}
+	if (strcmp(pkg->dsc.section, "unknown") == 0 && verbose >= 0) {
+		fprintf(stderr, "Warning: strange section '%s'!\n",
+				pkg->dsc.section);
+	}
+	if (!atom_defined(forcecomponent)) {
+		const char *fc;
+
+		fc = override_get(oinfo, "$Component");
+		if (fc != NULL) {
+			forcecomponent = component_find(fc);
+			if (!atom_defined(forcecomponent)) {
+				fprintf(stderr,
+"Unparseable component '%s' in $Component override of '%s'\n",
+					fc, pkg->dsc.name);
+				/* fixed: origdirectory and pkg were
+				 * leaked on this error path */
+				free(origdirectory);
+				dsc_free(pkg);
+				return RET_ERROR;
+			}
+		}
+	}
+
+	/* decide where it has to go */
+
+	r = guess_component(distribution->codename, &distribution->components,
+			pkg->dsc.name, pkg->dsc.section, forcecomponent,
+			&pkg->component);
+	if (RET_WAS_ERROR(r)) {
+		free(origdirectory);
+		dsc_free(pkg);
+		return r;
+	}
+	if (verbose > 0 && !atom_defined(forcecomponent)) {
+		fprintf(stderr, "%s: component guessed as '%s'\n", dscfilename,
+				atoms_components[pkg->component]);
+	}
+
+	{ char *dscbasename, *dscfilekey;
+	  struct checksums *dscchecksums;
+
+		dscbasename = calc_source_basename(pkg->dsc.name, pkg->dsc.version);
+		destdirectory = calc_sourcedir(pkg->component, pkg->dsc.name);
+		/* Calculate the filekeys: */
+		if (destdirectory != NULL)
+			r = calc_dirconcats(destdirectory,
+					&pkg->dsc.files.names,
+					&pkg->filekeys);
+		if (dscbasename == NULL || destdirectory == NULL || RET_WAS_ERROR(r)) {
+			free(dscbasename);
+			free(destdirectory); free(origdirectory);
+			dsc_free(pkg);
+			/* fixed: r could still be RET_OK here when only
+			 * an allocation above failed, which made the
+			 * function report success after freeing pkg */
+			return RET_WAS_ERROR(r) ? r : RET_ERROR_OOM;
+		}
+		dscfilekey = calc_dirconcat(destdirectory, dscbasename);
+		dscchecksums = NULL;
+		/* fixed: this used to test dscfilename (the function
+		 * argument, never NULL) instead of the freshly
+		 * allocated dscfilekey, so OOM went undetected */
+		if (FAILEDTOALLOC(dscfilekey))
+			r = RET_ERROR_OOM;
+		else
+			/* then look if we already have this, or copy it in */
+			r = files_preinclude(
+					dscfilename, dscfilekey,
+					&dscchecksums);
+
+		if (!RET_WAS_ERROR(r)) {
+			/* Add the dsc-file to basenames, filekeys and md5sums,
+			 * so that it will be listed in the Sources.gz */
+
+			r = checksumsarray_include(&pkg->dsc.files,
+					dscbasename, dscchecksums);
+			if (RET_IS_OK(r))
+				r = strlist_include(&pkg->filekeys, dscfilekey);
+			else
+				free(dscfilekey);
+		} else {
+			free(dscfilekey);
+			free(dscbasename);
+		}
+		checksums_free(dscchecksums);
+	}
+
+	/* ensure the remaining files are in the pool too (index 0 is
+	 * the .dsc itself, handled above) */
+	assert (pkg->dsc.files.names.count == pkg->filekeys.count);
+	for (i = 1 ; i < pkg->dsc.files.names.count ; i ++) {
+		if (!RET_WAS_ERROR(r)) {
+			r = files_checkincludefile(origdirectory,
+					pkg->dsc.files.names.values[i],
+					pkg->filekeys.values[i],
+					&pkg->dsc.files.checksums[i]);
+		}
+	}
+
+	/* Calculate the chunk to include: */
+
+	if (!RET_WAS_ERROR(r))
+		r = sources_complete(&pkg->dsc, destdirectory, oinfo,
+				pkg->dsc.section, pkg->dsc.priority, &control);
+	free(destdirectory);
+	if (RET_IS_OK(r)) {
+		free(pkg->dsc.control);
+		pkg->dsc.control = control;
+	} else {
+		free(origdirectory);
+		dsc_free(pkg);
+		return r;
+	}
+
+	if (interrupted()) {
+		dsc_free(pkg);
+		free(origdirectory);
+		return RET_ERROR_INTERRUPTED;
+	}
+
+	if (tracks != NULL) {
+		r = trackingdata_summon(tracks, pkg->dsc.name,
+				pkg->dsc.version, &trackingdata);
+		if (RET_WAS_ERROR(r)) {
+			free(origdirectory);
+			dsc_free(pkg);
+			return r;
+		}
+	}
+
+	r = dsc_addprepared(&pkg->dsc, pkg->component,
+			&pkg->filekeys, distribution,
+			(tracks!=NULL)?&trackingdata:NULL);
+
+	/* delete source files, if they are to be */
+	if ((RET_IS_OK(r) && delete >= D_MOVE) ||
+			(r == RET_NOTHING && delete >= D_DELETE)) {
+		char *fullfilename;
+
+		for (i = 0 ; i < pkg->dsc.files.names.count ; i++) {
+			fullfilename = calc_dirconcat(origdirectory,
+					pkg->dsc.files.names.values[i]);
+			if (FAILEDTOALLOC(fullfilename)) {
+				r = RET_ERROR_OOM;
+				break;
+			}
+			if (isregularfile(fullfilename))
+				deletefile(fullfilename);
+			free(fullfilename);
+		}
+	}
+	free(origdirectory);
+	dsc_free(pkg);
+
+	if (tracks != NULL) {
+		retvalue r2;
+		r2 = trackingdata_finish(tracks, &trackingdata);
+		RET_ENDUPDATE(r, r2);
+	}
+	return r;
+}
diff --git a/checkindsc.h b/checkindsc.h
new file mode 100644
index 0000000..cf732ac
--- /dev/null
+++ b/checkindsc.h
@@ -0,0 +1,31 @@
+#ifndef REPREPRO_CHECKINDSC_H
+#define REPREPRO_CHECKINDSC_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+#ifndef REPREPRO_SOURCES_H
+#include "sources.h"
+#endif
+
+/* insert the given .dsc into the mirror in <component> in the <distribution>
+ * if component is NULL, guess it from the section. */
+retvalue dsc_add(component_t, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, struct distribution *, const char * /*dscfilename*/, int /*delete*/, /*@null@*/trackingdb);
+
+/* in two steps:
+ * If basename, filekey and directory are != NULL, then they are used instead
+ * of being newly calculated.
+ * (And all files are expected to already be in the pool),
+ * delete should be D_INPLACE then
+ */
+
+retvalue dsc_addprepared(const struct dsc_headers *, component_t, const struct strlist * /*filekeys*/, struct distribution *, /*@null@*/struct trackingdata *);
+
+#endif
diff --git a/checks.c b/checks.c
new file mode 100644
index 0000000..bbba928
--- /dev/null
+++ b/checks.c
@@ -0,0 +1,382 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <assert.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "mprintf.h"
+#include "names.h"
+#include "checks.h"
+
+typedef unsigned char uchar;
+
+/* check if the character starting where <character> points
+ * at is an overlong UTF-8 sequence (one that encodes a code point
+ * in more bytes than necessary, which could be used to smuggle
+ * '.' '\0' or '/' past naive byte-level checks). */
+static inline bool overlongUTF8(const char *character) {
+	/* This checks for overlong utf-8 characters.
+	 * (as they might mask '.' '\0' or '/' chars).
+	 * we assume no filesystem/ar/gpg code will parse
+	 * invalid utf8, as we would only be able to rule
+	 * this out if we knew it is utf8 we are coping
+	 * with. (Well, you should not use --ignore=validchars
+	 * anyway). */
+	uchar c = *character;
+
+	/* only bytes matching 11xxxxx0-with-bit1-clear patterns below
+	 * can start a multi-byte sequence worth examining: */
+	if ((c & (uchar)0xC2 /*11000010*/) == (uchar)0xC0 /*11000000*/) {
+		uchar nextc = *(character+1);
+
+		/* continuation byte must be 10xxxxxx, else not UTF-8: */
+		if ((nextc & (uchar)0xC0 /*11000000*/)
+				!= (uchar)0x80 /*10000000*/)
+			return false;
+
+		/* 0xC0/0xC1 lead bytes: 2-byte form of a < 0x80 value */
+		if ((c & (uchar)0x3E /* 00111110 */) == (uchar)0)
+			return true;
+		/* lead byte 0xE0/0xF0/0xF8/0xFC with too-small payload
+		 * bits in the continuation byte: overlong 3/4/5/6-byte
+		 * sequences respectively */
+		if (c == (uchar)0xE0 /*11100000*/ &&
+				(nextc & (uchar)0x20 /*00100000*/) == (uchar)0)
+			return true;
+		if (c == (uchar)0xF0 /*11110000*/ &&
+				(nextc & (uchar)0x30 /*00110000*/) == (uchar)0)
+			return true;
+		if (c == (uchar)0xF8 /*11111000*/ &&
+				(nextc & (uchar)0x38 /*00111000*/) == (uchar)0)
+			return true;
+		if (c == (uchar)0xFC /*11111100*/ &&
+				(nextc & (uchar)0x3C /*00111100*/) == (uchar)0)
+			return true;
+	}
+	return false;
+}
+
+/* Reject any ASCII control character (byte value below 0x20) inside
+ * <str>; prints an error mentioning <descr> and returns RET_ERROR
+ * from the enclosing function. */
+#define REJECTLOWCHARS(s, str, descr) \
+	if ((uchar)*s < (uchar)' ') { \
+		fprintf(stderr, \
+"Character 0x%02hhx not allowed within %s '%s'!\n", \
+			*s, descr, str); \
+		return RET_ERROR; \
+	}
+
+/* Reject the current character when condition <c> holds.
+ * Fixed to use the <str> parameter instead of silently referencing a
+ * variable named 'string' at the call site (every existing caller
+ * passes 'string' as <str>, so expansion is unchanged). */
+#define REJECTCHARIF(c, s, str, descr) \
+	if (c) { \
+		fprintf(stderr, \
+"Character '%c' not allowed within %s '%s'!\n", \
+			*s, descr, str); \
+		return RET_ERROR; \
+	}
+
+
+/* check if this is something that can be used as directory safely:
+ * must be non-empty, must not start with a dot, and may otherwise
+ * only contain [a-z0-9] plus '+' '-' '.' (those three not as first
+ * character).  Anything else is only let through when the user gave
+ * --ignore=forbiddenchar (and --ignore=8bit for 8bit characters). */
+retvalue propersourcename(const char *string) {
+	const char *s;
+	bool firstcharacter = true;
+
+	if (string[0] == '\0') {
+		/* This is not really ignoreable, as this will lead
+		 * to paths not normalized, so all checks go wrong */
+		fprintf(stderr, "Source name is not allowed to be empty!\n");
+		return RET_ERROR;
+	}
+	if (string[0] == '.') {
+		/* A dot is not only hard to see, it would cause the directory
+		 * to become /./.bla, which is quite dangerous. */
+		fprintf(stderr,
+"Source names are not allowed to start with a dot!\n");
+		return RET_ERROR;
+	}
+	s = string;
+	while (*s != '\0') {
+		/* fall through to the error path for anything outside the
+		 * allowed alphabet: */
+		if ((*s > 'z' || *s < 'a') &&
+		    (*s > '9' || *s < '0') &&
+		    (firstcharacter ||
+		     (*s != '+' && *s != '-' && *s != '.'))) {
+			REJECTLOWCHARS(s, string, "sourcename");
+			REJECTCHARIF (*s == '/', s, string, "sourcename");
+			if (overlongUTF8(s)) {
+				fprintf(stderr,
+"This could contain an overlong UTF8 sequence, rejecting source name '%s'!\n",
+						string);
+				return RET_ERROR;
+			}
+			if (!IGNORING_(forbiddenchar,
+"Character 0x%02hhx not allowed in sourcename: '%s'!\n", *s, string)) {
+				return RET_ERROR;
+			}
+			if (ISSET(*s, 0x80)) {
+				if (!IGNORING_(8bit,
+"8bit character in source name: '%s'!\n", string)) {
+					return RET_ERROR;
+				}
+			}
+		}
+		s++;
+		firstcharacter = false;
+	}
+	return RET_OK;
+}
+
+/* check if this is something that can be used as file name safely:
+ * non-empty, not "." or "..", no '/' and no control characters;
+ * 8bit characters only with --ignore=8bit and never as an overlong
+ * UTF-8 sequence. */
+retvalue properfilename(const char *string) {
+	const char *s;
+
+	if (string[0] == '\0') {
+		fprintf(stderr, "Error: empty filename!\n");
+		return RET_ERROR;
+	}
+	if ((string[0] == '.' && string[1] == '\0') ||
+		(string[0] == '.' && string[1] == '.' && string[2] == '\0')) {
+		fprintf(stderr, "File name not allowed: '%s'!\n", string);
+		return RET_ERROR;
+	}
+	for (s = string ; *s != '\0' ; s++) {
+		REJECTLOWCHARS(s, string, "filename");
+		REJECTCHARIF (*s == '/' , s, string, "filename");
+		if (ISSET(*s, 0x80)) {
+			if (overlongUTF8(s)) {
+				fprintf(stderr,
+"This could contain an overlong UTF8 sequence, rejecting file name '%s'!\n",
+						string);
+				return RET_ERROR;
+			}
+			if (!IGNORING_(8bit,
+"8bit character in file name: '%s'!\n", string)) {
+				return RET_ERROR;
+			}
+		}
+	}
+	return RET_OK;
+}
+
+static const char *formaterror(const char *format, ...) {
+ va_list ap;
+ static char *data = NULL;
+
+ if (data != NULL)
+ free(data);
+ va_start(ap, format);
+ data = vmprintf(format, ap);
+ va_end(ap);
+ if (data == NULL)
+ return "Out of memory";
+ return data;
+}
+
+/* check if this is something that can be used as directory *and*
+ * identifier safely: rejects '|', control characters, "//", and "."
+ * or ".." as any path segment; overlong UTF-8 is always rejected and
+ * other 8bit characters need --ignore=8bit.
+ * Returns NULL if ok, otherwise an error string (possibly from
+ * formaterror's single internal buffer — do not keep it around). */
+const char *checkfordirectoryandidentifier(const char *string) {
+	const char *s;
+
+	assert (string != NULL && string[0] != '\0');
+
+	if ((string[0] == '.' && (string[1] == '\0'||string[1]=='/')))
+		return "'.' is not allowed as directory part";
+	if ((string[0] == '.' && string[1] == '.'
+				&& (string[2] == '\0'||string[2] =='/')))
+		return "'..' is not allowed as directory part";
+	for (s = string; *s != '\0'; s++) {
+		if (*s == '|')
+			return "'|' is not allowed";
+		if ((uchar)*s < (uchar)' ')
+			return formaterror("Character 0x%02hhx not allowed", *s);
+		/* the same '.'/'..' tests as above, for later segments: */
+		if (*s == '/' && s[1] == '.' && (s[2] == '\0' || s[2] == '/'))
+			return "'.' is not allowed as directory part";
+		if (*s == '/' && s[1] == '.' && s[2] == '.'
+				&& (s[3] == '\0' || s[3] =='/'))
+			return "'..' is not allowed as directory part";
+		if (*s == '/' && s[1] == '/')
+			return "\"//\" is not allowed";
+		if (ISSET(*s, 0x80)) {
+			if (overlongUTF8(s))
+				return
+"Contains overlong UTF-8 sequence if treated as UTF-8";
+			if (!IGNORABLE(8bit))
+				return
+"Contains 8bit character (use --ignore=8bit to ignore)";
+		}
+	}
+	return NULL;
+}
+
+/* check if this can be used as part of an identifier (and as part of
+ * a file name): like checkfordirectoryandidentifier but additionally
+ * forbids '/' entirely (so no directory semantics at all).
+ * Returns NULL if ok, otherwise an error string. */
+const char *checkforidentifierpart(const char *string) {
+	const char *s;
+
+	assert (string != NULL && string[0] != '\0');
+
+	for (s = string; *s != '\0' ; s++) {
+		if (*s == '|')
+			return "'|' is not allowed";
+		if (*s == '/')
+			return "'/' is not allowed";
+		if ((uchar)*s < (uchar)' ')
+			return formaterror("Character 0x%02hhx not allowed", *s);
+		if (ISSET(*s, 0x80)) {
+			if (overlongUTF8(s))
+				return
+"Contains overlong UTF-8 sequence if treated as UTF-8";
+			if (!IGNORABLE(8bit))
+				return
+"Contains 8bit character (use --ignore=8bit to ignore)";
+		}
+	}
+	return NULL;
+}
+
+/* like properfilename, but for a fragment of a file name: the empty
+ * string and "."/".." are not rejected here, only '/' and control
+ * characters (and unignored 8bit/overlong UTF-8). */
+retvalue properfilenamepart(const char *string) {
+	const char *s;
+
+	for (s = string ; *s != '\0' ; s++) {
+		REJECTLOWCHARS(s, string, "filenamepart");
+		REJECTCHARIF (*s == '/' , s, string, "filenamepart");
+		if (ISSET(*s, 0x80)) {
+			if (overlongUTF8(s)) {
+				fprintf(stderr,
+"This could contain an overlong UTF8 sequence, rejecting part of file name '%s'!\n",
+						string);
+				return RET_ERROR;
+			}
+			if (!IGNORING_(8bit,
+"8bit character in part of file name: '%s'!\n",
+					string))
+				return RET_ERROR;
+		}
+	}
+	return RET_OK;
+}
+
+/* Check a version string: digits are always allowed; ':' is allowed
+ * while only digits were seen so far (the epoch separator) and, once
+ * an epoch was seen, also later; letters are allowed; '+' '-' '.' '~'
+ * are allowed except as first character.  A version not starting with
+ * a digit only warns (policy 5.6.11 is a 'should'); other characters
+ * need --ignore=forbiddenchar (and --ignore=8bit). */
+retvalue properversion(const char *string) {
+	const char *s = string;
+	bool hadepoch = false;
+	bool first = true;
+	bool yetonlydigits = true;
+
+	if (string[0] == '\0' && !IGNORING(emptyfilenamepart,
+"A version string is empty!\n")) {
+		return RET_ERROR;
+	}
+	/* only warn when the first character is a letter; other non-digit
+	 * first characters are caught by the loop below: */
+	if ((*s < '0' || *s > '9') &&
+			((*s >= 'a' && *s <= 'z') || (*s >='A' && *s <= 'Z'))) {
+		/* As there are official packages violating the rule
+		 * of policy 5.6.11 to start with a digit, disabling
+		 * this test, and only omitting a warning. */
+		if (verbose >= 0)
+			fprintf(stderr,
+"Warning: Package version '%s' does not start with a digit, violating 'should'-directive in policy 5.6.11\n",
+				string);
+	}
+	for (; *s != '\0' ; s++, first=false) {
+		if ((*s <= '9' && *s >= '0')) {
+			continue;
+		}
+		if (!first && yetonlydigits && *s == ':') {
+			hadepoch = true;
+			continue;
+		}
+		yetonlydigits = false;
+		if ((*s >= 'A' && *s <= 'Z') ||
+				(*s >= 'a' && *s <= 'z')) {
+			/* redundant: already cleared just above */
+			yetonlydigits = false;
+			continue;
+		}
+		if (first || (*s != '+' && *s != '-'
+				&& *s != '.' && *s != '~'
+				&& (!hadepoch || *s != ':'))) {
+			REJECTLOWCHARS(s, string, "version");
+			REJECTCHARIF (*s == '/' , s, string, "version");
+			if (overlongUTF8(s)) {
+				fprintf(stderr,
+"This could contain an overlong UTF8 sequence, rejecting version '%s'!\n",
+						string);
+				return RET_ERROR;
+			}
+			if (!IGNORING_(forbiddenchar,
+"Character '%c' not allowed in version: '%s'!\n", *s, string))
+				return RET_ERROR;
+			if (ISSET(*s, 0x80)) {
+				if (!IGNORING_(8bit,
+"8bit character in version: '%s'!\n", string))
+					return RET_ERROR;
+			}
+		}
+	}
+	return RET_OK;
+}
+
+/* Run properfilename() over every entry of <names>; stop at the first
+ * error and pass it through, otherwise report success. */
+retvalue properfilenames(const struct strlist *names) {
+	int idx = 0;
+
+	while (idx < names->count) {
+		retvalue result = properfilename(names->values[idx]);
+
+		assert (result != RET_NOTHING);
+		if (RET_WAS_ERROR(result))
+			return result;
+		idx++;
+	}
+	return RET_OK;
+}
+
+/* Check a binary package name: same alphabet as propersourcename
+ * ([a-z0-9] plus '+' '-' '.' not as first character), but without the
+ * leading-dot special case, as the name is a database key rather than
+ * a directory.  NOTE(review): this uses IGNORING for forbiddenchar
+ * where propersourcename uses IGNORING_ — confirm that is intended. */
+retvalue properpackagename(const char *string) {
+	const char *s;
+	bool firstcharacter = true;
+
+	/* To be able to avoid multiple warnings,
+	 * this should always be a subset of propersourcename */
+
+	if (string[0] == '\0') {
+		/* This is not really ignoreable, as this is a primary
+		 * key for our database */
+		fprintf(stderr, "Package name is not allowed to be empty!\n");
+		return RET_ERROR;
+	}
+	s = string;
+	while (*s != '\0') {
+		/* DAK also allowed upper case letters last I looked, policy
+		 * does not, so they are not allowed without --ignore=forbiddenchar */
+		// perhaps some extra ignore-rule for upper case?
+		if ((*s > 'z' || *s < 'a') &&
+		    (*s > '9' || *s < '0') &&
+		    (firstcharacter
+		     || (*s != '+' && *s != '-' && *s != '.'))) {
+			REJECTLOWCHARS(s, string, "package name");
+			REJECTCHARIF (*s == '/' , s, string, "package name");
+			if (overlongUTF8(s)) {
+				fprintf(stderr,
+"This could contain an overlong UTF8 sequence, rejecting package name '%s'!\n",
+						string);
+				return RET_ERROR;
+			}
+			if (!IGNORING(forbiddenchar,
+"Character 0x%02hhx not allowed in package name: '%s'!\n", *s, string)) {
+				return RET_ERROR;
+			}
+			if (ISSET(*s, 0x80)) {
+				if (!IGNORING_(8bit,
+"8bit character in package name: '%s'!\n", string)) {
+					return RET_ERROR;
+				}
+			}
+		}
+		s++;
+		firstcharacter = false;
+	}
+	return RET_OK;
+}
+
diff --git a/checks.h b/checks.h
new file mode 100644
index 0000000..ed5a167
--- /dev/null
+++ b/checks.h
@@ -0,0 +1,16 @@
+#ifndef REPREPRO_CHECKS_H
+#define REPREPRO_CHECKS_H
+
+/* Checker functions for names coming from configuration or uploads:
+ * they return NULL if there is no problem, otherwise a statically
+ * allocated (or internally managed) error string. */
+
+typedef const char *checkfunc(const char *);
+
+/* usable as directory part and identifier (components, codenames): */
+const char *checkfordirectoryandidentifier(const char *);
+#define checkforcomponent checkfordirectoryandidentifier
+#define checkforcodename checkfordirectoryandidentifier
+/* usable as identifier/file-name fragment (architectures): */
+const char *checkforidentifierpart(const char *);
+#define checkforarchitecture checkforidentifierpart
+
+/* not yet used (stray semicolon after the body removed: an empty
+ * file-scope declaration is not valid ISO C) */
+static inline void checkerror_free(UNUSED(const char *dummy)) {}
+#endif
diff --git a/checksums.c b/checksums.c
new file mode 100644
index 0000000..b3edbd2
--- /dev/null
+++ b/checksums.c
@@ -0,0 +1,1503 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2008,2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#define CHECKSUMS_CONTEXT visible
+#include "error.h"
+#include "mprintf.h"
+#include "checksums.h"
+#include "filecntl.h"
+#include "names.h"
+#include "dirs.h"
+#include "configparser.h"
+
+/* Field names used in .changes files, indexed by the hash part of
+ * enum checksumtype (cs_md5sum..cs_sha512sum): */
+const char * const changes_checksum_names[] = {
+	"Files", "Checksums-Sha1", "Checksums-Sha256", "Checksums-Sha512"
+};
+/* .dsc files use the same field names as .changes files: */
+const char * const source_checksum_names[] = {
+	"Files", "Checksums-Sha1", "Checksums-Sha256", "Checksums-Sha512"
+};
+/* headings of the per-hash file lists within a Release file: */
+const char * const release_checksum_names[cs_hashCOUNT] = {
+	"MD5Sum", "SHA1", "SHA256", "SHA512"
+};
+
+
+/* The internal representation of a checksum, as written to the databases,
+ * is \(:[1-9a-z]:[^ ]\+ \)*[0-9a-fA-F]\+ [0-9]\+
+ * first some hashes, whose type is determined by a single character
+ * (also yet unknown hashes are supported and should be preserved, but are
+ * not generated)
+ * after that the md5sum and finally the size in dezimal representation.
+ *
+ * Checksums are parsed and stored in a structure for fast access of their
+ * known parts:
+ */
+#ifdef SPLINT
+typedef size_t hashlen_t;
+#else
+typedef unsigned short hashlen_t;
+#endif
+
+/* A parsed checksums set: the canonical textual representation is kept
+ * inline in the flexible array member, and parts[] records where each
+ * known hash (and, as last part, the size) starts within it and how
+ * long it is.  A len of 0 means that hash is not present. */
+struct checksums {
+	struct { unsigned short ofs;
+		hashlen_t len;
+	} parts[cs_COUNT];
+	char representation[];
+};
+/* start of hash <t> within the representation: */
+#define checksums_hashpart(c, t) ((c)->representation + (c)->parts[t].ofs)
+/* total length of the representation (size is always last): */
+#define checksums_totallength(c) ((c)->parts[cs_length].ofs + (c)->parts[cs_length].len)
+
+/* human readable names for messages, indexed like enum checksumtype: */
+static const char * const hash_name[cs_COUNT] =
+	{ "md5", "sha1", "sha256", "sha512", "size" };
+
+/* release a checksums set; NULL is allowed (free(NULL) is a no-op) */
+void checksums_free(struct checksums *checksums) {
+	free(checksums);
+}
+
+/* Create a checksums set from individual NUL-terminated hash strings.
+ * Ownership of all strings in hashes[] is ALWAYS taken (they are freed
+ * here), on success as well as on error.  hashes[cs_length] is
+ * mandatory: if it is NULL, *checksums_p is set to NULL and RET_OK
+ * returned.  A NULL md5sum is represented as "-".  Hashes must be hex
+ * digits only and the size decimal, otherwise RET_ERROR. */
+retvalue checksums_init(/*@out@*/struct checksums **checksums_p, char *hashes[cs_COUNT]) {
+	const char *p, *size;
+	char *d;
+	struct checksums *n;
+	enum checksumtype type;
+	size_t len, hashlens[cs_COUNT];
+
+	/* everything assumes yet that this is available */
+	if (hashes[cs_length] == NULL) {
+		for (type = cs_md5sum ; type < cs_COUNT ; type++)
+			free(hashes[type]);
+		*checksums_p = NULL;
+		return RET_OK;
+	}
+
+	/* strip leading zeroes from the size (keeping a last digit): */
+	size = hashes[cs_length];
+	while (*size == '0' && size[1] >= '0' && size[1] <= '9')
+		size++;
+
+	if (hashes[cs_md5sum] == NULL)
+		hashlens[cs_md5sum] = 1; /* room for the "-" marker */
+	else
+		hashlens[cs_md5sum] = strlen(hashes[cs_md5sum]);
+	hashlens[cs_length] = strlen(size);
+	len = hashlens[cs_md5sum] + 1 + hashlens[cs_length];
+
+	/* validate the md5sum as hex digits only: */
+	p = hashes[cs_md5sum];
+	if (p != NULL) {
+		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')
+				|| (*p >= 'A' && *p <= 'F'))
+			p++;
+		if (*p != '\0') {
+			// TODO: find way to give more meaningful error message
+			fprintf(stderr, "Invalid md5 hash: '%s'\n",
+					hashes[cs_md5sum]);
+			for (type = cs_md5sum ; type < cs_COUNT ; type++)
+				free(hashes[type]);
+			return RET_ERROR;
+		}
+	}
+	/* validate the size as decimal digits only: */
+	p = size;
+	while ((*p >= '0' && *p <= '9'))
+		p++;
+	if (*p != '\0') {
+		// TODO: find way to give more meaningful error message
+		fprintf(stderr, "Invalid size: '%s'\n", size);
+		for (type = cs_md5sum ; type < cs_COUNT ; type++)
+			free(hashes[type]);
+		return RET_ERROR;
+	}
+
+	/* validate the extended hashes and account for ":x:<hash> ": */
+	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
+		if (hashes[type] == NULL)
+			continue;
+		p = hashes[type];
+		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')
+				|| (*p >= 'A' && *p <= 'F'))
+			p++;
+		if (*p != '\0') {
+			// TODO: find way to give more meaningful error message
+			fprintf(stderr, "Invalid hash: '%s'\n", hashes[type]);
+			for (type = cs_md5sum ; type < cs_COUNT ; type++)
+				free(hashes[type]);
+			return RET_ERROR;
+		}
+		hashlens[type] = (size_t)(p - hashes[type]);
+		len += strlen(" :x:") + hashlens[type];
+	}
+
+	n = malloc(sizeof(struct checksums) + len + 1);
+	if (FAILEDTOALLOC(n)) {
+		for (type = cs_md5sum ; type < cs_COUNT ; type++)
+			free(hashes[type]);
+		return RET_ERROR_OOM;
+	}
+	setzero(struct checksums, n);
+	d = n->representation;
+
+	/* extended hashes come first, each as ":<typechar>:<hex> ": */
+	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
+		if (hashes[type] == NULL)
+			continue;
+		*(d++) = ':';
+		*(d++) = '1' + (char)(type - cs_sha1sum);
+		*(d++) = ':';
+		n->parts[type].ofs = d - n->representation;
+		n->parts[type].len = (hashlen_t)hashlens[type];
+		memcpy(d, hashes[type], hashlens[type]);
+		d += hashlens[type];
+		*(d++) = ' ';
+	}
+	if (hashes[cs_md5sum] == NULL) {
+		n->parts[cs_md5sum].ofs = d - n->representation;
+		n->parts[cs_md5sum].len = 0;
+		*(d++) = '-';
+	} else {
+		n->parts[cs_md5sum].ofs = d - n->representation;
+		n->parts[cs_md5sum].len = (hashlen_t)hashlens[cs_md5sum];
+		memcpy(d, hashes[cs_md5sum], hashlens[cs_md5sum]);
+		d += hashlens[cs_md5sum];
+	}
+	*(d++) = ' ';
+	n->parts[cs_length].ofs = d - n->representation;
+	n->parts[cs_length].len = (hashlen_t)hashlens[cs_length];
+	/* this memcpy copies the terminating NUL too: */
+	memcpy(d, size, hashlens[cs_length] + 1);
+	d += hashlens[cs_length] + 1;
+	assert ((size_t)(d-n->representation) == len + 1);
+
+	for (type = cs_md5sum ; type < cs_COUNT ; type++)
+		free(hashes[type]);
+	*checksums_p = n;
+	return RET_OK;
+}
+
+/* Create a checksums set from hash_data pointers into caller-owned
+ * memory: the bytes are copied, nothing is freed or kept.  Unlike
+ * checksums_init, a missing size here is an internal error (asserted).
+ * NOTE(review): no hex/decimal validation is done here — callers are
+ * assumed to pass pre-validated data; confirm at the call sites. */
+retvalue checksums_initialize(struct checksums **checksums_p, const struct hash_data *hashes) {
+	char *d;
+	struct checksums *n;
+	enum checksumtype type;
+	size_t len;
+
+	/* everything assumes that this is available */
+	if (hashes[cs_length].start == NULL) {
+		assert (0 == 1);
+		*checksums_p = NULL;
+		return RET_ERROR;
+	}
+
+	len = hashes[cs_md5sum].len + 1 + hashes[cs_length].len;
+	if (hashes[cs_md5sum].start == NULL) {
+		assert(hashes[cs_md5sum].len == 0);
+		len++; /* room for the "-" marker */
+	}
+
+	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
+		if (hashes[type].start == NULL)
+			continue;
+		len += strlen(" :x:") + hashes[type].len;
+	}
+
+	n = malloc(sizeof(struct checksums) + len + 1);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+
+	setzero(struct checksums, n);
+	d = n->representation;
+
+	/* extended hashes first, each as ":<typechar>:<hex> ": */
+	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
+		if (hashes[type].start == NULL)
+			continue;
+		*(d++) = ':';
+		*(d++) = '1' + (char)(type - cs_firstEXTENDED);
+		*(d++) = ':';
+		n->parts[type].ofs = d - n->representation;
+		n->parts[type].len = (hashlen_t)hashes[type].len;
+		memcpy(d, hashes[type].start, hashes[type].len);
+		d += hashes[type].len;
+		*(d++) = ' ';
+	}
+	if (hashes[cs_md5sum].start == NULL) {
+		n->parts[cs_md5sum].ofs = d - n->representation;
+		n->parts[cs_md5sum].len = 0;
+		*(d++) = '-';
+	} else {
+		n->parts[cs_md5sum].ofs = d - n->representation;
+		n->parts[cs_md5sum].len = (hashlen_t)hashes[cs_md5sum].len;
+		memcpy(d, hashes[cs_md5sum].start, hashes[cs_md5sum].len);
+		d += hashes[cs_md5sum].len;
+	}
+	*(d++) = ' ';
+	n->parts[cs_length].ofs = d - n->representation;
+	n->parts[cs_length].len = (hashlen_t)hashes[cs_length].len;
+	memcpy(d, hashes[cs_length].start, hashes[cs_length].len);
+	d += hashes[cs_length].len;
+	*(d++) = '\0';
+	assert ((size_t)(d-n->representation) == len + 1);
+	*checksums_p = n;
+	return RET_OK;
+}
+
+/* Reconstruct a checksums set from its combined representation. */
+retvalue checksums_setall(/*@out@*/struct checksums **checksums_p, const char *combinedchecksum, UNUSED(size_t len)) {
+	/* This data comes from our own database, so it is assumed to be
+	 * well formed already; it could thus be handled faster than a
+	 * full parse, but simply reuse the parser for now. */
+	return checksums_parse(checksums_p, combinedchecksum);
+}
+
+/* Parse the textual representation
+ *   \(:[1-9a-z]:[^ ]\+ \)*[0-9a-fA-F]\+ [0-9]\+
+ * (with "-" allowed instead of the md5sum) into a newly allocated
+ * struct checksums.  Unknown ":x:" prefixed hashes are preserved
+ * verbatim, the md5sum is lowercased and leading zeroes of the size
+ * are stripped.  Returns RET_ERROR on malformed input. */
+retvalue checksums_parse(struct checksums **checksums_p, const char *combinedchecksum) {
+	struct checksums *n;
+	size_t len = strlen(combinedchecksum);
+	const char *p = combinedchecksum;
+	/*@dependent@*/char *d;
+	char type;
+	/*@dependent@*/const char *start;
+
+	n = malloc(sizeof(struct checksums) + len + 1);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	setzero(struct checksums, n);
+	d = n->representation;
+	/* copy each leading ":<typechar>:<hash> " entry, recording the
+	 * position of the hash types we know about: */
+	while (*p == ':') {
+
+		p++;
+		if (p[0] == '\0' || p[1] != ':') {
+			// TODO: how to get some context in this?
+			fprintf(stderr,
+"Malformed checksums representation: '%s'!\n",
+					combinedchecksum);
+			free(n);
+			return RET_ERROR;
+		}
+		type = p[0];
+		p += 2;
+		*(d++) = ':';
+		*(d++) = type;
+		*(d++) = ':';
+		if (type == '1') {
+			start = d;
+			n->parts[cs_sha1sum].ofs = d - n->representation;
+			while (*p != ' ' && *p != '\0')
+				*(d++) = *(p++);
+			n->parts[cs_sha1sum].len = (hashlen_t)(d - start);
+		} else if (type == '2') {
+			start = d;
+			n->parts[cs_sha256sum].ofs = d - n->representation;
+			while (*p != ' ' && *p != '\0')
+				*(d++) = *(p++);
+			n->parts[cs_sha256sum].len = (hashlen_t)(d - start);
+		} else if (type == '3') {
+			start = d;
+			n->parts[cs_sha512sum].ofs = d - n->representation;
+			while (*p != ' ' && *p != '\0')
+				*(d++) = *(p++);
+			n->parts[cs_sha512sum].len = (hashlen_t)(d - start);
+		} else {
+			/* unknown hash type: copy but do not index it */
+			while (*p != ' ' && *p != '\0')
+				*(d++) = *(p++);
+		}
+
+		*(d++) = ' ';
+		while (*p == ' ')
+			p++;
+	}
+	/* then the md5sum (or a lone "-"), lowercasing A-F: */
+	n->parts[cs_md5sum].ofs = d - n->representation;
+	start = d;
+	if (*p == '-' && p[1] == ' ') {
+		p++;
+		*(d++) = '-';
+		start = d; /* so len stays 0 for the missing hash */
+	} else while (*p != ' ' && *p != '\0') {
+		if ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) {
+			*(d++) = *(p++);
+		} else if (*p >= 'A' && *p <= 'F') {
+			*(d++) = *(p++) + ('a' - 'A');
+		} else {
+			// TODO: how to get some context in this?
+			fprintf(stderr,
+"Malformed checksums representation (invalid md5sum): '%s'!\n",
+					combinedchecksum);
+			free(n);
+			return RET_ERROR;
+		}
+	}
+	n->parts[cs_md5sum].len = (hashlen_t)(d - start);
+	*(d++) = ' ';
+	while (*p == ' ')
+		p++;
+	/* finally the size, with leading zeroes stripped: */
+	n->parts[cs_length].ofs = d - n->representation;
+	while (*p == '0' && (p[1] >= '0' && p[1] <= '9'))
+		p++;
+	start = d;
+	while (*p != '\0') {
+		if (*p >= '0' && *p <= '9') {
+			*(d++) = *(p++);
+		} else {
+			// TODO: how to get some context in this?
+			fprintf(stderr,
+"Malformed checksums representation (invalid size): '%s'!\n",
+					combinedchecksum);
+			free(n);
+			return RET_ERROR;
+		}
+	}
+	n->parts[cs_length].len = (hashlen_t)(d - start);
+	if (d == start) {
+		// TODO: how to get some context in this?
+		fprintf(stderr,
+"Malformed checksums representation (no size): '%s'!\n",
+				combinedchecksum);
+		free(n);
+		return RET_ERROR;
+	}
+	*d = '\0';
+	assert ((size_t)(d - n->representation) <= len);
+	*checksums_p = n;
+	return RET_OK;
+}
+
+/* Deep-copy a checksums set; returns NULL on allocation failure. */
+struct checksums *checksums_dup(const struct checksums *checksums) {
+	struct checksums *n;
+	size_t len;
+
+	assert (checksums != NULL);
+	len = checksums_totallength(checksums);
+	assert (checksums->representation[len] == '\0');
+
+	n = malloc(sizeof(struct checksums) + len + 1);
+	if (FAILEDTOALLOC(n))
+		return NULL;
+	/* parts[] offsets are relative, so a flat copy suffices: */
+	memcpy(n, checksums, sizeof(struct checksums) + len + 1);
+	assert (n->representation[len] == '\0');
+	return n;
+}
+
+/* Get a pointer to (not a copy of) part <type> of the representation;
+ * false if that part is not stored.  The pointed-to data is NOT
+ * NUL-terminated at *size_p. */
+bool checksums_getpart(const struct checksums *checksums, enum checksumtype type, const char **sum_p, size_t *size_p) {
+
+	assert (type < cs_COUNT);
+
+	if (checksums->parts[type].len == 0)
+		return false;
+	*size_p = checksums->parts[type].len;
+	*sum_p = checksums_hashpart(checksums, type);
+	return true;
+}
+
+/* Like checksums_getpart, but returning the hash and the size (as a
+ * decimal string) separately; false if hash <type> is not stored. */
+bool checksums_gethashpart(const struct checksums *checksums, enum checksumtype type, const char **hash_p, size_t *hashlen_p, const char **size_p, size_t *sizelen_p) {
+	assert (type < cs_hashCOUNT);
+	if (checksums->parts[type].len == 0)
+		return false;
+	*hashlen_p = checksums->parts[type].len;
+	*hash_p = checksums_hashpart(checksums, type);
+	*sizelen_p = checksums->parts[cs_length].len;
+	*size_p = checksums_hashpart(checksums, cs_length);
+	return true;
+}
+
+/* Expose the full combined representation (NUL-terminated, but the
+ * returned length excludes the NUL); data stays owned by <checksums>. */
+retvalue checksums_getcombined(const struct checksums *checksums, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) {
+	size_t len;
+
+	assert (checksums != NULL);
+	len = checksums->parts[cs_length].ofs + checksums->parts[cs_length].len;
+	assert (checksums->representation[len] == '\0');
+
+	*data_p = checksums->representation;
+	*datalen_p = len;
+	return RET_OK;
+}
+
+/* Decode the stored decimal size part into an off_t. */
+off_t checksums_getfilesize(const struct checksums *checksums) {
+	const char *p = checksums_hashpart(checksums, cs_length);
+	off_t filesize;
+
+	filesize = 0;
+	while (*p <= '9' && *p >= '0') {
+		filesize = filesize*10 + (size_t)(*p-'0');
+		p++;
+	}
+	/* the size is the last part, so it ends at the NUL: */
+	assert (*p == '\0');
+	return filesize;
+}
+
+/* Check whether the "<hash> <size>" string <sum> agrees with the
+ * stored hash of type <type>.  A hash that is not stored (len == 0)
+ * counts as matching; the size must always agree. */
+bool checksums_matches(const struct checksums *checksums, enum checksumtype type, const char *sum) {
+	size_t len = (size_t)checksums->parts[type].len;
+
+	assert (type < cs_hashCOUNT);
+
+	if (len == 0)
+		return true;
+
+	if (strncmp(sum, checksums_hashpart(checksums, type), len) != 0)
+		return false;
+	if (sum[len] != ' ')
+		return false;
+	/* assuming count is the last part: comparing len+1 bytes also
+	 * checks the terminating NUL, so trailing garbage fails */
+	if (strncmp(sum + len + 1, checksums_hashpart(checksums, cs_length),
+				checksums->parts[cs_length].len + 1) != 0)
+		return false;
+	return true;
+}
+
+/* true if hash <type> is present in both sets and the values differ;
+ * a hash missing on either side never counts as a difference */
+static inline bool differ(const struct checksums *a, const struct checksums *b, enum checksumtype type) {
+	if (a->parts[type].len == 0 || b->parts[type].len == 0)
+		return false;
+	if (a->parts[type].len != b->parts[type].len)
+		return true;
+	return memcmp(checksums_hashpart(a, type),
+			checksums_hashpart(b, type),
+			a->parts[type].len) != 0;
+}
+
+/* Compare two checksum sets: returns false as soon as any hash (or the
+ * size) present in both disagrees.  If improves is non-NULL, *improves
+ * is set to true when realchecksums carries hashes that checksums
+ * lacks (i.e. combining them would add information). */
+bool checksums_check(const struct checksums *checksums, const struct checksums *realchecksums, bool *improves) {
+	enum checksumtype type;
+	bool additional = false;
+
+	for (type = cs_md5sum ; type < cs_COUNT ; type++) {
+		if (differ(checksums, realchecksums, type))
+			return false;
+		if (checksums->parts[type].len == 0 &&
+		    realchecksums->parts[type].len != 0)
+			additional = true;
+	}
+	if (improves != NULL)
+		*improves = additional;
+	return true;
+}
+
+/* Print one "<name> expected: ..., got: ..." line to <f> for every
+ * hash (or size) that is present in both sets but disagrees. */
+void checksums_printdifferences(FILE *f, const struct checksums *expected, const struct checksums *got) {
+	enum checksumtype type;
+
+	for (type = cs_md5sum ; type < cs_COUNT ; type++) {
+		if (differ(expected, got, type)) {
+			fprintf(f, "%s expected: %.*s, got: %.*s\n",
+					hash_name[type],
+					(int)expected->parts[type].len,
+					checksums_hashpart(expected, type),
+					(int)got->parts[type].len,
+					checksums_hashpart(got, type));
+		}
+	}
+}
+
+/* Merge the hashes of <by> into *checksums_p: the old set is freed and
+ * replaced by a newly built one.  The leading ":x:" entries of both
+ * representations are merged keeping their type characters in sorted
+ * order; when both carry the same type, the old value wins and the
+ * entry from <by> is skipped.  The md5sum is kept from the old set
+ * unless it was "-"; the size is always taken from the old set.
+ * If improvedhashes is non-NULL, the flag for every known hash type
+ * newly supplied by <by> is set to true. */
+retvalue checksums_combine(struct checksums **checksums_p, const struct checksums *by, bool *improvedhashes) /*@requires only *checksums_p @*/ /*@ensures only *checksums_p @*/ {
+	struct checksums *old = *checksums_p, *n;
+	size_t len = checksums_totallength(old) + checksums_totallength(by);
+	const char *o, *b, *start;
+	char /*@dependent@*/ *d;
+	char typeid;
+
+	/* allocate the worst case (sum of both), shrink at the end: */
+	n = malloc(sizeof(struct checksums)+ len + 1);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	setzero(struct checksums, n);
+	o = old->representation;
+	b = by->representation;
+	d = n->representation;
+	while (*o == ':' || *b == ':') {
+		/* take from the old set while its type char sorts first
+		 * (or equal, in which case the old value wins): */
+		if (b[0] != ':' || (o[0] == ':' && o[1] <= b[1])) {
+			*(d++) = *(o++);
+			typeid = *o;
+			*(d++) = *(o++);
+			*(d++) = *(o++);
+			if (typeid == '1') {
+				start = d;
+				n->parts[cs_sha1sum].ofs = d - n->representation;
+				while (*o != ' ' && *o != '\0')
+					*(d++) = *(o++);
+				n->parts[cs_sha1sum].len = (hashlen_t)(d - start);
+			} else if (typeid == '2') {
+				start = d;
+				n->parts[cs_sha256sum].ofs = d - n->representation;
+				while (*o != ' ' && *o != '\0')
+					*(d++) = *(o++);
+				n->parts[cs_sha256sum].len = (hashlen_t)(d - start);
+			} else if (typeid == '3') {
+				start = d;
+				n->parts[cs_sha512sum].ofs = d - n->representation;
+				while (*o != ' ' && *o != '\0')
+					*(d++) = *(o++);
+				n->parts[cs_sha512sum].len = (hashlen_t)(d - start);
+			} else
+				while (*o != ' ' && *o != '\0')
+					*(d++) = *(o++);
+			assert (*o == ' ');
+			if (*o == ' ')
+				*(d++) = *(o++);
+
+			/* same type also present in <by>: skip it there */
+			if (b[0] == ':' && typeid == b[1]) {
+				while (*b != ' ' && *b != '\0')
+					b++;
+				assert (*b == ' ');
+				if (*b == ' ')
+					b++;
+			}
+		} else {
+			/* entry only in <by>: copy it and note the gain */
+			*(d++) = *(b++);
+			typeid = *b;
+			*(d++) = *(b++);
+			*(d++) = *(b++);
+			if (typeid == '1') {
+				if (improvedhashes != NULL)
+					improvedhashes[cs_sha1sum] = true;
+				start = d;
+				n->parts[cs_sha1sum].ofs = d - n->representation;
+				while (*b != ' ' && *b != '\0')
+					*(d++) = *(b++);
+				n->parts[cs_sha1sum].len = (hashlen_t)(d - start);
+			} else if (typeid == '2') {
+				if (improvedhashes != NULL)
+					improvedhashes[cs_sha256sum] = true;
+				start = d;
+				n->parts[cs_sha256sum].ofs = d - n->representation;
+				while (*b != ' ' && *b != '\0')
+					*(d++) = *(b++);
+				n->parts[cs_sha256sum].len = (hashlen_t)(d - start);
+			} else if (typeid == '3') {
+				if (improvedhashes != NULL)
+					improvedhashes[cs_sha512sum] = true;
+				start = d;
+				n->parts[cs_sha512sum].ofs = d - n->representation;
+				while (*b != ' ' && *b != '\0')
+					*(d++) = *(b++);
+				n->parts[cs_sha512sum].len = (hashlen_t)(d - start);
+			} else
+				while (*b != ' ' && *b != '\0')
+					*(d++) = *(b++);
+			assert (*b == ' ');
+			if (*b == ' ')
+				*(d++) = *(b++);
+		}
+	}
+	/* now take md5sum from original code, unless only the new one has it */
+	n->parts[cs_md5sum].ofs = d - n->representation;
+	start = d;
+	if (*o == '-' && *b != '-')
+		o = b;
+	while (*o != ' ' && *o != '\0')
+		*(d++) = *(o++);
+	n->parts[cs_md5sum].len = (hashlen_t)(d - start);
+	assert (*o == ' ');
+	if (*o == ' ')
+		*(d++) = *(o++);
+	/* and now the size */
+	n->parts[cs_length].ofs = d - n->representation;
+	start = d;
+	while (*o != '\0')
+		*(d++) = *(o++);
+	n->parts[cs_length].len = (hashlen_t)(d - start);
+	assert ((size_t)(d - n->representation) <= len);
+	*(d++) = '\0';
+	/* shrink to actual size; keep the big block if realloc fails: */
+	*checksums_p = realloc(n, sizeof(struct checksums)
+			+ (d-n->representation));
+	if (*checksums_p == NULL)
+		*checksums_p = n;
+	checksums_free(old);
+	return RET_OK;
+}
+
+/* Free all checksums, the names strlist and the checksums pointer
+ * array of <array> (the struct itself is not freed). */
+void checksumsarray_done(struct checksumsarray *array) {
+	if (array->names.count > 0) {
+		int i;
+		assert (array->checksums != NULL);
+		for (i = 0 ; i < array->names.count ; i++) {
+			checksums_free(array->checksums[i]);
+		}
+	} else
+		assert (array->checksums == NULL);
+	strlist_done(&array->names);
+	free(array->checksums);
+}
+
+/* Parse one " <hash> <size> <filename>" line as found in Files: or
+ * Checksums-*: fields.  The returned basename/hash/size are pointers
+ * into <line> (nothing is copied), with leading zeroes of the size
+ * skipped.  <cs> and <filenametoshow> are only used to label the
+ * error message.  Hash digits must be lowercase hex here. */
+retvalue hashline_parse(const char *filenametoshow, const char *line, enum checksumtype cs, const char **basename_p, struct hash_data *data_p, struct hash_data *size_p) {
+	const char *p = line;
+	const char *hash_start, *size_start, *filename;
+	size_t hash_len, size_len;
+
+	while (*p == ' ' || *p == '\t')
+		p++;
+	hash_start = p;
+	while ((*p >= '0' && *p <= '9') ||
+			(*p >= 'a' && *p <= 'f'))
+		p++;
+	hash_len = p - hash_start;
+	while (*p == ' ' || *p == '\t')
+		p++;
+	/* skip leading zeroes (but keep a final digit): */
+	while (*p == '0' && p[1] >= '0' && p[1] <= '9')
+		p++;
+	size_start = p;
+	while ((*p >= '0' && *p <= '9'))
+		p++;
+	size_len = p - size_start;
+	while (*p == ' ' || *p == '\t')
+		p++;
+	filename = p;
+	while (*p != '\0' && *p != ' ' && *p != '\t'
+			&& *p != '\r' && *p != '\n')
+		p++;
+	/* everything must be present and the filename must end the line: */
+	if (unlikely(size_len == 0 || hash_len == 0
+				|| filename == p || *p != '\0')) {
+		fprintf(stderr,
+"Error parsing %s checksum line ' %s' within '%s'\n",
+				hash_name[cs], line,
+				filenametoshow);
+		return RET_ERROR;
+	}
+	*basename_p = filename;
+	data_p->start = hash_start;
+	data_p->len = hash_len;
+	size_p->start = size_start;
+	size_p->len = size_len;
+	return RET_OK;
+}
+
+/* Parse the contents of all checksum fields (one strlist per hash type,
+ * e.g. Files:, Checksums-Sha1:, ...) into a single checksumsarray.
+ * Lines naming the same file in different fields are merged; a line
+ * contradicting a previously seen filesize is warned about and skipped.
+ * filenametoshow is only used in error/warning messages. */
+retvalue checksumsarray_parse(struct checksumsarray *out, const struct strlist l[cs_hashCOUNT], const char *filenametoshow) {
+	retvalue r;
+	int i;
+	struct checksumsarray a;
+	struct strlist filenames;
+	size_t count;
+	bool foundhashtype[cs_hashCOUNT];
+	struct hashes *parsed;
+	enum checksumtype cs;
+
+	memset(foundhashtype, 0, sizeof(foundhashtype));
+
+	/* avoid realloc by allocing the absolute maximum only
+	 * if every checksum field contains different files */
+	count = 0;
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		count += l[cs].count;
+	}
+
+	parsed = nzNEW(count, struct hashes);
+	if (FAILEDTOALLOC(parsed))
+		return RET_ERROR_OOM;
+	strlist_init_n(count + 1, &filenames);
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		for (i = 0 ; i < l[cs].count ; i++) {
+			const char *line = l[cs].values[i];
+			const char *p = line,
+				*hash_start, *size_start, *filename;
+			size_t hash_len, size_len;
+			int fileofs;
+
+			/* NOTE(review): this inline parser duplicates
+			 * hashline_parse above; keep both in sync */
+			while (*p == ' ' || *p == '\t')
+				p++;
+			hash_start = p;
+			while ((*p >= '0' && *p <= '9') ||
+					(*p >= 'a' && *p <= 'f'))
+				p++;
+			hash_len = p - hash_start;
+			while (*p == ' ' || *p == '\t')
+				p++;
+			/* strip leading zeroes from the size */
+			while (*p == '0' && p[1] >= '0' && p[1] <= '9')
+				p++;
+			size_start = p;
+			while ((*p >= '0' && *p <= '9'))
+				p++;
+			size_len = p - size_start;
+			while (*p == ' ' || *p == '\t')
+				p++;
+			filename = p;
+			while (*p != '\0' && *p != ' ' && *p != '\t'
+					&& *p != '\r' && *p != '\n')
+				p++;
+			if (unlikely(size_len == 0 || hash_len == 0
+						|| filename == p || *p != '\0')) {
+				fprintf(stderr,
+"Error parsing %s checksum line ' %s' within '%s'\n",
+						hash_name[cs], line,
+						filenametoshow);
+				strlist_done(&filenames);
+				free(parsed);
+				return RET_ERROR;
+			} else {
+				struct hash_data *hashes;
+
+				/* merge with an earlier line for this file,
+				 * or start a new entry */
+				fileofs = strlist_ofs(&filenames, filename);
+				if (fileofs == -1) {
+					fileofs = filenames.count;
+					r = strlist_add_dup(&filenames, filename);
+					if (RET_WAS_ERROR(r)) {
+						strlist_done(&filenames);
+						free(parsed);
+						return r;
+					}
+					hashes = parsed[fileofs].hashes;
+					hashes[cs_length].start = size_start;
+					hashes[cs_length].len = size_len;
+				} else {
+					hashes = parsed[fileofs].hashes;
+					/* all fields must agree on the size */
+					if (unlikely(hashes[cs_length].len
+							!= size_len
+					    || memcmp(hashes[cs_length].start,
+					      size_start, size_len) != 0)) {
+						fprintf(stderr,
+"WARNING: %s checksum line ' %s' in '%s' contradicts previous filesize!\n",
+							hash_name[cs], line,
+							filenametoshow);
+						continue;
+					}
+				}
+				hashes[cs].start = hash_start;
+				hashes[cs].len = hash_len;
+				foundhashtype[cs] = true;
+			}
+		}
+	}
+	assert (count >= (size_t)filenames.count);
+
+	/* no files at all: return an empty array */
+	if (filenames.count == 0) {
+		strlist_done(&filenames);
+		strlist_init(&out->names);
+		out->checksums = NULL;
+		free(parsed);
+		return RET_OK;
+	}
+#if 0
+// TODO: re-enable this once apt-utils is fixed for a long enough time...
+	for (i = 0 ; i < filenames.count ; i++) {
+		for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+			if (!foundhashtype[cs])
+				continue;
+			if (parsed[i].hashes[cs].start == NULL) {
+				fprintf(stderr,
+"WARNING: Inconsistent hashes in %s: '%s' missing %s!\n",
+					filenametoshow,
+					filenames.values[i],
+					hash_name[cs]);
+				r = RET_ERROR;
+				/* show one per file, but list all problematic files */
+				break;
+			}
+		}
+	}
+#endif
+	a.checksums = nzNEW(filenames.count+1, struct checksums *);
+	if (FAILEDTOALLOC(a.checksums)) {
+		strlist_done(&filenames);
+		free(parsed);
+		return RET_ERROR_OOM;
+	}
+	strlist_move(&a.names, &filenames);
+
+	/* combine the collected raw hash data into checksums records */
+	for (i = 0 ; i < a.names.count ; i++) {
+		r = checksums_initialize(a.checksums + i, parsed[i].hashes);
+		if (RET_WAS_ERROR(r)) {
+			free(parsed);
+			checksumsarray_done(&a);
+			return r;
+		}
+	}
+	checksumsarray_move(out, &a);
+	free(parsed);
+	return RET_OK;
+}
+
+/* Generate the "Files:"/"Checksums-Sha1:"/"Checksums-Sha256:" style file
+ * lists for all entries of the array.  A hash type missing for at least
+ * one file yields a NULL list (md5 is special-cased: a missing md5sum is
+ * written as "-").  Each returned string starts with a newline, ready to
+ * be appended after the field name. */
+retvalue checksumsarray_genfilelist(const struct checksumsarray *a, char **md5_p, char **sha1_p, char **sha256_p) {
+	size_t lens[cs_hashCOUNT];
+	bool missing[cs_hashCOUNT];
+	char *filelines[cs_hashCOUNT];
+	int i;
+	enum checksumtype cs;
+	size_t filenamelen[a->names.count];
+
+	memset(missing, 0, sizeof(missing));
+	memset(lens, 0, sizeof(lens));
+
+	/* first pass: compute the buffer size needed per hash type */
+	for (i=0 ; i < a->names.count ; i++) {
+		const struct checksums *checksums = a->checksums[i];
+		size_t len;
+
+		/* check before the first dereference, not after it */
+		assert (checksums != NULL);
+
+		filenamelen[i] = strlen(a->names.values[i]);
+
+		/* '\n' + ' ' + hash + ' ' + size + ' ' + name */
+		len = 4 + filenamelen[i] + checksums->parts[cs_length].len;
+		if (checksums->parts[cs_md5sum].len == 0)
+			/* a missing md5sum is rendered as "-" */
+			lens[cs_md5sum] += len + 1;
+		else
+			lens[cs_md5sum] += len + checksums->parts[cs_md5sum].len;
+		for (cs = cs_md5sum+1 ; cs < cs_hashCOUNT ; cs++) {
+			if (checksums->parts[cs].len == 0)
+				missing[cs] = true;
+			lens[cs] += len + checksums->parts[cs].len;
+		}
+	}
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		if (missing[cs])
+			filelines[cs] = NULL;
+		else {
+			filelines[cs] = malloc(lens[cs] + 1);
+			if (FAILEDTOALLOC(filelines[cs])) {
+				while (cs-- > cs_md5sum)
+					free(filelines[cs]);
+				return RET_ERROR_OOM;
+			}
+		}
+	}
+	/* second pass: write the actual lines */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		char *p;
+
+		if (missing[cs])
+			continue;
+
+		p = filelines[cs];
+		*(p++) = '\n';
+		for (i=0 ; i < a->names.count ; i++) {
+			const struct checksums *c = a->checksums[i];
+
+			*(p++) = ' ';
+			if (c->parts[cs].len == 0) {
+				/* only possible for md5sum, see above */
+				*(p++) = '-';
+			} else {
+				memcpy(p, checksums_hashpart(c, cs),
+						c->parts[cs].len);
+				p += c->parts[cs].len;
+			}
+			*(p++) = ' ';
+			memcpy(p, checksums_hashpart(c, cs_length),
+					c->parts[cs_length].len);
+			p += c->parts[cs_length].len;
+			*(p++) = ' ';
+			memcpy(p, a->names.values[i], filenamelen[i]);
+			p += filenamelen[i];
+			*(p++) = '\n';
+		}
+		/* replace the final newline with the terminating NUL */
+		*(--p) = '\0';
+		assert ((size_t)(p - filelines[cs]) == lens[cs]);
+	}
+	*md5_p = filelines[cs_md5sum];
+	*sha1_p = filelines[cs_sha1sum];
+	*sha256_p = filelines[cs_sha256sum];
+	return RET_OK;
+}
+
+/* Transfer ownership of both the names and the checksums from origin to
+ * destination; origin keeps no reference to the checksums afterwards. */
+void checksumsarray_move(/*@out@*/struct checksumsarray *destination, struct checksumsarray *origin) {
+	struct checksums **taken;
+
+	/* the string list has its own move helper */
+	strlist_move(&destination->names, &origin->names);
+	/* steal the checksums pointer and disown it in the origin */
+	taken = origin->checksums;
+	origin->checksums = NULL;
+	destination->checksums = taken;
+}
+
+/* Clear (set to false) every entry of 'types' whose hash is not present
+ * in at least one of the checksums of the array. */
+void checksumsarray_resetunsupported(const struct checksumsarray *a, bool *types) {
+	enum checksumtype type;
+	int idx;
+
+	for (type = cs_md5sum ; type < cs_hashCOUNT ; type++) {
+		for (idx = 0 ; idx < a->names.count ; idx++) {
+			if (a->checksums[idx]->parts[type].len == 0) {
+				types[type] = false;
+				break;
+			}
+		}
+	}
+}
+
+/* Add a file to the array as its new first element.  'name' is taken over
+ * (freed even on error); 'checksums' is duplicated.  The checksums array
+ * is rebuilt with the new entry at index 0, so names and checksums stay
+ * aligned (strlist_include is expected to prepend the name). */
+retvalue checksumsarray_include(struct checksumsarray *a, /*@only@*/char *name, const struct checksums *checksums) {
+	retvalue r;
+	struct checksums **n;
+	int count = a->names.count;
+
+	n = nNEW(count + 1, struct checksums *);
+	if (FAILEDTOALLOC(n)) {
+		free(name);
+		return RET_ERROR_OOM;
+	}
+	/* new entry goes to the front */
+	n[0] = checksums_dup(checksums);
+	if (FAILEDTOALLOC(n[0])) {
+		free(name);
+		free(n);
+		return RET_ERROR_OOM;
+	}
+	r = strlist_include(&a->names, name);
+	if (!RET_IS_OK(r)) {
+		checksums_free(n[0]);
+		free(n);
+		return r;
+	}
+	assert (a->names.count == count + 1);
+	/* old entries follow, shifted by one */
+	if (count > 0) {
+		assert (a->checksums != NULL);
+		memcpy(&n[1], a->checksums, count*sizeof(struct checksums*));
+	}
+	free(a->checksums);
+	a->checksums = n;
+	return RET_OK;
+}
+
+/* check if the file has the given md5sum (only cheap tests like size),
+ * RET_NOTHING means file does not exist, RET_ERROR_WRONG_MD5 means wrong size */
+retvalue checksums_cheaptest(const char *fullfilename, const struct checksums *checksums, bool complain) {
+	off_t expectedsize;
+	int i;
+	struct stat s;
+
+	i = stat(fullfilename, &s);
+	if (i < 0) {
+		i = errno;
+		/* missing or unreadable counts as "not there" */
+		if (i == EACCES || i == ENOENT)
+			return RET_NOTHING;
+		else {
+			fprintf(stderr, "Error %d stating '%s': %s!\n",
+					i, fullfilename, strerror(i));
+			return RET_ERRNO(i);
+		}
+	}
+
+	expectedsize = checksums_getfilesize(checksums);
+
+	/* only the size is checked here; hashes need checksums_test */
+	if (s.st_size == expectedsize)
+		return RET_OK;
+	if (complain)
+		fprintf(stderr,
+			"WRONG SIZE of '%s': expected %lld found %lld\n",
+			fullfilename,
+			(long long)expectedsize,
+			(long long)s.st_size);
+	return RET_ERROR_WRONG_MD5;
+}
+
+/* Fully verify a file against 'checksums': RET_NOTHING if the file is
+ * missing, RET_ERROR_WRONG_MD5 on mismatch.  If the file provides hash
+ * types the record lacks and checksums_p is non-NULL, the improved
+ * record is stored there (*checksums_p may start out NULL). */
+retvalue checksums_test(const char *filename, const struct checksums *checksums, struct checksums **checksums_p) {
+	retvalue r;
+	struct checksums *filechecksums;
+	bool improves;
+
+	/* check if it is there and has the correct size */
+	r = checksums_cheaptest(filename, checksums, false);
+	/* if it is, read its checksums */
+	if (RET_IS_OK(r))
+		r = checksums_read(filename, &filechecksums);
+	if (!RET_IS_OK(r))
+		return r;
+	if (!checksums_check(checksums, filechecksums, &improves)) {
+		checksums_free(filechecksums);
+		return RET_ERROR_WRONG_MD5;
+	}
+	if (improves && checksums_p != NULL) {
+		/* start from a copy of the expected record if none given */
+		if (*checksums_p == NULL) {
+			*checksums_p = checksums_dup(checksums);
+			if (FAILEDTOALLOC(*checksums_p)) {
+				checksums_free(filechecksums);
+				return RET_ERROR_OOM;
+			}
+		}
+		r = checksums_combine(checksums_p, filechecksums, NULL);
+		if (RET_WAS_ERROR(r)) {
+			checksums_free(filechecksums);
+			return r;
+		}
+	}
+	checksums_free(filechecksums);
+	return RET_OK;
+}
+
+/* copy, only checking file size, perhaps add some paranoia checks later.
+ * The destination must not yet exist (O_EXCL); on any error the partly
+ * written destination is deleted again. */
+static retvalue copy(const char *destination, const char *source, const struct checksums *checksums) {
+	off_t filesize = 0, expected;
+	static const size_t bufsize = 16384;
+	char *buffer = malloc(bufsize);
+	ssize_t sizeread, towrite, written;
+	const char *start;
+	int e, i;
+	int infd, outfd;
+
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+
+	infd = open(source, O_RDONLY);
+	if (infd < 0) {
+		e = errno;
+		fprintf(stderr, "Error %d opening '%s': %s\n",
+				e, source, strerror(e));
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	outfd = open(destination, O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL, 0666);
+	if (outfd < 0) {
+		e = errno;
+		fprintf(stderr, "Error %d creating '%s': %s\n",
+				e, destination, strerror(e));
+		(void)close(infd);
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	filesize = 0;
+	do {
+		sizeread = read(infd, buffer, bufsize);
+		if (sizeread < 0) {
+			e = errno;
+			fprintf(stderr, "Error %d while reading %s: %s\n",
+					e, source, strerror(e));
+			free(buffer);
+			(void)close(infd); (void)close(outfd);
+			deletefile(destination);
+			return RET_ERRNO(e);
+		}
+		filesize += sizeread;
+		towrite = sizeread;
+		start = buffer;
+		/* a short write is not an error, keep writing */
+		while (towrite > 0) {
+			written = write(outfd, start, (size_t)towrite);
+			if (written < 0) {
+				e = errno;
+				fprintf(stderr,
+"Error %d while writing to %s: %s\n",
+					e, destination, strerror(e));
+				free(buffer);
+				(void)close(infd); (void)close(outfd);
+				deletefile(destination);
+				return RET_ERRNO(e);
+			}
+			towrite -= written;
+			start += written;
+		}
+	} while (sizeread > 0);
+	free(buffer);
+	i = close(infd);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				e, source, strerror(e));
+		(void)close(outfd);
+		deletefile(destination);
+		return RET_ERRNO(e);
+	}
+	/* close flushes: failure here means the copy is incomplete */
+	i = close(outfd);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d writing to %s: %s\n",
+				e, destination, strerror(e));
+		deletefile(destination);
+		return RET_ERRNO(e);
+	}
+	expected = checksums_getfilesize(checksums);
+	if (filesize != expected) {
+		fprintf(stderr,
+"Error copying %s to %s:\n"
+" File seems to be of size %llu, while %llu was expected!\n",
+			source, destination,
+			(unsigned long long)filesize,
+			(unsigned long long)expected);
+		deletefile(destination);
+		return RET_ERROR_WRONG_MD5;
+	}
+	return RET_OK;
+}
+
+/* Create <directory>/<filekey> as a hardlink to sourcefilename.  Retries
+ * after removing an existing target (EEXIST) and after creating missing
+ * parent directories.  When hardlinking is impossible across the
+ * filesystem boundary or otherwise (EXDEV/EPERM/EMLINK), falls back to
+ * a plain copy (verified only by file size). */
+retvalue checksums_hardlink(const char *directory, const char *filekey, const char *sourcefilename, const struct checksums *checksums) {
+	retvalue r;
+	int i, e;
+	char *fullfilename = calc_dirconcat(directory, filekey);
+	if (FAILEDTOALLOC(fullfilename))
+		return RET_ERROR_OOM;
+
+	i = link(sourcefilename, fullfilename);
+	e = errno;
+	/* target already there: replace it */
+	if (i != 0 && e == EEXIST) {
+		(void)unlink(fullfilename);
+		errno = 0;
+		i = link(sourcefilename, fullfilename);
+		e = errno;
+	}
+	/* missing parent directory: create it and retry */
+	if (i != 0 && (e == EACCES || e == ENOENT || e == ENOTDIR)) {
+		errno = 0;
+		(void)dirs_make_parent(fullfilename);
+		i = link(sourcefilename, fullfilename);
+		e = errno;
+	}
+	if (i != 0) {
+		if (e == EXDEV || e == EPERM || e == EMLINK) {
+			/* linking not possible here, copy instead */
+			r = copy(fullfilename, sourcefilename, checksums);
+			if (RET_WAS_ERROR(r)) {
+				free(fullfilename);
+				return r;
+			}
+		} else {
+			fprintf(stderr,
+"Error %d creating hardlink of '%s' as '%s': %s\n",
+				e, sourcefilename, fullfilename, strerror(e));
+			free(fullfilename);
+			return RET_ERRNO(e);
+		}
+	}
+	free(fullfilename);
+	return RET_OK;
+}
+
+/* start a fresh digest computation for all supported hash types at once */
+void checksumscontext_init(struct checksumscontext *context) {
+	MD5Init(&context->md5);
+	SHA1Init(&context->sha1);
+	SHA256Init(&context->sha256);
+	SHA512Init(&context->sha512);
+}
+
+/* feed a chunk of data into all digest computations of the context */
+void checksumscontext_update(struct checksumscontext *context, const unsigned char *data, size_t len) {
+	MD5Update(&context->md5, data, len);
+// TODO: sha1 sha256 and sha512 share quite some stuff,
+// the code can most likely be combined with quite some synergies..
+	SHA1Update(&context->sha1, data, len);
+	SHA256Update(&context->sha256, data, len);
+	SHA512Update(&context->sha512, data, len);
+}
+
+/* lower-case hex digits used to render hash bytes as text */
+static const char tab[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
+			'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+
+/* Build a checksums record from a finished digest context.  The combined
+ * textual representation written into the record has the layout
+ *   ":1:<sha1> :2:<sha256> :3:<sha512> <md5sum> <size>"
+ * with parts[] recording offset and length of each component. */
+retvalue checksums_from_context(struct checksums **out, struct checksumscontext *context) {
+	unsigned char md5buffer[MD5_DIGEST_SIZE], sha1buffer[SHA1_DIGEST_SIZE],
+		sha256buffer[SHA256_DIGEST_SIZE], sha512buffer[SHA512_DIGEST_SIZE];
+	char *d;
+	unsigned int i;
+	struct checksums *n;
+
+	/* one allocation: struct plus the full representation string
+	 * (hex digests, separators and the decimal size) */
+	n = malloc(sizeof(struct checksums) + 2*MD5_DIGEST_SIZE
+			+ 2*SHA1_DIGEST_SIZE + 2*SHA256_DIGEST_SIZE + 2*SHA512_DIGEST_SIZE + 30);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	setzero(struct checksums, n);
+	d = n->representation;
+	/* ":1:" marks the sha1 part */
+	*(d++) = ':';
+	*(d++) = '1';
+	*(d++) = ':';
+	n->parts[cs_sha1sum].ofs = 3;
+	n->parts[cs_sha1sum].len = 2*SHA1_DIGEST_SIZE;
+	SHA1Final(&context->sha1, sha1buffer);
+	for (i = 0 ; i < SHA1_DIGEST_SIZE ; i++) {
+		*(d++) = tab[sha1buffer[i] >> 4];
+		*(d++) = tab[sha1buffer[i] & 0xF];
+	}
+	*(d++) = ' ';
+
+	/* ":2:" marks the sha256 part */
+	*(d++) = ':';
+	*(d++) = '2';
+	*(d++) = ':';
+	n->parts[cs_sha256sum].ofs = d - n->representation;
+	n->parts[cs_sha256sum].len = 2*SHA256_DIGEST_SIZE;
+	SHA256Final(&context->sha256, sha256buffer);
+	for (i = 0 ; i < SHA256_DIGEST_SIZE ; i++) {
+		*(d++) = tab[sha256buffer[i] >> 4];
+		*(d++) = tab[sha256buffer[i] & 0xF];
+	}
+	*(d++) = ' ';
+
+	/* ":3:" marks the sha512 part */
+	*(d++) = ':';
+	*(d++) = '3';
+	*(d++) = ':';
+	n->parts[cs_sha512sum].ofs = d - n->representation;
+	n->parts[cs_sha512sum].len = 2*SHA512_DIGEST_SIZE;
+	SHA512Final(&context->sha512, sha512buffer);
+	for (i = 0 ; i < SHA512_DIGEST_SIZE ; i++) {
+		*(d++) = tab[sha512buffer[i] >> 4];
+		*(d++) = tab[sha512buffer[i] & 0xF];
+	}
+	*(d++) = ' ';
+
+	/* the md5sum has no prefix marker */
+	n->parts[cs_md5sum].ofs = d - n->representation;
+	/* the assert catches truncation in the narrow ofs field */
+	assert (d - n->representation == n->parts[cs_md5sum].ofs);
+	n->parts[cs_md5sum].len = 2*MD5_DIGEST_SIZE;
+	MD5Final(md5buffer, &context->md5);
+	for (i=0 ; i < MD5_DIGEST_SIZE ; i++) {
+		*(d++) = tab[md5buffer[i] >> 4];
+		*(d++) = tab[md5buffer[i] & 0xF];
+	}
+	*(d++) = ' ';
+	/* finally the size in decimal, taken from the sha1 byte count */
+	n->parts[cs_length].ofs = d - n->representation;
+	assert (d - n->representation == n->parts[cs_length].ofs);
+	n->parts[cs_length].len = (hashlen_t)snprintf(d,
+			2*MD5_DIGEST_SIZE + 2*SHA1_DIGEST_SIZE
+			+ 2*SHA256_DIGEST_SIZE + 2*SHA512_DIGEST_SIZE + 30
+			- (d - n->representation), "%lld",
+			(long long)context->sha1.count);
+	assert (strlen(d) == n->parts[cs_length].len);
+	*out = n;
+	return RET_OK;
+}
+
+/* true iff every supported hash type (md5, sha1, sha256, sha512) is
+ * present in the record */
+bool checksums_iscomplete(const struct checksums *checksums) {
+	enum checksumtype cs;
+
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		if (checksums->parts[cs].len == 0)
+			return false;
+	}
+	return true;
+}
+
+/* Collect missing checksums.
+ * if the file is not there, return RET_NOTHING.
+ * return RET_ERROR_WRONG_MD5 if already existing do not match */
+retvalue checksums_complete(struct checksums **checksums_p, const char *fullfilename) {
+	if (!checksums_iscomplete(*checksums_p))
+		return checksums_test(fullfilename, *checksums_p, checksums_p);
+	/* everything already known, nothing to read */
+	return RET_OK;
+}
+
+/* Read a file and compute all supported checksums over its contents.
+ * Returns RET_NOTHING if the file does not exist (and is not a regular
+ * file), an errno-based error otherwise. */
+retvalue checksums_read(const char *fullfilename, /*@out@*/struct checksums **checksums_p) {
+	struct checksumscontext context;
+	static const size_t bufsize = 16384;
+	unsigned char *buffer = malloc(bufsize);
+	ssize_t sizeread;
+	int e, i;
+	int infd;
+
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+
+	checksumscontext_init(&context);
+
+	infd = open(fullfilename, O_RDONLY);
+	if (infd < 0) {
+		e = errno;
+		/* a missing file is "nothing", not an error */
+		if ((e == EACCES || e == ENOENT) &&
+				!isregularfile(fullfilename)) {
+			free(buffer);
+			return RET_NOTHING;
+		}
+		fprintf(stderr, "Error %d opening '%s': %s\n",
+				e, fullfilename, strerror(e));
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	do {
+		sizeread = read(infd, buffer, bufsize);
+		if (sizeread < 0) {
+			e = errno;
+			fprintf(stderr, "Error %d while reading %s: %s\n",
+					e, fullfilename, strerror(e));
+			free(buffer);
+			(void)close(infd);
+			return RET_ERRNO(e);
+		}
+		checksumscontext_update(&context, buffer, (size_t)sizeread);
+	} while (sizeread > 0);
+	free(buffer);
+	i = close(infd);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				e, fullfilename, strerror(e));
+		return RET_ERRNO(e);
+	}
+	return checksums_from_context(checksums_p, &context);
+}
+
+/* Copy source to destination while computing all checksums of the data.
+ * If the destination already exists it is either replaced (deletetarget)
+ * or RET_ERROR_EXIST is returned.  On any error the partly written
+ * destination is deleted again. */
+retvalue checksums_copyfile(const char *destination, const char *source, bool deletetarget, struct checksums **checksums_p) {
+	struct checksumscontext context;
+	static const size_t bufsize = 16384;
+	unsigned char *buffer = malloc(bufsize);
+	ssize_t sizeread, towrite, written;
+	const unsigned char *start;
+	int e, i;
+	int infd, outfd;
+
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+
+	infd = open(source, O_RDONLY);
+	if (infd < 0) {
+		e = errno;
+		fprintf(stderr, "Error %d opening '%s': %s\n",
+				e, source, strerror(e));
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	outfd = open(destination, O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL, 0666);
+	if (outfd < 0) {
+		e = errno;
+		if (e == EEXIST) {
+			if (deletetarget) {
+				/* remove the old file and try once more */
+				i = unlink(destination);
+				if (i != 0) {
+					e = errno;
+					fprintf(stderr,
+"Error %d deleting '%s': %s\n",
+						e, destination, strerror(e));
+					(void)close(infd);
+					free(buffer);
+					return RET_ERRNO(e);
+				}
+				outfd = open(destination,
+						O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL,
+						0666);
+				e = errno;
+			} else {
+				(void)close(infd);
+				free(buffer);
+				return RET_ERROR_EXIST;
+			}
+		}
+		if (outfd < 0) {
+			fprintf(stderr,
+"Error %d creating '%s': %s\n",
+					e, destination, strerror(e));
+			(void)close(infd);
+			free(buffer);
+			return RET_ERRNO(e);
+		}
+	}
+	checksumscontext_init(&context);
+	do {
+		sizeread = read(infd, buffer, bufsize);
+		if (sizeread < 0) {
+			e = errno;
+			fprintf(stderr, "Error %d while reading %s: %s\n",
+					e, source, strerror(e));
+			free(buffer);
+			(void)close(infd); (void)close(outfd);
+			deletefile(destination);
+			return RET_ERRNO(e);
+		}
+		/* hash the data as it passes through */
+		checksumscontext_update(&context, buffer, (size_t)sizeread);
+		towrite = sizeread;
+		start = buffer;
+		/* a short write is not an error, keep writing */
+		while (towrite > 0) {
+			written = write(outfd, start, (size_t)towrite);
+			if (written < 0) {
+				e = errno;
+				fprintf(stderr,
+"Error %d while writing to %s: %s\n",
+					e, destination, strerror(e));
+				free(buffer);
+				(void)close(infd); (void)close(outfd);
+				deletefile(destination);
+				return RET_ERRNO(e);
+			}
+			towrite -= written;
+			start += written;
+		}
+	} while (sizeread > 0);
+	free(buffer);
+	i = close(infd);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				e, source, strerror(e));
+		(void)close(outfd);
+		deletefile(destination);
+		return RET_ERRNO(e);
+	}
+	/* close flushes: failure here means the copy is incomplete */
+	i = close(outfd);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d writing to %s: %s\n",
+				e, destination, strerror(e));
+		deletefile(destination);
+		return RET_ERRNO(e);
+	}
+	return checksums_from_context(checksums_p, &context);
+}
+
+/* Hardlink source to destination, falling back to a checksumming copy.
+ * When the link succeeds, *checksums_p is set to NULL (no data was read,
+ * so nothing was computed); otherwise checksums_copyfile fills it. */
+retvalue checksums_linkorcopyfile(const char *destination, const char *source, struct checksums **checksums_p) {
+	int i;
+	retvalue r;
+
+	// TODO: is this needed? perhaps move this duty to the caller...
+	r = dirs_make_parent(destination);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	errno = 0;
+	i = link(source, destination);
+	/* any link failure (incl. EXDEV, EEXIST) falls back to copying */
+	if (i != 0)
+		return checksums_copyfile(destination, source, true, checksums_p);
+	*checksums_p = NULL;
+	return RET_OK;
+}
+
+/* Atomically replace the contents of 'filename' with 'data' (len bytes)
+ * by writing a "<filename>new" temporary and renaming it into place.
+ * If checksums_p is non-NULL, the checksums of the new contents are
+ * computed (from the in-memory data) and stored there. */
+retvalue checksums_replace(const char *filename, const char *data, size_t len, struct checksums **checksums_p){
+	struct checksumscontext context;
+	size_t todo; const char *towrite;
+	char *tempfilename;
+	struct checksums *checksums;
+	int fd, ret;
+	retvalue r;
+
+	tempfilename = calc_addsuffix(filename, "new");
+	if (FAILEDTOALLOC(tempfilename))
+		return RET_ERROR_OOM;
+
+	fd = open(tempfilename, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "ERROR creating '%s': %s\n", tempfilename,
+				strerror(e));
+		free(tempfilename);
+		return RET_ERRNO(e);
+	}
+
+	/* write everything, tolerating short writes */
+	todo = len; towrite = data;
+	while (todo > 0) {
+		ssize_t written = write(fd, towrite, todo);
+		if (written >= 0) {
+			todo -= written;
+			towrite += written;
+		} else {
+			int e = errno;
+			close(fd);
+			fprintf(stderr, "Error writing to '%s': %s\n",
+					tempfilename, strerror(e));
+			unlink(tempfilename);
+			free(tempfilename);
+			return RET_ERRNO(e);
+		}
+	}
+	/* close flushes: a failure here means the data is not on disk */
+	ret = close(fd);
+	if (ret < 0) {
+		int e = errno;
+		fprintf(stderr, "Error writing to '%s': %s\n",
+				tempfilename, strerror(e));
+		unlink(tempfilename);
+		free(tempfilename);
+		return RET_ERRNO(e);
+	}
+
+	/* hash the buffer directly instead of re-reading the file */
+	if (checksums_p != NULL) {
+		checksumscontext_init(&context);
+		checksumscontext_update(&context, (const unsigned char *)data, len);
+		r = checksums_from_context(&checksums, &context);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			unlink(tempfilename);
+			free(tempfilename);
+			return r;
+		}
+	} else
+		checksums = NULL;
+	ret = rename(tempfilename, filename);
+	if (ret < 0) {
+		int e = errno;
+		fprintf(stderr, "Error moving '%s' to '%s': %s\n",
+				tempfilename, filename, strerror(e));
+		unlink(tempfilename);
+		free(tempfilename);
+		checksums_free(checksums);
+		return RET_ERRNO(e);
+	}
+	free(tempfilename);
+	if (checksums_p != NULL)
+		*checksums_p = checksums;
+	return RET_OK;
+}
+
+/* mapping of hash names (as written in config files) to checksum types;
+ * NULL-terminated, also exported under the alias 'hashnames' */
+const struct constant hashes_constants[cs_hashCOUNT+1] = {
+	{"md5", cs_md5sum},
+	{"sha1", cs_sha1sum},
+	{"sha256", cs_sha256sum},
+	{"sha512", cs_sha512sum},
+	{NULL, 0}
+}, *hashnames = hashes_constants;
diff --git a/checksums.h b/checksums.h
new file mode 100644
index 0000000..8c5da9b
--- /dev/null
+++ b/checksums.h
@@ -0,0 +1,145 @@
+#ifndef REPREPRO_CHECKSUMS_H
+#define REPREPRO_CHECKSUMS_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+enum checksumtype {
+	/* must be first */
+	cs_md5sum,
+	/* additional hashes: */
+#define cs_firstEXTENDED cs_sha1sum
+	cs_sha1sum,
+	cs_sha256sum,
+	cs_sha512sum,
+#define cs_hashCOUNT cs_length
+	/* must be last but one: pseudo-type holding the file size */
+	cs_length,
+	/* must be last */
+	cs_COUNT };
+
+struct checksums;
+
+extern const char * const changes_checksum_names[];
+extern const char * const source_checksum_names[];
+extern const char * const release_checksum_names[];
+extern const struct constant *hashnames;
+
+/* raw hash values of one file, one entry per checksum type;
+ * start/len point into externally owned line buffers (nothing copied) */
+struct hashes {
+	struct hash_data {
+		const char *start; size_t len;
+	} hashes[cs_COUNT];
+};
+
+
+void checksums_free(/*@only@*//*@null@*/struct checksums *);
+
+/* duplicate a checksum record, NULL means OOM */
+/*@null@*/struct checksums *checksums_dup(const struct checksums *);
+
+retvalue checksums_setall(/*@out@*/struct checksums **checksums_p, const char *combinedchecksum, size_t len);
+
+retvalue checksums_initialize(/*@out@*/struct checksums **checksums_p, const struct hash_data *);
+/* hashes[*] is free'd: */
+retvalue checksums_init(/*@out@*/struct checksums **, char *hashes[cs_COUNT]);
+retvalue checksums_parse(/*@out@*/struct checksums **, const char *);
+
+off_t checksums_getfilesize(const struct checksums *);
+
+/* get 0-terminated combined textual representation of the checksums,
+ * including the size (including the trailing '\0'): */
+retvalue checksums_getcombined(const struct checksums *, /*@out@*/const char **, /*@out@*/size_t *);
+
+/* get a static pointer to a specific part of a checksum (wihtout size) */
+bool checksums_getpart(const struct checksums *, enum checksumtype, /*@out@*/const char **, /*@out@*/size_t *);
+/* extract a single checksum from the combined data: */
+bool checksums_gethashpart(const struct checksums *, enum checksumtype, /*@out@*/const char **hash_p, /*@out@*/size_t *hashlen_p, /*@out@*/const char **size_p, /*@out@*/size_t *sizelen_p);
+
+/* check if a single checksum fits */
+bool checksums_matches(const struct checksums *, enum checksumtype, const char *);
+
+/* Copy file <origin> to file <destination>, calculating checksums */
+retvalue checksums_copyfile(const char * /*destination*/, const char * /*origin*/, bool /*deletetarget*/, /*@out@*/struct checksums **);
+retvalue checksums_hardlink(const char * /*directory*/, const char * /*filekey*/, const char * /*sourcefilename*/, const struct checksums *);
+
+retvalue checksums_linkorcopyfile(const char * /*destination*/, const char * /*origin*/, /*@out@*/struct checksums **);
+
+/* calculare checksums of a file: */
+retvalue checksums_read(const char * /*fullfilename*/, /*@out@*/struct checksums **);
+
+/* replace the contents of a file with data and calculate the new checksums */
+retvalue checksums_replace(const char * /*filename*/, const char *, size_t, /*@out@*//*@null@*/struct checksums **);
+
+/* check if the file has the given md5sum (only cheap tests like size),
+ * RET_NOTHING means file does not exist,
+ * RET_ERROR_WRONG_MD5 means wrong size */
+retvalue checksums_cheaptest(const char * /*fullfilename*/, const struct checksums *, bool);
+
+/* check if filename has specified checksums, if not return RET_ERROR_WRONG_MD5,
+ * if it has, and checksums_p put the improved checksum there
+ * (*checksums_p should either be NULL or checksums) */
+retvalue checksums_test(const char *, const struct checksums *, /*@null@*/struct checksums **);
+
+/* check if checksum of filekey in database and checksum of actual file, set improve if some new has is in the last */
+bool checksums_check(const struct checksums *, const struct checksums *, /*@out@*/bool * /*improves_p*/);
+
+/* return true, iff all supported checksums are available */
+bool checksums_iscomplete(const struct checksums *);
+
+/* Collect missing checksums (if all are there always RET_OK without checking).
+ * if the file is not there, return RET_NOTHING,
+ * if it is but not matches, return RET_ERROR_WRONG_MD5 */
+retvalue checksums_complete(struct checksums **, const char * /*fullfilename*/);
+
+void checksums_printdifferences(FILE *, const struct checksums * /*expected*/, const struct checksums * /*got*/);
+
+retvalue checksums_combine(struct checksums **, const struct checksums *, /*@null@*/bool[cs_hashCOUNT]);
+
+typedef /*@only@*/ struct checksums *ownedchecksums;
+/* filenames with their (owned) checksums in parallel arrays:
+ * checksums[i] belongs to names.values[i] */
+struct checksumsarray {
+	struct strlist names;
+	/*@null@*/ownedchecksums *checksums;
+};
+void checksumsarray_move(/*@out@*/struct checksumsarray *, /*@special@*/struct checksumsarray *array)/*@requires maxSet(array->names.values) >= array->names.count /\ maxSet(array->checksums) >= array->names.count @*/ /*@releases array->checksums, array->names.values @*/;
+void checksumsarray_done(/*@special@*/struct checksumsarray *array) /*@requires maxSet(array->names.values) >= array->names.count /\ maxSet(array->checksums) >= array->names.count @*/ /*@releases array->checksums, array->names.values @*/;
+retvalue checksumsarray_parse(/*@out@*/struct checksumsarray *, const struct strlist [cs_hashCOUNT], const char * /*filenametoshow*/);
+retvalue checksumsarray_genfilelist(const struct checksumsarray *, /*@out@*/char **, /*@out@*/char **, /*@out@*/char **);
+retvalue checksumsarray_include(struct checksumsarray *, /*@only@*/char *, const struct checksums *);
+void checksumsarray_resetunsupported(const struct checksumsarray *, bool[cs_hashCOUNT]);
+
+retvalue hashline_parse(const char * /*filenametoshow*/, const char * /*line*/, enum checksumtype, /*@out@*/const char ** /*basename_p*/, /*@out@*/struct hash_data *, /*@out@*/struct hash_data *);
+
+struct configiterator;
+
+#ifdef CHECKSUMS_CONTEXT
+#ifndef MD5_H
+#include "md5.h"
+#endif
+#ifndef REPREPRO_SHA1_H
+#include "sha1.h"
+#endif
+#ifndef REPREPRO_SHA256_H
+#include "sha256.h"
+#endif
+#ifndef REPREPRO_SHA512_H
+#include "sha512.h"
+#endif
+
+/* running digest state for all supported hash types at once */
+struct checksumscontext {
+	struct MD5Context md5;
+	struct SHA1_Context sha1;
+	struct SHA256_Context sha256;
+	struct SHA512_Context sha512;
+};
+
+void checksumscontext_init(/*@out@*/struct checksumscontext *);
+void checksumscontext_update(struct checksumscontext *, const unsigned char *, size_t);
+retvalue checksums_from_context(/*@out@*/struct checksums **, struct checksumscontext *);
+#endif
+
+#endif
diff --git a/chunkedit.c b/chunkedit.c
new file mode 100644
index 0000000..9bf5630
--- /dev/null
+++ b/chunkedit.c
@@ -0,0 +1,452 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <assert.h>
+#include "error.h"
+#include "chunkedit.h"
+#include "names.h"
+
+/* Free a whole linked list of chunkeditfield, including each line's
+ * word arrays. The field name, data and word strings themselves are
+ * borrowed (not owned) and are therefore not freed here. */
+void cef_free(struct chunkeditfield *f) {
+ while (f != NULL) {
+ int i;
+ struct chunkeditfield *p = f;
+ f = f->next;
+
+ for (i = 0 ; i < p->linecount ; i++) {
+ free(p->lines[i].words);
+ free(p->lines[i].wordlen);
+ }
+ free(p);
+ }
+}
+
+/* Allocate a new edit entry for <field>, prepended to list <next>, with
+ * room for <linecount> trailing cef_line slots (flexible array member).
+ * On allocation failure the whole <next> list is freed and NULL is
+ * returned, as documented in chunkedit.h. <field> is borrowed, not copied. */
+struct chunkeditfield *cef_newfield(const char *field, enum cefaction action, enum cefwhen when, unsigned int linecount, struct chunkeditfield *next) {
+ struct chunkeditfield *n;
+
+ /* calloc also zeroes the cef_line slots, so cef_setline's
+ * "not yet set" asserts hold */
+ n = calloc(1, sizeof(struct chunkeditfield) +
+ linecount * sizeof(struct cef_line));
+ if (FAILEDTOALLOC(n)) {
+ cef_free(next);
+ return NULL;
+ }
+ assert(field != NULL);
+ n->field = field;
+ n->len_field = strlen(field);
+ n->action = action;
+ n->when = when;
+ n->linecount = linecount;
+ n->next = next;
+ return n;
+}
+
+
+/* Attach raw field data of the given length (borrowed pointer).
+ * len_all_data is adjusted by replacing whatever length a previous
+ * data/wordlist contributed; any earlier word list is detached. */
+void cef_setdatalen(struct chunkeditfield *cef, const char *data, size_t len) {
+ assert (data != NULL || len == 0);
+
+ assert (cef->len_all_data >= cef->len_data);
+ cef->len_all_data -= cef->len_data;
+ cef->len_all_data += len;
+ cef->data = data;
+ cef->len_data = len;
+ cef->words = NULL;
+}
+
+/* Convenience wrapper: attach NUL-terminated data, measuring it. */
+void cef_setdata(struct chunkeditfield *cef, const char *data) {
+ cef_setdatalen(cef, data, strlen(data));
+}
+
+/* Attach a word list as the field's value (borrowed; caller must not
+ * modify it afterwards, see chunkedit.h). The accounted length is the
+ * sum of all words plus single separating spaces. */
+void cef_setwordlist(struct chunkeditfield *cef, const struct strlist *words) {
+ int i; size_t len = 0;
+
+ for (i = 0 ; i < words->count ; i++) {
+ len += 1+strlen(words->values[i]);
+ }
+ /* no separator before the first word */
+ if (len > 0)
+ len--;
+ assert (cef->len_all_data >= cef->len_data);
+ cef->len_all_data -= cef->len_data;
+ cef->len_all_data += len;
+ cef->data = NULL;
+ cef->len_data = len;
+ cef->words = words;
+}
+
+/* Fill continuation line <line> with <wordcount> words taken from the
+ * NULL-terminated varargs (pointers are borrowed). Each line must only
+ * be set once. Accounts newline + one leading space per word into
+ * len_all_data. Returns RET_OK or RET_ERROR_OOM. */
+retvalue cef_setline(struct chunkeditfield *cef, int line, int wordcount, ...) {
+ va_list ap; int i;
+ struct cef_line *l;
+ const char *word;
+ size_t len;
+
+ assert (line < cef->linecount);
+ assert (wordcount > 0);
+
+ l = &cef->lines[line];
+ /* line must not have been filled before (calloc'd in cef_newfield) */
+ assert (l->wordcount == 0 && l->words == NULL && l->wordlen == NULL);
+
+ l->wordcount = wordcount;
+ l->words = nzNEW(wordcount, const char*);
+ if (FAILEDTOALLOC(l->words))
+ return RET_ERROR_OOM;
+ l->wordlen = nzNEW(wordcount, size_t);
+ if (FAILEDTOALLOC(l->wordlen)) {
+ free(l->words);l->words = NULL;
+ return RET_ERROR_OOM;
+ }
+ va_start(ap, wordcount);
+ len = 1; /* newline */
+ for (i = 0 ; i < wordcount; i++) {
+ word = va_arg(ap, const char*);
+ assert(word != NULL);
+
+ l->words[i] = word;
+ l->wordlen[i] = strlen(word);
+ len += 1 + l->wordlen[i];
+ }
+ /* the argument list must be terminated by a NULL sentinel */
+ word = va_arg(ap, const char*);
+ assert (word == NULL);
+
+ va_end(ap);
+ cef->len_all_data += len;
+ return RET_OK;
+}
+
+/* Like cef_setline, but the first two words (typically a hash and a
+ * size) are passed with explicit lengths, followed by <wordcount>
+ * additional NUL-terminated words and a NULL sentinel.
+ * Returns RET_OK or RET_ERROR_OOM. */
+retvalue cef_setline2(struct chunkeditfield *cef, int line, const char *hash, size_t hashlen, const char *size, size_t sizelen, int wordcount, ...) {
+ va_list ap; int i;
+ struct cef_line *l;
+ const char *word;
+ size_t len;
+
+ assert (line < cef->linecount);
+ /* wordcount may be 0 here: hash and size alone form the line */
+ assert (wordcount >= 0);
+
+ l = &cef->lines[line];
+ assert (l->wordcount == 0 && l->words == NULL && l->wordlen == NULL);
+
+ l->wordcount = wordcount + 2;
+ l->words = nzNEW(wordcount + 2, const char *);
+ if (FAILEDTOALLOC(l->words))
+ return RET_ERROR_OOM;
+ l->wordlen = nzNEW(wordcount + 2, size_t);
+ if (FAILEDTOALLOC(l->wordlen)) {
+ free(l->words); l->words = NULL;
+ return RET_ERROR_OOM;
+ }
+ va_start(ap, wordcount);
+ len = 1; /* newline */
+ l->words[0] = hash;
+ l->wordlen[0] = hashlen;
+ len += 1 + hashlen;
+ l->words[1] = size;
+ l->wordlen[1] = sizelen;
+ len += 1 + sizelen;
+ for (i = 0 ; i < wordcount; i++) {
+ word = va_arg(ap, const char*);
+ assert(word != NULL);
+
+ l->words[i + 2] = word;
+ l->wordlen[i + 2] = strlen(word);
+ len += 1 + l->wordlen[i + 2];
+ }
+ /* NULL sentinel terminates the varargs */
+ word = va_arg(ap, const char*);
+ assert (word == NULL);
+
+ va_end(ap);
+ cef->len_all_data += len;
+ return RET_OK;
+}
+
+/* Return the 0-based position in list <cef> of the entry whose field
+ * name matches the <len> bytes at <p> (case-insensitively), or -1 if
+ * no entry matches. */
+static inline int findcef(const struct chunkeditfield *cef, const char *p, size_t len) {
+ int result = 0;
+ while (cef != NULL) {
+ if (cef->len_field == len &&
+ strncasecmp(p, cef->field, len) == 0) {
+ return result;
+ }
+ cef = cef->next;
+ result++;
+ }
+ return -1;
+}
+
+/* Apply the edit list <cefs> to the control file paragraph <chunk>.
+ * Phase 1 scans the chunk once to record where each known field's data
+ * starts/ends and to compute an upper bound on the output size.
+ * Phase 2 emits all CEF_EARLY fields, phase 3 copies fields unknown to
+ * the edit list unchanged, phase 4 appends the remaining (CEF_LATE)
+ * fields. The result (always newline-terminated lines, no trailing
+ * empty line) is stored newly allocated in *result, its length in
+ * *rlen. Returns RET_OK or RET_ERROR_OOM.
+ * (Fix over the original: the local struct field had an unused member
+ * "int next;" which was never read or written; it has been removed.) */
+retvalue chunk_edit(const char *chunk, char **result, size_t *rlen, const struct chunkeditfield *cefs) {
+ size_t maxlen;
+ int i, processed, count = 0;
+ const struct chunkeditfield *cef;
+ struct field {
+ const struct chunkeditfield *cef;
+ /* data offsets within chunk; endofs == 0 means:
+ * field not (yet) seen in the chunk */
+ size_t startofs, endofs;
+ } *fields;
+ const char *p, *q, *e;
+ char *n; size_t len;
+
+ maxlen = 1; /* a newline might get missed */
+ for (cef = cefs ; cef != NULL ; cef=cef->next) {
+ maxlen += cef->len_field + cef->len_all_data + 3; /* ': \n' */
+ count ++;
+ }
+ fields = nzNEW(count, struct field);
+ if (FAILEDTOALLOC(fields))
+ return RET_ERROR_OOM;
+ i = 0;
+ /* fields[] is ordered like the cefs list; findcef returns the
+ * matching index into both */
+ for (cef = cefs ; cef != NULL ; cef=cef->next) {
+ assert (i < count);
+ fields[i++].cef = cef;
+ }
+ assert (i == count);
+
+ /* get rid of empty or strange lines at the beginning: */
+ while (*chunk == ' ' || *chunk == '\t') {
+ while (*chunk != '\0' && *chunk != '\n')
+ chunk++;
+ if (*chunk == '\n')
+ chunk++;
+ }
+ p = chunk;
+
+ /* phase 1: locate known fields, account size of unknown ones */
+ while (true) {
+ q = p;
+ while (*q != '\0' && *q != '\n' && *q != ':')
+ q++;
+ if (*q == '\0')
+ break;
+ if (*q == '\n') {
+ /* header without colon? what kind of junk is this? */
+ q++;
+ while (*q == ' ' || *q == '\t') {
+ while (*q != '\0' && *q != '\n')
+ q++;
+ if (*q == '\n')
+ q++;
+
+ }
+ if (p == chunk)
+ chunk = q;
+ p = q;
+ continue;
+ }
+ i = findcef(cefs, p, q-p);
+ /* find begin and end of data */
+ q++;
+ while (*q == ' ')
+ q++;
+ e = q;
+ while (*e != '\0' && *e != '\n')
+ e++;
+ /* continuation lines belong to the same field */
+ while (e[0] == '\n' && (e[1] == ' ' || e[1] == '\t')) {
+ e++;
+ while (*e != '\0' && *e != '\n')
+ e++;
+ }
+ if (i < 0) {
+ /* not known, we'll have to copy it */
+ maxlen += 1+e-p;
+ if (*e == '\0')
+ break;
+ p = e+1;
+ continue;
+ }
+ /* only the first occurrence of a field is remembered */
+ if (fields[i].endofs == 0) {
+ fields[i].startofs = p-chunk;
+ fields[i].endofs = e-chunk;
+ if (fields[i].cef->action == CEF_KEEP ||
+ fields[i].cef->action == CEF_ADDMISSED)
+ maxlen += 1+e-q;
+ }
+ if (*e == '\0')
+ break;
+ p = e+1;
+ }
+ n = malloc(maxlen + 1);
+ if (FAILEDTOALLOC(n)) {
+ free(fields);
+ return RET_ERROR_OOM;
+ }
+ len = 0;
+ /* phase 2: emit the CEF_EARLY prefix of the edit list */
+ for (processed = 0;
+ processed < count && fields[processed].cef->when == CEF_EARLY;
+ processed++) {
+ struct field *f = &fields[processed];
+ const struct chunkeditfield *ef = f->cef;
+ if (ef->action == CEF_DELETE)
+ continue;
+ if (ef->action == CEF_REPLACE && f->endofs == 0)
+ continue;
+ /* keep the old value verbatim if present */
+ if (f->endofs != 0 &&
+ (ef->action == CEF_KEEP ||
+ ef->action == CEF_ADDMISSED)) {
+ size_t l = f->endofs - f->startofs;
+ assert (maxlen >= len + l);
+ memcpy(n+len, chunk + f->startofs, l);
+ len +=l;
+ n[len++] = '\n';
+ continue;
+ }
+ if (ef->action == CEF_KEEP)
+ continue;
+ /* otherwise write the new value supplied in the cef */
+ assert (maxlen >= len+ 3+ ef->len_field);
+ memcpy(n+len, ef->field, ef->len_field);
+ len += ef->len_field;
+ n[len++] = ':';
+ n[len++] = ' ';
+ if (ef->data != NULL) {
+ assert (maxlen >= len+1+ef->len_data);
+ memcpy(n+len, ef->data, ef->len_data);
+ len += ef->len_data;
+ } else if (ef->words != NULL) {
+ int j;
+ for (j = 0 ; j < ef->words->count ; j++) {
+ const char *v = ef->words->values[j];
+ size_t l = strlen(v);
+ if (j > 0)
+ n[len++] = ' ';
+ memcpy(n+len, v, l);
+ len += l;
+ }
+ }
+ for (i = 0 ; i < ef->linecount ; i++) {
+ int j;
+ n[len++] = '\n';
+ for (j = 0 ; j < ef->lines[i].wordcount ; j++) {
+ n[len++] = ' ';
+ memcpy(n+len, ef->lines[i].words[j],
+ ef->lines[i].wordlen[j]);
+ len += ef->lines[i].wordlen[j];
+ }
+ }
+ assert(maxlen > len);
+ n[len++] = '\n';
+ }
+ p = chunk;
+ /* phase 3: now add all headers in between (i.e. copy every field
+ * the edit list does not know about) */
+ while (true) {
+ q = p;
+ while (*q != '\0' && *q != '\n' && *q != ':')
+ q++;
+ if (*q == '\0')
+ break;
+ if (*q == '\n') {
+ /* header without colon? what kind of junk is this? */
+ q++;
+ while (*q == ' ' || *q == '\t') {
+ while (*q != '\0' && *q != '\n')
+ q++;
+ if (*q == '\n')
+ q++;
+
+ }
+ p = q;
+ continue;
+ }
+ i = findcef(cefs, p, q-p);
+ /* find begin and end of data */
+ q++;
+ while (*q == ' ')
+ q++;
+ e = q;
+ while (*e != '\0' && *e != '\n')
+ e++;
+ while (e[0] == '\n' && (e[1] == ' ' || e[1] == '\t')) {
+ e++;
+ while (*e != '\0' && *e != '\n')
+ e++;
+ }
+ if (i < 0) {
+ /* not known, copy it */
+ size_t l = e - p;
+ assert (maxlen >= len + l);
+ memcpy(n+len, p, l);
+ len += l;
+ n[len++] = '\n';
+ if (*e == '\0')
+ break;
+ p = e+1;
+ continue;
+ }
+ if (*e == '\0')
+ break;
+ p = e+1;
+ }
+ /* phase 4: emit the remaining (CEF_LATE) fields */
+ for (; processed < count ; processed++) {
+ struct field *f = &fields[processed];
+ const struct chunkeditfield *ef = f->cef;
+ if (ef->action == CEF_DELETE)
+ continue;
+ if (ef->action == CEF_REPLACE && f->endofs == 0)
+ continue;
+ if (f->endofs != 0 &&
+ (ef->action == CEF_KEEP ||
+ ef->action == CEF_ADDMISSED)) {
+ size_t l = f->endofs - f->startofs;
+ assert (maxlen >= len + l);
+ memcpy(n+len, chunk + f->startofs, l);
+ len +=l;
+ n[len++] = '\n';
+ continue;
+ }
+ if (ef->action == CEF_KEEP)
+ continue;
+ assert (maxlen >= len+ 3+ ef->len_field);
+ memcpy(n+len, ef->field, ef->len_field);
+ len += ef->len_field;
+ n[len++] = ':';
+ n[len++] = ' ';
+ if (ef->data != NULL) {
+ assert (maxlen >= len+1+ef->len_data);
+ memcpy(n+len, ef->data, ef->len_data);
+ len += ef->len_data;
+ } else if (ef->words != NULL) {
+ int j;
+ for (j = 0 ; j < ef->words->count ; j++) {
+ const char *v = ef->words->values[j];
+ size_t l = strlen(v);
+ if (j > 0)
+ n[len++] = ' ';
+ memcpy(n+len, v, l);
+ len += l;
+ }
+ }
+ for (i = 0 ; i < ef->linecount ; i++) {
+ int j;
+ n[len++] = '\n';
+ for (j = 0 ; j < ef->lines[i].wordcount ; j++) {
+ n[len++] = ' ';
+ memcpy(n+len, ef->lines[i].words[j],
+ ef->lines[i].wordlen[j]);
+ len += ef->lines[i].wordlen[j];
+ }
+ }
+ assert(maxlen > len);
+ n[len++] = '\n';
+ }
+ assert(maxlen >= len);
+ n[len] = '\0';
+ free(fields);
+ /* shrink to the actual size; on realloc failure the original
+ * (larger but valid) buffer is kept */
+ *result = realloc(n, len+1);
+ if (*result == NULL)
+ *result = n;
+ *rlen = len;
+ return RET_OK;
+}
diff --git a/chunkedit.h b/chunkedit.h
new file mode 100644
index 0000000..7d7f4cf
--- /dev/null
+++ b/chunkedit.h
@@ -0,0 +1,58 @@
+#ifndef REPREPRO_CHUNKEDIT_H
+#define REPREPRO_CHUNKEDIT_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+
+/* modifications of a chunk: one edit instruction per field, linked
+ * into a list that chunk_edit processes in order */
+struct chunkeditfield {
+ /*@null@*/struct chunkeditfield *next;
+ /* The name of the field: */
+ const char *field; size_t len_field;
+ enum cefaction { CEF_DELETE, /* delete if there */
+ CEF_ADDMISSED, /* add if not there */
+ CEF_REPLACE, /* replace if there */
+ CEF_ADD, /* add if not there or replace if there */
+ CEF_KEEP /* keep it */
+ } action;
+ /* whether the field is emitted before or after the fields copied
+ * unchanged from the original chunk */
+ enum cefwhen { CEF_EARLY, CEF_LATE } when;
+ /* the following must be 0 or NULL for CEF_DELETE */
+ /* total bytes this field's value (incl. continuation lines)
+ * may need in the output, maintained by the cef_set* helpers */
+ size_t len_all_data;
+ /*@null@*/const char *data; size_t len_data;
+ const struct strlist *words;
+ /* continuation lines, stored as word lists */
+ int linecount;
+ struct cef_line {
+ int wordcount;
+ const char **words;
+ size_t *wordlen;
+ } lines[];
+};
+
+/* those return NULL on out of memory and free next in that case */
+/*@null@*/struct chunkeditfield *cef_newfield(const char *, enum cefaction, enum cefwhen, unsigned int /*linecount*/, /*@only@*//*@null@*/struct chunkeditfield *);
+
+void cef_setdata(struct chunkeditfield *, const char *);
+void cef_setdatalen(struct chunkeditfield *, const char *, size_t);
+/* calculate the length, do not change the strlist after that before free */
+void cef_setwordlist(struct chunkeditfield *, const struct strlist *);
+retvalue cef_setline(struct chunkeditfield *, int /*line*/, int /*wordcount*/, ...);
+retvalue cef_setline2(struct chunkeditfield *, int, const char *, size_t, const char *, size_t, int, ...);
+
+retvalue chunk_edit(const char *, char **, size_t *, const struct chunkeditfield *);
+
+void cef_free(/*@only@*//*@null@*/struct chunkeditfield *);
+
+/* Free only the first element of an edit list and return its tail. */
+static inline struct chunkeditfield *cef_pop(/*@only@*/struct chunkeditfield *cef) {
+ struct chunkeditfield *next = cef->next;
+ /* detach before freeing so cef_free does not walk the tail */
+ cef->next = NULL;
+ cef_free(cef);
+ return next;
+}
+
+#endif
diff --git a/chunks.c b/chunks.c
new file mode 100644
index 0000000..41f5f2c
--- /dev/null
+++ b/chunks.c
@@ -0,0 +1,798 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <assert.h>
+#include "error.h"
+#include "chunks.h"
+#include "names.h"
+
+/* Point to the data of field <name> in <chunk>: returns the position
+ * just after "Name:" (case-insensitive match at line starts), or NULL
+ * if the field is not present or chunk is NULL. */
+static const char *chunk_getfield(const char *name, const char *chunk) {
+ size_t l;
+
+ if (chunk == NULL)
+ return NULL;
+ l = strlen(name);
+ while (*chunk != '\0') {
+ if (strncasecmp(name, chunk, l) == 0 && chunk[l] == ':') {
+ chunk += l+1;
+ return chunk;
+ }
+ /* skip to the start of the next line */
+ while (*chunk != '\n' && *chunk != '\0')
+ chunk++;
+ if (*chunk == '\0')
+ return NULL;
+ chunk++;
+ }
+ return NULL;
+}
+
+/* get the content of the given field, including all following lines, in a format
+ * that may be put into chunk_replacefields
+static retvalue chunk_getcontent(const char *chunk, const char *name, char **value) {
+ const char *field;
+ char *val;
+ const char *b, *e;
+
+ assert(value != NULL);
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+
+ b = field;
+ * jump over spaces at the beginning *
+ if (xisspace(*b))
+ b++;
+
+ * search for the end *
+ e = b;
+ do {
+ while (*e != '\n' && *e != '\0')
+ e++;
+ if (*e != '\0')
+ e++;
+ } while (*e != ' ' && *e != '\t' && *e != '\0');
+
+ if (e > b && *e == '\0')
+ e--;
+ * remove trailing newline *
+ if (e > b && *e == '\n')
+ e--;
+ if (e > b)
+ val = strndup(b, e - b + 1);
+ else
+ val = strdup("");
+ if (FAILEDTOALLOC(val))
+ return RET_ERROR_OOM;
+ *value = val;
+ return RET_OK;
+}
+*/
+
+/* look for name in chunk. returns RET_NOTHING if not found.
+ * On success stores the first line of the field's value, with
+ * surrounding whitespace trimmed, newly allocated in *value
+ * (continuation lines are ignored; see chunk_getwholedata for those). */
+retvalue chunk_getvalue(const char *chunk, const char *name, char **value) {
+ const char *field;
+ char *val;
+ const char *b, *e;
+
+ assert(value != NULL);
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+
+ b = field;
+ /* jump over spaces at the beginning */
+ while (*b != '\0' && (*b == ' ' || *b == '\t'))
+ b++;
+ /* search for the end */
+ e = b;
+ while (*e != '\n' && *e != '\0')
+ e++;
+ /* remove trailing spaces */
+ while (e > b && xisspace(*e))
+ e--;
+ /* an all-whitespace value becomes the empty string */
+ if (!xisspace(*e))
+ val = strndup(b, e - b + 1);
+ else
+ val = strdup("");
+ if (FAILEDTOALLOC(val))
+ return RET_ERROR_OOM;
+ *value = val;
+ return RET_OK;
+}
+
+/* Collect the continuation lines of field <name> (the first line's own
+ * content is skipped) into <strlist>, one entry per line, trimmed.
+ * Returns RET_NOTHING if the field is absent, RET_OK otherwise. */
+retvalue chunk_getextralinelist(const char *chunk, const char *name, struct strlist *strlist) {
+ retvalue r;
+ const char *f, *b, *e;
+ char *v;
+
+ f = chunk_getfield(name, chunk);
+ if (f == NULL)
+ return RET_NOTHING;
+ strlist_init(strlist);
+ /* walk over the first line */
+ while (*f != '\0' && *f != '\n')
+ f++;
+ /* nothing there is an empty list */
+ if (*f == '\0')
+ return RET_OK;
+ f++;
+ /* while lines begin with ' ' or '\t', add them */
+ while (*f == ' ' || *f == '\t') {
+ while (*f != '\0' && xisblank(*f))
+ f++;
+ b = f;
+ while (*f != '\0' && *f != '\n')
+ f++;
+ /* trim trailing whitespace from the line */
+ e = f;
+ while (e > b && *e != '\0' && xisspace(*e))
+ e--;
+ if (!xisspace(*e))
+ v = strndup(b, e - b + 1);
+ else
+ v = strdup("");
+ if (FAILEDTOALLOC(v)) {
+ strlist_done(strlist);
+ return RET_ERROR_OOM;
+ }
+ r = strlist_add(strlist, v);
+ if (!RET_IS_OK(r)) {
+ strlist_done(strlist);
+ return r;
+ }
+ if (*f == '\0')
+ return RET_OK;
+ f++;
+ }
+ return RET_OK;
+}
+
+/* Return the complete value of field <name> including continuation
+ * lines (everything up to the first line not starting with space/tab),
+ * newly allocated in *value. RET_NOTHING if the field is absent. */
+retvalue chunk_getwholedata(const char *chunk, const char *name, char **value) {
+ const char *f, *p, *e;
+ bool afternewline = false;
+ char *v;
+
+ f = chunk_getfield(name, chunk);
+ if (f == NULL)
+ return RET_NOTHING;
+ while (*f == ' ')
+ f++;
+ /* e tracks the position of the last newline that might end
+ * the field's data */
+ for (e = p = f ; *p != '\0' ; p++) {
+ if (afternewline) {
+ if (*p == ' ' || *p == '\t')
+ afternewline = false;
+ else if (*p != '\r')
+ break;
+ } else {
+ if (*p == '\n') {
+ e = p;
+ afternewline = true;
+ }
+ }
+ }
+ /* data running to the end of the chunk without a final newline */
+ if (!afternewline && *p == '\0')
+ e = p;
+ v = strndup(f, e - f);
+ if (FAILEDTOALLOC(v))
+ return RET_ERROR_OOM;
+ *value = v;
+ return RET_OK;
+}
+
+/* Split the value of field <name> (including continuation lines) into
+ * whitespace-separated words, stored in <strlist>. RET_NOTHING if the
+ * field is absent, RET_OK otherwise. */
+retvalue chunk_getwordlist(const char *chunk, const char *name, struct strlist *strlist) {
+ retvalue r;
+ const char *f, *b;
+ char *v;
+
+ f = chunk_getfield(name, chunk);
+ if (f == NULL)
+ return RET_NOTHING;
+ strlist_init(strlist);
+ while (*f != '\0') {
+ /* walk over spaces */
+ while (*f != '\0' && xisspace(*f)) {
+ if (*f == '\n') {
+ f++;
+ /* a line not starting with blank ends
+ * the field */
+ if (*f != ' ' && *f != '\t')
+ return RET_OK;
+ } else
+ f++;
+ }
+ if (*f == '\0')
+ return RET_OK;
+ b = f;
+ /* search for end of word */
+ while (*f != '\0' && !xisspace(*f))
+ f++;
+ v = strndup(b, f - b);
+ if (FAILEDTOALLOC(v)) {
+ strlist_done(strlist);
+ return RET_ERROR_OOM;
+ }
+ r = strlist_add(strlist, v);
+ if (!RET_IS_OK(r)) {
+ strlist_done(strlist);
+ return r;
+ }
+ }
+ return RET_OK;
+}
+
+/* Like chunk_getwordlist, but duplicate words are only stored once
+ * (uses strlist_adduniq instead of strlist_add). */
+retvalue chunk_getuniqwordlist(const char *chunk, const char *name, struct strlist *strlist) {
+ retvalue r;
+ const char *f, *b;
+ char *v;
+
+ f = chunk_getfield(name, chunk);
+ if (f == NULL)
+ return RET_NOTHING;
+ strlist_init(strlist);
+ while (*f != '\0') {
+ /* walk over spaces */
+ while (*f != '\0' && xisspace(*f)) {
+ if (*f == '\n') {
+ f++;
+ /* a line not starting with blank ends
+ * the field */
+ if (*f != ' ' && *f != '\t')
+ return RET_OK;
+ } else
+ f++;
+ }
+ if (*f == '\0')
+ return RET_OK;
+ b = f;
+ /* search for end of word */
+ while (*f != '\0' && !xisspace(*f))
+ f++;
+ v = strndup(b, f - b);
+ if (FAILEDTOALLOC(v)) {
+ strlist_done(strlist);
+ return RET_ERROR_OOM;
+ }
+ r = strlist_adduniq(strlist, v);
+ if (!RET_IS_OK(r)) {
+ strlist_done(strlist);
+ return r;
+ }
+ }
+ return RET_OK;
+}
+
+/* Interpret field <name> as a boolean: RET_NOTHING if the field is
+ * missing or its value starts with "false" or "no" (case-insensitive),
+ * RET_OK for any other value (including an empty one). */
+retvalue chunk_gettruth(const char *chunk, const char *name) {
+ const char *field;
+
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+ while (*field == ' ' || *field == '\t')
+ field++;
+ /* short-circuit && keeps this from reading past a terminator */
+ if ((field[0] == 'f' || field[0] == 'F') &&
+ (field[1] == 'a' || field[1] == 'A') &&
+ (field[2] == 'l' || field[2] == 'L') &&
+ (field[3] == 's' || field[3] == 'S') &&
+ (field[4] == 'e' || field[4] == 'E')) {
+ return RET_NOTHING;
+ }
+ if ((field[0] == 'n' || field[0] == 'N') &&
+ (field[1] == 'o' || field[1] == 'O')) {
+ return RET_NOTHING;
+ }
+ // TODO: strict check?
+ return RET_OK;
+}
+/* return RET_OK, if field is found, RET_NOTHING, if not
+ * (the field's value is irrelevant, only its presence is tested) */
+retvalue chunk_checkfield(const char *chunk, const char *name){
+ const char *field;
+
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+
+ return RET_OK;
+}
+
+/* Parse a package/source-field: ' *value( ?\(version\))? *'
+ * Stores the bare name in *pkgname; a parenthesised version is only
+ * tolerated (not returned) when allowversion is true. Returns
+ * RET_NOTHING if the field is absent, RET_ERROR on malformed input. */
+retvalue chunk_getname(const char *chunk, const char *name, char **pkgname, bool allowversion) {
+ const char *field, *name_end, *p;
+
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+ while (*field != '\0' && *field != '\n' && xisspace(*field))
+ field++;
+ name_end = field;
+ /* this has now checked somewhere else for correctness and
+ * is only a pure separation process:
+ * (as package(version) is possible, '(' must be checked) */
+ while (*name_end != '\0' && *name_end != '\n' && *name_end != '('
+ && !xisspace(*name_end))
+ name_end++;
+ p = name_end;
+ while (*p != '\0' && *p != '\n' && xisspace(*p))
+ p++;
+ if (name_end == field ||
+ (*p != '\0' && *p != '\n' &&
+ (!allowversion || *p != '('))) {
+ if (*field == '\n' || *field == '\0') {
+ fprintf(stderr, "Error: Field '%s' is empty!\n", name);
+ } else {
+ fprintf(stderr,
+"Error: Field '%s' contains unexpected character '%c'!\n",
+ name, *p);
+ }
+ return RET_ERROR;
+ }
+ if (*p == '(') {
+ /* skip over the version, it is not returned here */
+ while (*p != '\0' && *p != '\n' && *p != ')')
+ // TODO: perhaps check for wellformed version
+ p++;
+ if (*p != ')') {
+ fprintf(stderr,
+"Error: Field '%s' misses closing parenthesis!\n", name);
+ return RET_ERROR;
+ }
+ p++;
+ }
+ while (*p != '\0' && *p != '\n' && xisspace(*p))
+ p++;
+ if (*p != '\0' && *p != '\n') {
+ fprintf(stderr,
+"Error: Field '%s' contains trailing junk starting with '%c'!\n", name, *p);
+ return RET_ERROR;
+ }
+
+ *pkgname = strndup(field, name_end - field);
+ if (FAILEDTOALLOC(*pkgname))
+ return RET_ERROR_OOM;
+ return RET_OK;
+
+}
+
+/* Parse a package/source-field: ' *value( ?\(version\))? *'
+ * Stores the name in *pkgname and the parenthesised version (or NULL
+ * if none was given) in *version, both newly allocated. Returns
+ * RET_NOTHING if the field is absent, RET_ERROR on malformed input. */
+retvalue chunk_getnameandversion(const char *chunk, const char *name, char **pkgname, char **version) {
+ const char *field, *name_end, *p;
+ char *v;
+
+ field = chunk_getfield(name, chunk);
+ if (field == NULL)
+ return RET_NOTHING;
+ while (*field != '\0' && *field != '\n' && xisspace(*field))
+ field++;
+ name_end = field;
+ /* this has now checked somewhere else for correctness and
+ * is only a pure separation process:
+ * (as package(version) is possible, '(' must be checked) */
+ while (*name_end != '\0' && *name_end != '\n' && *name_end != '('
+ && !xisspace(*name_end))
+ name_end++;
+ p = name_end;
+ while (*p != '\0' && *p != '\n' && xisspace(*p))
+ p++;
+ if (name_end == field || (*p != '\0' && *p != '\n' && *p != '(')) {
+ if (*field == '\n' || *field == '\0') {
+ fprintf(stderr, "Error: Field '%s' is empty!\n", name);
+ } else {
+ fprintf(stderr,
+"Error: Field '%s' contains unexpected character '%c'!\n", name, *p);
+ }
+ return RET_ERROR;
+ }
+ if (*p == '(') {
+ const char *version_begin;
+
+ p++;
+ while (*p != '\0' && *p != '\n' && xisspace(*p))
+ p++;
+ version_begin = p;
+ while (*p != '\0' && *p != '\n' && *p != ')' && !xisspace(*p))
+ // TODO: perhaps check for wellformed version
+ p++;
+ v = strndup(version_begin, p - version_begin);
+ if (FAILEDTOALLOC(v))
+ return RET_ERROR_OOM;
+ while (*p != '\0' && *p != '\n' && *p != ')' && xisspace(*p))
+ p++;
+ if (*p != ')') {
+ /* v must not leak on the error paths */
+ free(v);
+ if (*p == '\0' || *p == '\n')
+ fprintf(stderr,
+"Error: Field '%s' misses closing parenthesis!\n",
+ name);
+ else
+ fprintf(stderr,
+"Error: Field '%s' has multiple words after '('!\n",
+ name);
+ return RET_ERROR;
+ }
+ p++;
+ } else {
+ v = NULL;
+ }
+ while (*p != '\0' && *p != '\n' && xisspace(*p))
+ p++;
+ if (*p != '\0' && *p != '\n') {
+ free(v);
+ fprintf(stderr,
+"Error: Field '%s' contains trailing junk starting with '%c'!\n",
+ name, *p);
+ return RET_ERROR;
+ }
+
+ *pkgname = strndup(field, name_end - field);
+ if (FAILEDTOALLOC(*pkgname)) {
+ free(v);
+ return RET_ERROR_OOM;
+ }
+ *version = v;
+ return RET_OK;
+
+}
+
+/* Add this the <fields to add> to <chunk> before <beforethis> field,
+ * replacing older fields of this name, if they are already there.
+ * If <beforethis> is not found and maybemissing is true, the fields
+ * are appended at the end instead. Entries with data == NULL only
+ * delete existing fields of that name. Returns a newly allocated
+ * chunk, or NULL on allocation failure or empty <toadd>. */
+
+char *chunk_replacefields(const char *chunk, const struct fieldtoadd *toadd, const char *beforethis, bool maybemissing) {
+ const char *c, *ce;
+ char *newchunk, *n;
+ size_t size, len_beforethis;
+ const struct fieldtoadd *f;
+ retvalue result;
+ bool fieldsadded = false;
+
+ assert (chunk != NULL && beforethis != NULL);
+
+ if (toadd == NULL)
+ return NULL;
+
+ c = chunk;
+
+ /* calculate the maximal size we might end up with */
+ size = 2 + strlen(c);
+ f = toadd;
+ while (f != NULL) {
+ if (f->data != NULL)
+ size += 3 + f->len_field + f->len_data;
+ f = f->next;
+ }
+
+ newchunk = n = malloc(size);
+ if (FAILEDTOALLOC(n))
+ return NULL;
+
+ len_beforethis = strlen(beforethis);
+
+ result = RET_NOTHING;
+ do {
+ /* are we at the place to add the fields yet? */
+ if (!fieldsadded && strncasecmp(c, beforethis, len_beforethis) == 0
+ && c[len_beforethis] == ':') {
+ /* add them now: */
+ f = toadd;
+ while (f != NULL) {
+ if (f->data != NULL) {
+ memcpy(n, f->field, f->len_field);
+ n += f->len_field;
+ *n = ':'; n++;
+ *n = ' '; n++;
+ memcpy(n, f->data, f->len_data);
+ n += f->len_data;
+ *n = '\n'; n++;
+ }
+ f = f->next;
+ }
+ result = RET_OK;
+ fieldsadded = true;
+ }
+ /* is this one of the fields we added/will add? */
+ f = toadd;
+ while (f != NULL) {
+ if (strncasecmp(c, f->field, f->len_field) == 0
+ && c[f->len_field] == ':')
+ break;
+ f = f->next;
+ }
+ /* search the end of the field (including continuation lines) */
+ ce = c;
+ do {
+ while (*ce != '\n' && *ce != '\0')
+ ce++;
+ if (*ce == '\0')
+ break;
+ ce++;
+ } while (*ce == ' ' || *ce == '\t');
+
+ /* copy it, if it is not to be ignored */
+
+ if (f == NULL && ce-c > 0) {
+ memcpy(n, c, ce -c);
+ n += ce-c;
+ }
+
+ /* and proceed with the next */
+ c = ce;
+
+ } while (*c != '\0' && *c != '\n');
+
+ /* make sure the chunk ends with a newline */
+ if (n > newchunk && *(n-1) != '\n')
+ *(n++) = '\n';
+ if (maybemissing && !fieldsadded) {
+ /* add them now, if they are allowed to come later */
+ f = toadd;
+ while (f != NULL) {
+ if (f->data != NULL) {
+ memcpy(n, f->field, f->len_field);
+ n += f->len_field;
+ *n = ':'; n++;
+ *n = ' '; n++;
+ memcpy(n, f->data, f->len_data);
+ n += f->len_data;
+ *n = '\n'; n++;
+ }
+ f = f->next;
+ }
+ result = RET_OK;
+ fieldsadded = true;
+ }
+ *n = '\0';
+
+ assert (n-newchunk < 0 || (size_t)(n-newchunk) <= size-1);
+
+ /* not finding the anchor field is a caller bug */
+ if (result == RET_NOTHING) {
+ fprintf(stderr,
+"Could not find field '%s' in chunk '%s'!!!\n",
+ beforethis, chunk);
+ assert(false);
+ }
+
+ return newchunk;
+}
+
+/* Prepend an add-or-delete entry to list <next>: with data == NULL the
+ * entry only deletes existing fields of that name. On allocation
+ * failure the whole <next> list is freed and NULL is returned. */
+struct fieldtoadd *aodfield_new(const char *field, const char *data, struct fieldtoadd *next) {
+ struct fieldtoadd *n;
+
+ assert(field != NULL);
+
+ n = NEW(struct fieldtoadd);
+ if (FAILEDTOALLOC(n)) {
+ addfield_free(next);
+ return NULL;
+ }
+ n->field = field;
+ n->len_field = strlen(field);
+ n->data = data;
+ if (data != NULL)
+ n->len_data = strlen(data);
+ else
+ n->len_data = 0;
+ n->next = next;
+ return n;
+}
+/* Prepend an add/replace entry (data must not be NULL) to list <next>.
+ * On allocation failure the whole <next> list is freed, NULL returned. */
+struct fieldtoadd *addfield_new(const char *field, const char *data, struct fieldtoadd *next) {
+ struct fieldtoadd *n;
+
+ assert(field != NULL && data != NULL);
+
+ n = NEW(struct fieldtoadd);
+ if (FAILEDTOALLOC(n)) {
+ addfield_free(next);
+ return NULL;
+ }
+ n->field = field;
+ n->len_field = strlen(field);
+ n->data = data;
+ n->len_data = strlen(data);
+ n->next = next;
+ return n;
+}
+/* Prepend a delete-only entry (data == NULL) to list <next>.
+ * On allocation failure the whole <next> list is freed, NULL returned. */
+struct fieldtoadd *deletefield_new(const char *field, struct fieldtoadd *next) {
+ struct fieldtoadd *n;
+
+ assert(field != NULL);
+
+ n = NEW(struct fieldtoadd);
+ if (FAILEDTOALLOC(n)) {
+ addfield_free(next);
+ return NULL;
+ }
+ n->field = field;
+ n->len_field = strlen(field);
+ n->data = NULL;
+ n->len_data = 0;
+ n->next = next;
+ return n;
+}
+/* Like addfield_new, but the data length is given explicitly (data
+ * need not be NUL-terminated at len). */
+struct fieldtoadd *addfield_newn(const char *field, const char *data, size_t len, struct fieldtoadd *next) {
+ struct fieldtoadd *n;
+
+ n = NEW(struct fieldtoadd);
+ if (FAILEDTOALLOC(n)) {
+ addfield_free(next);
+ return NULL;
+ }
+ n->field = field;
+ n->len_field = strlen(field);
+ n->data = data;
+ n->len_data = len;
+ n->next = next;
+ return n;
+}
+/* Free a whole fieldtoadd list; the field/data strings are borrowed
+ * and not freed. */
+void addfield_free(struct fieldtoadd *f) {
+ struct fieldtoadd *g;
+
+ while (f != NULL) {
+ g = f->next;
+ free(f);
+ f = g;
+ }
+}
+
+/* Convenience wrapper: replace (or add, anchored at its own name) a
+ * single field with value <data>; uses a stack-allocated list entry. */
+char *chunk_replacefield(const char *chunk, const char *fieldname, const char *data, bool maybemissing) {
+ struct fieldtoadd toadd;
+
+ toadd.field = fieldname;
+ toadd.len_field = strlen(fieldname);
+ toadd.data = data;
+ toadd.len_data = strlen(data);
+ toadd.next = NULL;
+ return chunk_replacefields(chunk, &toadd, fieldname, maybemissing);
+}
+
+/* Add field <firstfieldname> as first field with value data, and remove
+ * all other fields of that name (and of name alsoremove if that is != NULL),
+ * returning a newly allocated chunk or NULL on allocation failure. */
+
+char *chunk_normalize(const char *chunk, const char *firstfieldname, const char *data) {
+ const char *c, *ce;
+ char *newchunk, *n;
+ size_t size;
+ size_t data_len, field_len;
+
+ assert (chunk != NULL && firstfieldname != NULL && data != NULL);
+ data_len = strlen(data);
+ field_len = strlen(firstfieldname);
+ c = chunk;
+
+ /* calculate the maximal size we might end up with */
+ size = 2 + strlen(c) + 3 + data_len + field_len;
+
+ newchunk = n = malloc(size);
+ if (FAILEDTOALLOC(n))
+ return NULL;
+
+ /* emit the new first field up front */
+ memcpy(n, firstfieldname, field_len); n += field_len;
+ *(n++) = ':';
+ *(n++) = ' ';
+ memcpy(n, data, data_len); n += data_len;
+ *(n++) = '\n';
+ do {
+ bool toremove;
+
+ if (strncasecmp(c, firstfieldname, field_len) == 0
+ && c[field_len] == ':')
+ toremove = true;
+ else
+ toremove = false;
+ /* search the end of the field (incl. continuation lines) */
+ ce = c;
+ do {
+ while (*ce != '\n' && *ce != '\0')
+ ce++;
+ if (*ce == '\0')
+ break;
+ ce++;
+ } while (*ce == ' ' || *ce == '\t');
+
+ /* copy it, if it is not to be ignored */
+
+ if (!toremove && ce-c > 0) {
+ memcpy(n, c, ce-c);
+ n += ce-c;
+ }
+ /* and proceed with the next */
+ c = ce;
+ } while (*c != '\0' && *c != '\n');
+ if (n > newchunk && *(n-1) != '\n')
+ *(n++) = '\n';
+ *n = '\0';
+ return newchunk;
+}
+
+/* Skip leading whitespace (and, if commentsallowed, whole '#' comment
+ * paragraphs) within the first <len> bytes after <start>; returns the
+ * position where real chunk data begins. */
+const char *chunk_getstart(const char *start, size_t len, bool commentsallowed) {
+ const char *s, *l;
+
+ s = start; l = start + len;
+ while (s < l && (*s == ' ' || *s == '\t' ||
+ *s == '\r' || *s =='\n'))
+ s++;
+ /* ignore leading comments (even full paragraphs of them) */
+ while (commentsallowed && s < l && *s == '#') {
+ while (s < l && *s != '\n')
+ s++;
+ while (s < l && (*s == ' ' || *s == '\t' ||
+ *s == '\r' ||
+ *s =='\n'))
+ s++;
+ }
+ return s;
+}
+
+/* Advance past the current chunk: returns the position after the next
+ * blank line ('\n' optionally followed by '\r's then '\n'), or the end
+ * of the string if no blank line follows. */
+const char *chunk_over(const char *e) {
+ while (*e != '\0') {
+ if (*(e++) == '\n') {
+ while (*e =='\r')
+ e++;
+ if (*e == '\n')
+ return e+1;
+ }
+ }
+ return e;
+}
+
+/* This is a bit wasteful, as with normally perfectly formatted input, it
+ * just writes everything to itself in an inefficient way. But when there
+ * are \r in it or spaces before it or stuff like that, it will be in
+ * perfect form afterwards. */
+/* Write the first chunk found in the first len bytes after start
+ * to buffer and set next to the next data found after it.
+ * buffer can be a different buffer may be the buffer start is in
+ * (as long as start is bigger than buffer).
+ * buffer must be big enough to store up to len+1 bytes.
+ * '\r' characters are dropped while copying; returns the number of
+ * bytes written to buffer (excluding the terminating '\0'). */
+size_t chunk_extract(char *buffer, const char *start, size_t len, bool commentsallowed, const char **next) {
+ const char *e, *n, *l;
+ char *p;
+
+ p = buffer;
+ l = start + len;
+ e = chunk_getstart(start, len, commentsallowed);
+ /* n remembers a newline that might start a blank line (and thus
+ * end the chunk); NULL while no such candidate is pending */
+ n = NULL;
+ while (e < l && *e != '\0') {
+ if (*e == '\r') {
+ e++;
+ } else if (*e == '\n') {
+ *(p++) = *(e++);
+ n = e;
+ while (n < l && *n =='\r')
+ n++;
+ if (n < l && *n == '\n')
+ break;
+ e = n;
+ n = NULL;
+ } else {
+ *(p++) = *(e++);
+ }
+ }
+
+ if (n == NULL) {
+ /* input exhausted without a blank line */
+ n = e;
+ assert (n == l || *n == '\0');
+ assert ((p - buffer) <= (n - start));
+ *p = '\0';
+ } else {
+ /* stopped at a blank line; skip it and any further ones */
+ assert (n < l && *n == '\n');
+ n++;
+ assert (p - buffer < n - start);
+ *p = '\0';
+ while (n < l && (*n == '\n' || *n =='\r'))
+ n++;
+ }
+ *next = n;
+ return p - buffer;
+}
+
diff --git a/chunks.h b/chunks.h
new file mode 100644
index 0000000..3c26912
--- /dev/null
+++ b/chunks.h
@@ -0,0 +1,61 @@
+#ifndef REPREPRO_CHUNKS_H
+#define REPREPRO_CHUNKS_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+/* look for name in chunk. returns RET_NOTHING if not found */
+retvalue chunk_getvalue(const char *, const char *, /*@out@*/char **);
+retvalue chunk_getextralinelist(const char *, const char *, /*@out@*/struct strlist *);
+retvalue chunk_getwordlist(const char *, const char *, /*@out@*/struct strlist *);
+retvalue chunk_getuniqwordlist(const char *, const char *, /*@out@*/struct strlist *);
+retvalue chunk_getwholedata(const char *, const char *, /*@out@*/char **value);
+
+/* Parse a package/source-field: ' *value( ?\(version\))? *' */
+retvalue chunk_getname(const char *, const char *, /*@out@*/char **, bool /*allowversion*/);
+retvalue chunk_getnameandversion(const char *, const char *, /*@out@*/char **, /*@out@*/char **);
+
+/* return RET_OK, if field is found, RET_NOTHING, if not (or value indicates false) */
+retvalue chunk_gettruth(const char *, const char *);
+/* return RET_OK, if field is found, RET_NOTHING, if not */
+retvalue chunk_checkfield(const char *, const char *);
+
+/* modifications of a chunk: */
+struct fieldtoadd {
+	/*@null@*/struct fieldtoadd *next;
+	/* The name of the field: */
+	/*@dependent@*/const char *field;
+	/* The data to include: (if NULL, delete this field) */
+	/*@null@*//*@dependent@*/const char *data;
+	/* how many chars in them (the *exact* len to use
+	 * , no \0 allowed within!), */
+	size_t len_field, len_data;
+};
+
+// TODO make this return retvalue..
+/* Add this the <fields to add> to <chunk> before <beforethis> field,
+ * replacing older fields of this name, if they are already there. */
+/*@null@*/ char *chunk_replacefields(const char *, const struct fieldtoadd *, const char * /*beforethis*/, bool /*maybemissing*/);
+/*@null@*/struct fieldtoadd *deletefield_new(/*@dependent@*/const char *, /*@only@*//*@null@*/struct fieldtoadd *);
+/*@null@*/struct fieldtoadd *aodfield_new(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, /*@only@*/struct fieldtoadd *);
+/*@null@*/struct fieldtoadd *addfield_new(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, /*@only@*/struct fieldtoadd *);
+/*@null@*/struct fieldtoadd *addfield_newn(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, size_t, /*@only@*/struct fieldtoadd *);
+void addfield_free(/*@only@*//*@null@*/struct fieldtoadd *);
+
+/* that is chunk_replacefields(chunk,{fieldname,strlen,data,strlen},fieldname); */
+/*@null@*/char *chunk_replacefield(const char *, const char *, const char *, bool /*maybemissing*/);
+
+/* make sure a given field is first and remove any later occurrences */
+/*@null@*/char *chunk_normalize(const char *, const char *, const char *);
+
+/* reformat control data, removing leading spaces and CRs */
+size_t chunk_extract(char * /*buffer*/, const char */*start*/, size_t, bool, /*@out@*/const char ** /*next*/);
+const char *chunk_getstart(const char *, size_t, bool /*commentsallowed*/);
+const char *chunk_over(const char *);
+
+#endif
diff --git a/configparser.c b/configparser.c
new file mode 100644
index 0000000..7528517
--- /dev/null
+++ b/configparser.c
@@ -0,0 +1,1532 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+
+#include "error.h"
+#include "names.h"
+#include "atoms.h"
+#include "filecntl.h"
+#include "configparser.h"
+
+/* Cursor over an open config file.  Tracks both the current read
+ * position (line/column) and a saved "marker" position used for
+ * error messages pointing at the start of the last token. */
+struct configiterator {
+	FILE *f;
+	/* startline: first line of the current chunk;
+	 * markerline/markercolumn: position saved before reading a token */
+	unsigned int startline, line, column, markerline, markercolumn;
+	const char *filename;
+	/* human-readable name of the kind of chunk being parsed,
+	 * used in error messages */
+	const char *chunkname;
+	/* true once the end of the current logical line was reached */
+	bool eol;
+};
+
+/* trivial read-only accessors for error reporting: */
+const char *config_filename(const struct configiterator *iter) {
+	return iter->filename;
+}
+
+unsigned int config_line(const struct configiterator *iter) {
+	return iter->line;
+}
+
+unsigned int config_column(const struct configiterator *iter) {
+	return iter->column;
+}
+
+unsigned int config_firstline(const struct configiterator *iter) {
+	return iter->startline;
+}
+
+unsigned int config_markerline(const struct configiterator *iter) {
+	return iter->markerline;
+}
+
+unsigned int config_markercolumn(const struct configiterator *iter) {
+	return iter->markercolumn;
+}
+
+/* Consume the remainder of the current physical line (including any
+ * trailing '#'-comment) and set iter->eol. */
+void config_overline(struct configiterator *iter) {
+	int c;
+
+	while (!iter->eol) {
+		c = fgetc(iter->f);
+		if (c == '#') {
+			/* a comment extends to the end of the line */
+			do {
+				c = fgetc(iter->f);
+			} while (c != EOF && c != '\n');
+		}
+		if (c == EOF || c == '\n')
+			iter->eol = true;
+		else
+			iter->column++;
+	}
+}
+
+/* After eol was reached: check whether the next line is a continuation
+ * line (starts with space or tab).  Full-line '#'-comments are skipped.
+ * Returns true and resets column/eol if so; otherwise pushes the first
+ * character back and returns false (field value ends here). */
+bool config_nextline(struct configiterator *iter) {
+	int c;
+
+	assert (iter->eol);
+	c = fgetc(iter->f);
+	while (c == '#') {
+		do {
+			c = fgetc(iter->f);
+		} while (c != EOF && c != '\n');
+		iter->line++;
+		c = fgetc(iter->f);
+	}
+	if (c == EOF)
+		return false;
+	if (c == ' ' || c == '\t') {
+		iter->line++;
+		iter->column = 1;
+		iter->eol = false;
+		return true;
+	}
+	(void)ungetc(c, iter->f);
+	return false;
+}
+
+/* Generic configfinishfunction: append the completed chunk as the new
+ * tail of a linked list (callers chain chunks via *last). */
+retvalue linkedlistfinish(UNUSED(void *privdata), void *this, void **last, UNUSED(bool complete), UNUSED(struct configiterator *dummy3)) {
+	*last = this;
+	return RET_NOTHING;
+}
+
+/* Finalize the chunk in *this: if complete, first verify all required
+ * fields were found (otherwise report and finish with complete=false),
+ * then hand the chunk to finishfunc.  *this is always reset to NULL. */
+static inline retvalue finishchunk(configfinishfunction finishfunc, void *privdata, struct configiterator *iter, const struct configfield *fields, size_t fieldcount, bool *found, void **this, void **last, bool complete) {
+	size_t i;
+	retvalue r;
+
+	if (complete)
+		for (i = 0 ; i < fieldcount ; i++) {
+			if (!fields[i].required)
+				continue;
+			if (found[i])
+				continue;
+			fprintf(stderr,
+"Error parsing config file %s, line %u:\n"
+"Required field '%s' not found in\n"
+"%s starting in line %u and ending in line %u.\n",
+				iter->filename, iter->line,
+				fields[i].name, iter->chunkname,
+				iter->startline, iter->line-1);
+			/* still give finishfunc a chance to free the chunk */
+			(void)finishfunc(privdata, *this, last, false, iter);
+			*this = NULL;
+			return RET_ERROR_MISSING;
+		}
+	r = finishfunc(privdata, *this, last, complete, iter);
+	*this = NULL;
+	return r;
+}
+
+/* Expand a config filename to a full path:
+ *  absolute or './' paths are kept, '~/' is relative to $HOME,
+ *  '+b/', '+o/', '+c/' are relative to basedir/outdir/confdir,
+ *  anything else is relative to confdir.
+ * fndup, if non-NULL, must equal filename and ownership is taken
+ * (it is freed when a new string is returned). */
+char *configfile_expandname(const char *filename, char *fndup) {
+	const char *fromdir;
+	char *n;
+
+	assert (fndup == NULL || fndup == filename);
+
+	if (filename[0] == '/' || (filename[0] == '.' && filename[1] == '/'))
+		return fndup?fndup:strdup(filename);
+	if (filename[0] == '~' && filename[1] == '/') {
+		/* NOTE(review): getenv("HOME") may be NULL in odd
+		 * environments -- presumably calc_dirconcat copes; verify */
+		n = calc_dirconcat(getenv("HOME"), filename + 2);
+		free(fndup);
+		return n;
+	}
+	if (filename[0] != '+' || filename[1] == '\0' || filename[2] != '/') {
+		n = calc_dirconcat(global.confdir, filename);
+		free(fndup);
+		return n;
+	}
+	if (filename[1] == 'b') {
+		fromdir = global.basedir;
+	} else if (filename[1] == 'o') {
+		fromdir = global.outdir;
+	} else if (filename[1] == 'c') {
+		fromdir = global.confdir;
+	} else {
+		fprintf(stderr, "Warning: strange filename '%s'!\n",
+				filename);
+		return fndup?fndup:strdup(filename);
+	}
+	n = calc_dirconcat(fromdir, filename + 3);
+	free(fndup);
+	return n;
+}
+
+static retvalue configfile_parse_multi(/*@only@*/char *, bool, configinitfunction, configfinishfunction, const char *, const struct configfield *, size_t, void *, int, void **, struct strlist *);
+
+/* Parse one config file: a sequence of RFC-822-style chunks separated by
+ * empty lines.  For each chunk initfunc creates a state object, each
+ * recognized field's setfunc fills it, and finishchunk completes it.
+ * '!include:' directives recurse via configfile_parse_multi (depth-limited).
+ * Takes ownership of filename (it is stored in *filenames to suppress
+ * repeated inclusion). */
+static retvalue configfile_parse_single(/*@only@*/char *filename, bool ignoreunknown, configinitfunction initfunc, configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata, int depth, void **last_p, struct strlist *filenames) {
+	/* C99 VLA: tracks which fields were already seen in this chunk */
+	bool found[fieldcount];
+	void *this = NULL;
+	char key[100];
+	size_t keylen;
+	int c, ret;
+	size_t i;
+	struct configiterator iter;
+	retvalue result, r;
+	bool afterinclude = false;
+
+	if (strlist_in(filenames, filename)) {
+		if (verbose >= 0) {
+			fprintf(stderr,
+"Ignoring subsequent inclusion of '%s'!\n", filename);
+		}
+		free(filename);
+		return RET_NOTHING;
+	}
+	iter.filename = filename;
+	/* filenames takes ownership of filename */
+	r = strlist_add(filenames, filename);
+	if (RET_WAS_ERROR(r))
+		return r;
+	iter.chunkname = chunkname;
+	iter.line = 0;
+	iter.column = 0;
+
+	iter.f = fopen(iter.filename, "r");
+	if (iter.f == NULL) {
+		int e = errno;
+		fprintf(stderr, "Error opening config file '%s': %s(%d)\n",
+				iter.filename, strerror(e), e);
+		return RET_ERRNO(e);
+	}
+	result = RET_NOTHING;
+	do {
+		iter.line++;
+		iter.column = 1;
+
+		c = fgetc(iter.f);
+		/* skip full-line comments */
+		while (c == '#') {
+			do {
+				c = fgetc(iter.f);
+			} while (c != EOF && c != '\n');
+			iter.line++;
+			c = fgetc(iter.f);
+		}
+		if (c == '\r') {
+			/* CRs are only tolerated as part of a CRLF line end */
+			do {
+				c = fgetc(iter.f);
+			} while (c == '\r');
+			if (c != EOF && c != '\n') {
+				fprintf(stderr,
+"%s:%u: error parsing configuration file: CR without following LF!\n",
+						iter.filename, iter.line);
+				result = RET_ERROR;
+				break;
+			}
+		}
+		if (c == EOF)
+			break;
+		if (c == '\n') {
+			afterinclude = false;
+			/* Ignore multiple empty lines */
+			if (this == NULL)
+				continue;
+			/* finish this chunk, to get ready for the next: */
+			r = finishchunk(finishfunc, privdata, &iter,
+					fields, fieldcount, found,
+					&this, last_p, true);
+			if (RET_WAS_ERROR(r)) {
+				result = r;
+				break;
+			}
+			continue;
+		}
+		if (afterinclude) {
+			fprintf(stderr,
+"Warning parsing %s, line %u: no empty line after '!include'-sequence"
+" might cause ambiguity in the future!\n",
+					iter.filename, iter.line);
+			afterinclude = false;
+		}
+		if (c == '!') {
+			/* a '!keyword:' directive (currently only !include) */
+			keylen = 0;
+			while ((c = fgetc(iter.f)) != EOF && c >= 'a' && c <= 'z') {
+				iter.column++;
+				key[keylen++] = c;
+				if (keylen >= 10)
+					break;
+			}
+			if (c != ':') {
+				fprintf(stderr,
+"Error parsing %s, line %u: invalid !-sequence!\n",
+						iter.filename, iter.line);
+				result = RET_ERROR;
+				break;
+			}
+			iter.column++;
+			if (keylen == 7 && memcmp(key, "include", 7) == 0) {
+				char *filetoinclude;
+
+				if (this != NULL) {
+					fprintf(stderr,
+"Error parsing %s, line %u: '!include' statement within unterminated %s!\n"
+"(perhaps you forgot to put an empty line before this)\n",
+						iter.filename, iter.line,
+						chunkname);
+					result = RET_ERROR;
+					break;
+				}
+				/* guard against include cycles / runaway nesting */
+				if (depth > 20) {
+					fprintf(stderr,
+"Error parsing %s, line %u: too many nested '!include' statements!\n",
+						iter.filename, iter.line);
+					result = RET_ERROR;
+					break;
+				}
+				r = config_getonlyword(&iter, "!include",
+						NULL, &filetoinclude);
+				if (RET_WAS_ERROR(r)) {
+					result = r;
+					break;
+				}
+				filetoinclude = configfile_expandname(
+						filetoinclude, filetoinclude);
+				r = configfile_parse_multi(filetoinclude,
+						ignoreunknown,
+						initfunc, finishfunc,
+						chunkname,
+						fields, fieldcount,
+						privdata, depth + 1,
+						last_p, filenames);
+				if (RET_WAS_ERROR(r)) {
+					result = r;
+					break;
+				}
+				afterinclude = true;
+			} else {
+				key[keylen] = '\0';
+				fprintf(stderr,
+"Error parsing %s, line %u: unknown !-sequence '%s'!\n",
+					iter.filename, iter.line, key);
+				result = RET_ERROR;
+				break;
+			}
+			/* ignore all data left of this field */
+			do {
+				config_overline(&iter);
+			} while (config_nextline(&iter));
+			continue;
+		}
+		if (c == '\0') {
+			fprintf(stderr,
+"Error parsing %s, line %u: \\000 character not allowed in config files!\n",
+				iter.filename, iter.line);
+			result = RET_ERROR;
+			break;
+		}
+		if (c == ' ' || c == '\t') {
+			fprintf(stderr,
+"Error parsing %s, line %u: unexpected white space before keyword!\n",
+				iter.filename, iter.line);
+			result = RET_ERROR;
+			break;
+		}
+		/* read the field name up to the colon */
+		key[0] = c;
+		keylen = 1;
+
+		while ((c = fgetc(iter.f)) != EOF && c != ':' && c != '\n'
+				&& c != '#' && c != '\0') {
+			iter.column++;
+			if (c == ' ') {
+				fprintf(stderr,
+"Error parsing %s, line %u: Unexpected space in header name!\n",
+					iter.filename, iter.line);
+				result = RET_ERROR;
+				break;
+			}
+			if (c == '\t') {
+				fprintf(stderr,
+"Error parsing %s, line %u: Unexpected tabulator character in header name!\n",
+					iter.filename, iter.line);
+				result = RET_ERROR;
+				break;
+			}
+			key[keylen++] = c;
+			if (keylen >= 100)
+				break;
+		}
+		if (c != ':') {
+			/* the space/tab cases already printed their message */
+			if (c != ' ' && c != '\t')
+				/* newline or end-of-file */
+				fprintf(stderr,
+"Error parsing %s, line %u, column %u: Colon expected!\n",
+					iter.filename, iter.line, iter.column);
+			result = RET_ERROR;
+			break;
+		}
+		if (this == NULL) {
+			/* new chunk, initialize everything */
+			r = initfunc(privdata, *last_p, &this);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r)) {
+				result = r;
+				break;
+			}
+			assert (this != NULL);
+			iter.startline = iter.line;
+			memset(found, 0, sizeof(found));
+		}
+		/* look the field name up (case-insensitive) */
+		for (i = 0 ; i < fieldcount ; i++) {
+			if (keylen != fields[i].namelen)
+				continue;
+			if (strncasecmp(key, fields[i].name, keylen) != 0)
+				continue;
+			break;
+		}
+		if (i >= fieldcount) {
+			key[keylen] = '\0';
+			if (!ignoreunknown) {
+				fprintf(stderr,
+"Error parsing %s, line %u: Unknown header '%s'!\n",
+					iter.filename, iter.line, key);
+				result = RET_ERROR_UNKNOWNFIELD;
+				break;
+			}
+			if (verbose >= 0)
+				fprintf(stderr,
+"Warning parsing %s, line %u: Unknown header '%s'!\n",
+					iter.filename, iter.line, key);
+		} else if (found[i]) {
+			fprintf(stderr,
+"Error parsing %s, line %u: Second appearance of '%s' in the same chunk!\n",
+				iter.filename, iter.line, fields[i].name);
+			result = RET_ERROR;
+			break;
+		} else
+			found[i] = true;
+		/* skip whitespace between the colon and the value */
+		do {
+			c = fgetc(iter.f);
+			iter.column++;
+		} while (c == ' ' || c == '\t');
+		(void)ungetc(c, iter.f);
+
+		iter.eol = false;
+		if (i < fieldcount) {
+			r = fields[i].setfunc(privdata, fields[i].name, this, &iter);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+		/* ignore all data left of this field */
+		do {
+			config_overline(&iter);
+		} while (config_nextline(&iter));
+	} while (true);
+	if (this != NULL) {
+		/* finish the last (possibly incomplete) chunk */
+		r = finishchunk(finishfunc, privdata, &iter,
+				fields, fieldcount, found,
+				&this, last_p,
+				!RET_WAS_ERROR(result));
+		RET_UPDATE(result, r);
+	}
+	if (ferror(iter.f) != 0) {
+		int e = errno;
+		fprintf(stderr, "Error reading config file '%s': %s(%d)\n",
+				iter.filename, strerror(e), e);
+		r = RET_ERRNO(e);
+		RET_UPDATE(result, r);
+	}
+	ret = fclose(iter.f);
+	if (ret != 0) {
+		int e = errno;
+		fprintf(stderr, "Error closing config file '%s': %s(%d)\n",
+				iter.filename, strerror(e), e);
+		r = RET_ERRNO(e);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* Parse a config path that may be a file or a directory.  For a
+ * directory, every regular file (or symlink) ending in ".conf" and not
+ * starting with '.' is parsed with configfile_parse_single.
+ * Takes ownership of fullfilename. */
+static retvalue configfile_parse_multi(/*@only@*/char *fullfilename, bool ignoreunknown, configinitfunction initfunc, configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata, int depth, void **last_p, struct strlist *filenames) {
+	retvalue result = RET_NOTHING, r;
+
+	if (isdirectory(fullfilename)) {
+		DIR *dir;
+		struct dirent *de;
+		int e;
+		char *subfilename;
+
+		dir = opendir(fullfilename);
+		if (dir == NULL) {
+			e = errno;
+			fprintf(stderr,
+"Error %d opening directory '%s': %s\n",
+				e, fullfilename, strerror(e));
+			free(fullfilename);
+			return RET_ERRNO(e);
+		}
+		/* errno reset before each readdir to tell EOF from error */
+		while ((errno = 0, de = readdir(dir)) != NULL) {
+			size_t l;
+			if (de->d_type != DT_REG && de->d_type != DT_LNK
+					&& de->d_type != DT_UNKNOWN)
+				continue;
+			if (de->d_name[0] == '.')
+				continue;
+			l = strlen(de->d_name);
+			if (l < 5 || strcmp(de->d_name + l - 5, ".conf") != 0)
+				continue;
+			subfilename = calc_dirconcat(fullfilename, de->d_name);
+			if (FAILEDTOALLOC(subfilename)) {
+				(void)closedir(dir);
+				free(fullfilename);
+				return RET_ERROR_OOM;
+			}
+			r = configfile_parse_single(subfilename, ignoreunknown,
+				initfunc, finishfunc,
+				chunkname, fields, fieldcount, privdata,
+				depth, last_p, filenames);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r)) {
+				(void)closedir(dir);
+				free(fullfilename);
+				return r;
+			}
+		}
+		e = errno;
+		if (e != 0) {
+			(void)closedir(dir);
+			fprintf(stderr,
+"Error %d reading directory '%s': %s\n",
+				e, fullfilename, strerror(e));
+			free(fullfilename);
+			return RET_ERRNO(e);
+		}
+		if (closedir(dir) != 0) {
+			e = errno;
+			fprintf(stderr,
+"Error %d closing directory '%s': %s\n",
+				e, fullfilename, strerror(e));
+			free(fullfilename);
+			return RET_ERRNO(e);
+		}
+		free(fullfilename);
+	} else {
+		r = configfile_parse_single(fullfilename, ignoreunknown,
+				initfunc, finishfunc,
+				chunkname, fields, fieldcount, privdata,
+				depth, last_p, filenames);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* Public entry point: expand filename relative to the config dir and
+ * parse it (and everything it includes). */
+retvalue configfile_parse(const char *filename, bool ignoreunknown, configinitfunction initfunc, configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata) {
+	struct strlist filenames;
+	void *last = NULL;
+	retvalue r;
+	char *fullfilename;
+
+	fullfilename = configfile_expandname(filename, NULL);
+	if (fullfilename == NULL)
+		return RET_ERROR_OOM;
+
+	strlist_init(&filenames);
+
+	r = configfile_parse_multi(fullfilename, ignoreunknown,
+			initfunc, finishfunc,
+			chunkname, fields, fieldcount, privdata,
+			0, &last, &filenames);
+
+	/* only free filenames last, as they might still be
+	 * referenced while running */
+	strlist_done(&filenames);
+	return r;
+}
+
+/* Read the next significant character of a field value: comments run to
+ * end of line, CR sequences are folded into the following LF (with a
+ * warning for a lone CR), and EOF is turned into a final '\n' so the
+ * caller always sees a proper line end.  Sets iter->eol on '\n'. */
+static inline int config_nextchar(struct configiterator *iter) {
+	int c;
+	unsigned int realcolumn;
+
+	c = fgetc(iter->f);
+	realcolumn = iter->column + 1;
+	if (c == '#') {
+		do {
+			c = fgetc(iter->f);
+			realcolumn++;
+		} while (c != '\n' && c != EOF && c != '\r');
+	}
+	if (c == '\r') {
+		while (c == '\r') {
+			realcolumn++;
+			c = fgetc(iter->f);
+		}
+		if (c != '\n' && c != EOF) {
+			fprintf(stderr,
+"Warning parsing config file '%s', line '%u', column %u: CR not followed by LF!\n",
+					config_filename(iter),
+					config_line(iter),
+					realcolumn);
+
+		}
+	}
+	if (c == EOF) {
+		fprintf(stderr,
+"Warning parsing config file '%s', line '%u': File ending without final LF!\n",
+				config_filename(iter),
+				config_line(iter));
+		/* fake a proper text file: */
+		c = '\n';
+	}
+	iter->column++;
+	if (c == '\n')
+		iter->eol = true;
+	return c;
+}
+
+/* Skip whitespace (possibly across continuation lines) and return the
+ * next non-space character, or EOF when the field value ends.  The
+ * marker position is updated to point at the returned character. */
+static inline int config_nextnonspace(struct configiterator *iter) {
+	int c;
+
+	do {
+		iter->markerline = iter->line;
+		iter->markercolumn = iter->column;
+		if (iter->eol) {
+			if (!config_nextline(iter))
+				return EOF;
+		}
+		c = config_nextchar(iter);
+	} while (c == '\n' || c == ' ' || c == '\t');
+	return c;
+}
+
+/* Like config_nextnonspace, but never continues onto the next line:
+ * returns EOF at the end of the current line. */
+int config_nextnonspaceinline(struct configiterator *iter) {
+	int c;
+
+	do {
+		iter->markerline = iter->line;
+		iter->markercolumn = iter->column;
+		if (iter->eol)
+			return EOF;
+		c = config_nextchar(iter);
+		if (c == '\n')
+			return EOF;
+	} while (c == '\r' || c == ' ' || c == '\t');
+	return c;
+}
+
+/* errorlast reports at the saved marker (start of the offending token),
+ * error reports at the current read position. */
+#define configparser_errorlast(iter, message, ...) \
+	fprintf(stderr, "Error parsing %s, line %u, column %u: " message "\n", \
+			iter->filename, iter->markerline, \
+			iter->markercolumn, ## __VA_ARGS__);
+#define configparser_error(iter, message, ...) \
+	fprintf(stderr, "Error parsing %s, line %u, column %u: " message "\n", \
+			iter->filename, iter->line, \
+			iter->column, ## __VA_ARGS__);
+
+/* Read the rest of a word whose first character firstc was already
+ * consumed; stops (and consumes) the terminating space, tab or newline.
+ * The word is returned malloc'd in *result_p; never empty. */
+retvalue config_completeword(struct configiterator *iter, char firstc, char **result_p) {
+	size_t size = 0, len = 0;
+	char *value = NULL, *nv;
+	int c = firstc;
+
+	iter->markerline = iter->line;
+	iter->markercolumn = iter->column;
+	do {
+		/* grow in 128-byte steps, keeping room for c and '\0' */
+		if (len + 2 >= size) {
+			nv = realloc(value, size+128);
+			if (FAILEDTOALLOC(nv)) {
+				free(value);
+				return RET_ERROR_OOM;
+			}
+			size += 128;
+			value = nv;
+		}
+		value[len] = c;
+		len++;
+		c = config_nextchar(iter);
+		if (c == '\n')
+			break;
+	} while (c != ' ' && c != '\t');
+	assert (len > 0);
+	assert (len < size);
+	value[len] = '\0';
+	/* shrink to fit; if that fails the larger buffer is still valid */
+	nv = realloc(value, len+1);
+	if (nv == NULL)
+		*result_p = value;
+	else
+		*result_p = nv;
+	return RET_OK;
+}
+
+/* Read the next word on the current line only.
+ * Returns RET_NOTHING at end of line. */
+retvalue config_getwordinline(struct configiterator *iter, char **result_p) {
+	int c;
+
+	c = config_nextnonspaceinline(iter);
+	if (c == EOF)
+		return RET_NOTHING;
+	return config_completeword(iter, c, result_p);
+}
+
+/* Read the next word of the field value (continuation lines included).
+ * Returns RET_NOTHING when the value is exhausted. */
+retvalue config_getword(struct configiterator *iter, char **result_p) {
+	int c;
+
+	c = config_nextnonspace(iter);
+	if (c == EOF)
+		return RET_NOTHING;
+	return config_completeword(iter, c, result_p);
+}
+
+/* Parse a time span like "7d 2m 1y" into seconds.  Each number may be
+ * suffixed with 'y' (years), 'm' (months of 31 days) or 'd' (days);
+ * a bare number counts as days.  Sums all terms into *time_p.
+ * Each unit's magnitude is capped so the total stays sane (~100 years). */
+retvalue config_gettimespan(struct configiterator *iter, const char *header, unsigned long *time_p) {
+	long long currentnumber, currentsum = 0;
+	bool empty = true;
+	int c;
+
+	do {
+		c = config_nextnonspace(iter);
+		if (c == EOF) {
+			if (empty) {
+				configparser_errorlast(iter,
+"Unexpected end of %s header (value expected).", header);
+				return RET_ERROR;
+			}
+			*time_p = currentsum;
+			return RET_OK;
+		}
+		iter->markerline = iter->line;
+		iter->markercolumn = iter->column;
+		currentnumber = 0;
+		if (c < '0' || c > '9') {
+			configparser_errorlast(iter,
+"Unexpected character '%c' where a digit was expected in %s header.",
+				(char)c, header);
+			return RET_ERROR;
+		}
+		empty = false;
+		do {
+			/* bail out before the multiply can overflow */
+			if (currentnumber > 3660) {
+				configparser_errorlast(iter,
+"Absurdly long time span (> 100 years) in %s header.", header);
+				return RET_ERROR;
+			}
+			currentnumber *= 10;
+			currentnumber += (c - '0');
+			c = config_nextchar(iter);
+		} while (c >= '0' && c <= '9');
+		/* the unit letter may be separated by whitespace */
+		if (c == ' ' || c == '\t' || c == '\n')
+			c = config_nextnonspace(iter);
+		if (c == 'y') {
+			if (currentnumber > 100) {
+				configparser_errorlast(iter,
+"Absurdly long time span (> 100 years) in %s header.", header);
+				return RET_ERROR;
+			}
+			currentnumber *= 365*24*60*60;
+		} else if (c == 'm') {
+			if (currentnumber > 1200) {
+				configparser_errorlast(iter,
+"Absurdly long time span (> 100 years) in %s header.", header);
+				return RET_ERROR;
+			}
+			currentnumber *= 31*24*60*60;
+		} else if (c == 'd') {
+			if (currentnumber > 36600) {
+				configparser_errorlast(iter,
+"Absurdly long time span (> 100 years) in %s header.", header);
+				return RET_ERROR;
+			}
+			currentnumber *= 24*60*60;
+		} else {
+			/* no unit: treat as days; anything but EOF is an error */
+			if (currentnumber > 36600) {
+				configparser_errorlast(iter,
+"Absurdly long time span (> 100 years) in %s header.", header);
+				return RET_ERROR;
+			}
+			currentnumber *= 24*60*60;
+			if (c != EOF) {
+				configparser_errorlast(iter,
+"Unexpected character '%c' where a 'd','m' or 'y' was expected in %s header.",
+					(char)c, header);
+				return RET_ERROR;
+			}
+		}
+		currentsum += currentnumber;
+	} while (true);
+}
+
+/* Read a field value that must consist of exactly one word, optionally
+ * validated by check.  The word is returned malloc'd in *result_p. */
+retvalue config_getonlyword(struct configiterator *iter, const char *header, checkfunc check, char **result_p) {
+	char *value;
+	retvalue r;
+
+	r = config_getword(iter, &value);
+	if (r == RET_NOTHING) {
+		configparser_errorlast(iter,
+"Unexpected end of %s header (value expected).", header);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (config_nextnonspace(iter) != EOF) {
+		configparser_error(iter,
+"End of %s header expected (but trailing garbage).", header);
+		free(value);
+		return RET_ERROR;
+	}
+	if (check != NULL) {
+		const char *errormessage = check(value);
+		if (errormessage != NULL) {
+			configparser_errorlast(iter,
+"Malformed %s content '%s': %s", header, value, errormessage);
+			free(value);
+			checkerror_free(errormessage);
+			return RET_ERROR;
+		}
+	}
+	*result_p = value;
+	return RET_OK;
+}
+
+/* Read a single-word script filename and expand it to a full path. */
+retvalue config_getscript(struct configiterator *iter, const char *name, char **value_p) {
+	char *value;
+	retvalue r;
+
+	r = config_getonlyword(iter, name, NULL, &value);
+	if (RET_IS_OK(r)) {
+		assert (value != NULL && value[0] != '\0');
+		value = configfile_expandname(value, value);
+		if (FAILEDTOALLOC(value))
+			return RET_ERROR_OOM;
+		*value_p = value;
+	}
+	return r;
+}
+
+/* Read a single-word URL of the form method:path and validate its shape.
+ * A single trailing slash is stripped from the path part. */
+retvalue config_geturl(struct configiterator *iter, const char *header, char **result_p) {
+	char *value, *p;
+	retvalue r;
+	size_t l;
+
+
+	r = config_getword(iter, &value);
+	if (r == RET_NOTHING) {
+		configparser_errorlast(iter,
+"Unexpected end of %s header (value expected).", header);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	// TODO: think about allowing (escaped) spaces...
+	if (config_nextnonspace(iter) != EOF) {
+		configparser_error(iter,
+"End of %s header expected (but trailing garbage).", header);
+		free(value);
+		return RET_ERROR;
+	}
+	/* the method name may contain [A-Za-z0-9_+-] only */
+	p = value;
+	while (*p != '\0' && (*p == '_' || *p == '-' || *p == '+' ||
+				(*p>='a' && *p<='z') || (*p>='A' && *p<='Z') ||
+				(*p>='0' && *p<='9'))) {
+		p++;
+	}
+	if (*p != ':') {
+		configparser_errorlast(iter,
+"Malformed %s field: no colon (must be method:path).", header);
+		free(value);
+		return RET_ERROR;
+	}
+	if (p == value) {
+		configparser_errorlast(iter,
+"Malformed %s field: transport method name expected (colon is not allowed to be the first character)!", header);
+		free(value);
+		return RET_ERROR;
+	}
+	p++;
+	l = strlen(p);
+	/* remove one leading slash, as we always add one and some apt-methods
+	 * are confused with //. (end with // if you really want it) */
+	if (l > 0 && p[l - 1] == '/')
+		p[l - 1] = '\0';
+	*result_p = value;
+	return RET_OK;
+}
+
+/* Read all words of a field value into *result_p, rejecting duplicates
+ * and (if check is non-NULL) malformed entries. */
+retvalue config_getuniqwords(struct configiterator *iter, const char *header, checkfunc check, struct strlist *result_p) {
+	char *value;
+	retvalue r;
+	struct strlist data;
+	const char *errormessage;
+
+	strlist_init(&data);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+		if (strlist_in(&data, value)) {
+			configparser_errorlast(iter,
+"Unexpected duplicate '%s' within %s header.", value, header);
+			free(value);
+			strlist_done(&data);
+			return RET_ERROR;
+		} else if (check != NULL && (errormessage = check(value)) != NULL) {
+			configparser_errorlast(iter,
+"Malformed %s element '%s': %s", header, value, errormessage);
+			checkerror_free(errormessage);
+			free(value);
+			strlist_done(&data);
+			return RET_ERROR;
+		} else {
+			/* strlist_add takes ownership of value */
+			r = strlist_add(&data, value);
+			if (RET_WAS_ERROR(r)) {
+				strlist_done(&data);
+				return r;
+			}
+		}
+	}
+	strlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Read all words of a field value, intern each as an atom of the given
+ * type (creating it if not yet known) and collect the atoms uniquely in
+ * *result_p.  Words may be validated by check first. */
+retvalue config_getinternatomlist(struct configiterator *iter, const char *header, enum atom_type type, checkfunc check, struct atomlist *result_p) {
+	char *value;
+	retvalue r;
+	struct atomlist data;
+	const char *errormessage;
+	atom_t atom;
+
+	atomlist_init(&data);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+		if (check != NULL && (errormessage = check(value)) != NULL) {
+			configparser_errorlast(iter,
+"Malformed %s element '%s': %s", header, value, errormessage);
+			checkerror_free(errormessage);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		r = atom_intern(type, value, &atom);
+		if (RET_WAS_ERROR(r)) {
+			/* fixed: this error path used to leak both the word
+			 * and the list collected so far */
+			free(value);
+			atomlist_done(&data);
+			return r;
+		}
+		r = atomlist_add_uniq(&data, atom);
+		if (r == RET_NOTHING) {
+			configparser_errorlast(iter,
+"Unexpected duplicate '%s' within %s header.", value, header);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		free(value);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+	}
+	atomlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Read a single word and look it up as an already-known atom of the
+ * given type; unknown names are an error. */
+retvalue config_getatom(struct configiterator *iter, const char *header, enum atom_type type, atom_t *result_p) {
+	char *value;
+	retvalue r;
+	atom_t atom;
+
+	r = config_getword(iter, &value);
+	if (r == RET_NOTHING) {
+		configparser_errorlast(iter,
+"Unexpected empty '%s' field.", header);
+		r = RET_ERROR_MISSING;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	atom = atom_find(type, value);
+	if (!atom_defined(atom)) {
+		configparser_errorlast(iter,
+"Not previously seen %s '%s' within '%s' field.", atomtypes[type], value, header);
+		free(value);
+		return RET_ERROR;
+	}
+	*result_p = atom;
+	free(value);
+	return RET_OK;
+}
+
+/* Read all words of a field value as already-known atoms, collected
+ * uniquely in *result_p; unknown names and duplicates are errors. */
+retvalue config_getatomlist(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *result_p) {
+	char *value;
+	retvalue r;
+	struct atomlist data;
+	atom_t atom;
+
+	atomlist_init(&data);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+		atom = atom_find(type, value);
+		if (!atom_defined(atom)) {
+			configparser_errorlast(iter,
+"Not previously seen %s '%s' within '%s' header.", atomtypes[type], value, header);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		r = atomlist_add_uniq(&data, atom);
+		if (r == RET_NOTHING) {
+			configparser_errorlast(iter,
+"Unexpected duplicate '%s' within %s header.", value, header);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		free(value);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+	}
+	atomlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Read a list of "from>into" atom pairs into two parallel lists.
+ * A bare word "x", a leading ">x" or a trailing "x>" all mean x>x
+ * (same origin and destination).  All names must be known atoms. */
+retvalue config_getsplitatoms(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *from_p, struct atomlist *into_p) {
+	char *value, *separator;
+	atom_t origin, destination;
+	retvalue r;
+	struct atomlist data_from, data_into;
+
+	atomlist_init(&data_from);
+	atomlist_init(&data_into);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data_from);
+			atomlist_done(&data_into);
+			return r;
+		}
+		/* after this if/else chain, separator points at the name
+		 * to report in the "destination unknown" message */
+		separator = strchr(value, '>');
+		if (separator == NULL) {
+			separator = value;
+			destination = atom_find(type, value);
+			origin = destination;
+		} else if (separator == value) {
+			destination = atom_find(type, separator + 1);
+			origin = destination;
+		} else if (separator[1] == '\0') {
+			*separator = '\0';
+			separator = value;
+			destination = atom_find(type, value);
+			origin = destination;
+		} else {
+			*separator = '\0';
+			separator++;
+			origin = atom_find(type, value);
+			destination = atom_find(type, separator);
+		}
+		if (!atom_defined(origin)) {
+			configparser_errorlast(iter,
+"Unknown %s '%s' in %s.", atomtypes[type], value, header);
+			free(value);
+			atomlist_done(&data_from);
+			atomlist_done(&data_into);
+			return RET_ERROR;
+		}
+		if (!atom_defined(destination)) {
+			configparser_errorlast(iter,
+"Unknown %s '%s' in %s.", atomtypes[type], separator, header);
+			free(value);
+			atomlist_done(&data_from);
+			atomlist_done(&data_into);
+			return RET_ERROR;
+		}
+		free(value);
+		r = atomlist_add(&data_from, origin);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data_from);
+			atomlist_done(&data_into);
+			return r;
+		}
+		r = atomlist_add(&data_into, destination);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data_from);
+			atomlist_done(&data_into);
+			return r;
+		}
+	}
+	atomlist_move(from_p, &data_from);
+	atomlist_move(into_p, &data_into);
+	return RET_OK;
+}
+
+/* Like config_getatomlist, but every atom must additionally be a member
+ * of superset (the list parsed from superset_header). */
+retvalue config_getatomsublist(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *result_p, const struct atomlist *superset, const char *superset_header) {
+	char *value;
+	retvalue r;
+	struct atomlist data;
+	atom_t atom;
+
+	atomlist_init(&data);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+		atom = atom_find(type, value);
+		if (!atom_defined(atom) || !atomlist_in(superset, atom)) {
+			configparser_errorlast(iter,
+"'%s' not allowed in %s as it was not in %s.", value, header, superset_header);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		r = atomlist_add_uniq(&data, atom);
+		if (r == RET_NOTHING) {
+			configparser_errorlast(iter,
+"Unexpected duplicate '%s' within %s header.", value, header);
+			free(value);
+			atomlist_done(&data);
+			return RET_ERROR;
+		}
+		free(value);
+		if (RET_WAS_ERROR(r)) {
+			atomlist_done(&data);
+			return r;
+		}
+	}
+	atomlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Read all words of a field value into *result_p, duplicates allowed. */
+retvalue config_getwords(struct configiterator *iter, struct strlist *result_p) {
+	char *value;
+	retvalue r;
+	struct strlist data;
+
+	strlist_init(&data);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+		r = strlist_add(&data, value);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+	}
+	strlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Parse a SignWith: style header into *result_p.
+ * Two forms are accepted:
+ *  - "! script": result is the two-element list {"!", expanded script name},
+ *    no further words may follow;
+ *  - otherwise: every word (key id) is stored in the list as-is.
+ * Returns RET_ERROR with a message on empty value or trailing garbage. */
+retvalue config_getsignwith(struct configiterator *iter, const char *name, struct strlist *result_p) {
+	char *value;
+	retvalue r;
+	struct strlist data;
+	int c;
+
+	strlist_init(&data);
+
+	c = config_nextnonspace(iter);
+	if (c == EOF) {
+		configparser_errorlast(iter,
+"Missing value for %s field.", name);
+		return RET_ERROR;
+	}
+	/* if the first character is a '!', a script to start follows */
+	if (c == '!') {
+		const char *type = "!";
+
+		iter->markerline = iter->line;
+		iter->markercolumn = iter->column;
+		c = config_nextchar(iter);
+		if (c == '-') {
+			/* '!-' is rejected until it gets a meaning; once it
+			 * does, set type = "!-" and continue parsing here
+			 * (the former unreachable code after this return was
+			 * removed). */
+			configparser_errorlast(iter,
+"'!-' in signwith lines reserved for future usage!\n");
+			return RET_ERROR;
+		} else if (c == '\n' || c == ' ' || c == '\t')
+			c = config_nextnonspace(iter);
+		if (c == EOF) {
+			configparser_errorlast(iter,
+"Missing value for %s field.", name);
+			return RET_ERROR;
+		}
+		r = config_completeword(iter, c, &value);
+		if (RET_WAS_ERROR(r))
+			return r;
+		/* a script must be the only thing in this header */
+		if (config_nextnonspace(iter) != EOF) {
+			configparser_error(iter,
+"End of %s header expected (but trailing garbage).", name);
+			free(value);
+			return RET_ERROR;
+		}
+		assert (value != NULL && value[0] != '\0');
+		/* expand e.g. +b/ prefixes; consumes the old value */
+		value = configfile_expandname(value, value);
+		if (FAILEDTOALLOC(value))
+			return RET_ERROR_OOM;
+		r = strlist_add_dup(&data, type);
+		if (RET_WAS_ERROR(r)) {
+			free(value);
+			return r;
+		}
+		r = strlist_add(&data, value);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+		strlist_move(result_p, &data);
+		return RET_OK;
+	}
+	/* otherwise each word is stored in the strlist */
+	r = config_completeword(iter, c, &value);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = strlist_add(&data, value);
+	if (RET_WAS_ERROR(r))
+		return r;
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+		r = strlist_add(&data, value);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data);
+			return r;
+		}
+	}
+	strlist_move(result_p, &data);
+	return RET_OK;
+}
+
+/* Parse words of the form "origin>destination" into two parallel lists.
+ * Special cases: "word" maps word to itself, "word>" maps word to word,
+ * ">word" maps word to word.  Both lists always stay the same length. */
+retvalue config_getsplitwords(struct configiterator *iter, UNUSED(const char *header), struct strlist *from_p, struct strlist *into_p) {
+	char *value, *origin, *destination, *separator;
+	retvalue r;
+	struct strlist data_from, data_into;
+
+	strlist_init(&data_from);
+	strlist_init(&data_into);
+	while ((r = config_getword(iter, &value)) != RET_NOTHING) {
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data_from);
+			strlist_done(&data_into);
+			return r;
+		}
+		separator = strchr(value, '>');
+		if (separator == NULL) {
+			/* no '>': source and target are the same name */
+			destination = strdup(value);
+			origin = value;
+		} else if (separator == value) {
+			/* leading '>': both sides use the part after it */
+			destination = strdup(separator+1);
+			origin = strdup(separator+1);
+			free(value);
+		} else if (separator[1] == '\0') {
+			/* trailing '>': both sides use the part before it */
+			*separator = '\0';
+			destination = strdup(value);
+			origin = value;
+		} else {
+			origin = strndup(value, separator-value);
+			destination = strdup(separator+1);
+			free(value);
+		}
+		if (FAILEDTOALLOC(origin) || FAILEDTOALLOC(destination)) {
+			free(origin); free(destination);
+			strlist_done(&data_from);
+			strlist_done(&data_into);
+			return RET_ERROR_OOM;
+		}
+		/* strlist_add takes ownership of origin; on its failure
+		 * only destination is still ours to free here
+		 * (NOTE(review): assumes strlist_add releases its argument
+		 * on error - verify against strlist.c) */
+		r = strlist_add(&data_from, origin);
+		if (RET_WAS_ERROR(r)) {
+			free(destination);
+			strlist_done(&data_from);
+			strlist_done(&data_into);
+			return r;
+		}
+		r = strlist_add(&data_into, destination);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&data_from);
+			strlist_done(&data_into);
+			return r;
+		}
+	}
+	strlist_move(from_p, &data_from);
+	strlist_move(into_p, &data_into);
+	return RET_OK;
+}
+
+/* Read one word and translate it via the NULL-terminated constants table.
+ * Returns RET_NOTHING on end of header, RET_ERROR_UNKNOWNFIELD when the
+ * word matches no table entry, otherwise stores the value in *result_p. */
+retvalue config_getconstant(struct configiterator *iter, const struct constant *constants, int *result_p) {
+	const struct constant *candidate;
+	char *word;
+	retvalue r;
+
+	/* that could be done more in-situ,
+	 * but is not runtime-critical at all */
+
+	r = config_getword(iter, &word);
+	if (r == RET_NOTHING || RET_WAS_ERROR(r))
+		return r;
+	for (candidate = constants ; candidate->name != NULL ; candidate++) {
+		if (strcmp(candidate->name, word) != 0)
+			continue;
+		*result_p = candidate->value;
+		free(word);
+		return RET_OK;
+	}
+	free(word);
+	return RET_ERROR_UNKNOWNFIELD;
+}
+
+/* Set flags[] entries for every word of the header found in constants.
+ * Unknown words are an error unless <ignoreunknown> (then only warned,
+ * <msg> is appended to both messages).  Returns RET_OK if at least one
+ * flag was set, RET_NOTHING for an empty header, or an error. */
+retvalue config_getflags(struct configiterator *iter, const char *header, const struct constant *constants, bool *flags, bool ignoreunknown, const char *msg) {
+	retvalue r, result = RET_NOTHING;
+	int option = -1;
+
+	while (true) {
+		r = config_getconstant(iter, constants, &option);
+		if (r == RET_NOTHING)
+			break;
+		if (r == RET_ERROR_UNKNOWNFIELD) {
+// TODO: would be nice to have the wrong flag here to put it in the error message:
+			if (ignoreunknown) {
+				/* warn and keep parsing the rest of the header */
+				fprintf(stderr,
+"Warning: ignored error parsing config file %s, line %u, column %u:\n"
+"Unknown flag in %s header.%s\n",
+					config_filename(iter),
+					config_markerline(iter),
+					config_markercolumn(iter),
+					header, msg);
+				continue;
+			}
+			fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Unknown flag in %s header.%s\n",
+					config_filename(iter),
+					config_markerline(iter),
+					config_markercolumn(iter),
+					header, msg);
+		}
+		/* also reaches here for the non-ignored unknown-field case */
+		if (RET_WAS_ERROR(r))
+			return r;
+		assert (option >= 0);
+		flags[option] = true;
+		result = RET_OK;
+		option = -1;
+	}
+	return result;
+}
+
+/* Read the complete (possibly multi-line) value of the current header
+ * verbatim into a freshly allocated string, with trailing whitespace
+ * removed.  Returns RET_NOTHING if the header is empty. */
+retvalue config_getall(struct configiterator *iter, char **result_p) {
+	size_t size = 0, len = 0;
+	char *value = NULL, *nv;
+	int c;
+
+	c = config_nextnonspace(iter);
+	if (c == EOF)
+		return RET_NOTHING;
+	/* remember where the value started for later error messages */
+	iter->markerline = iter->line;
+	iter->markercolumn = iter->column;
+	do {
+		/* grow the buffer in 128 byte steps, keeping room for '\0' */
+		if (len + 2 >= size) {
+			nv = realloc(value, size+128);
+			if (FAILEDTOALLOC(nv)) {
+				free(value);
+				return RET_ERROR_OOM;
+			}
+			size += 128;
+			value = nv;
+		}
+		value[len] = c;
+		len++;
+		/* continuation lines belong to the same header value */
+		if (iter->eol) {
+			if (!config_nextline(iter))
+				break;
+		}
+		c = config_nextchar(iter);
+	} while (true);
+	assert (len > 0);
+	assert (len < size);
+	/* strip trailing whitespace and newlines */
+	while (len > 0 && (value[len-1] == ' ' || value[len-1] == '\t' ||
+				value[len-1] == '\n' || value[len-1] == '\r'))
+		len--;
+	value[len] = '\0';
+	/* shrink to fit; if that fails the larger buffer is still valid */
+	nv = realloc(value, len+1);
+	if (nv == NULL)
+		*result_p = value;
+	else
+		*result_p = nv;
+	return RET_OK;
+}
+
+/* Parse a boolean header into *result_p.  Accepted spellings are
+ * "Yes"/"No" (case-insensitive) and "1"/"0"; anything else, including
+ * an empty value, is reported as an error. */
+retvalue config_gettruth(struct configiterator *iter, const char *header, bool *result_p) {
+	char *word = NULL;
+	retvalue r;
+
+	/* parsing into a temporary word is wasteful, but rare enough */
+
+	r = config_getword(iter, &word);
+	if (r == RET_NOTHING) {
+		configparser_errorlast(iter,
+"Unexpected empty boolean %s header (something like Yes or No expected).", header);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+	// TODO: check against trailing garbage
+	if (strcasecmp(word, "Yes") == 0 || strcmp(word, "1") == 0) {
+		free(word);
+		*result_p = true;
+		return RET_OK;
+	}
+	if (strcasecmp(word, "No") == 0 || strcmp(word, "0") == 0) {
+		free(word);
+		*result_p = false;
+		return RET_OK;
+	}
+	configparser_errorlast(iter,
+"Unexpected value in boolean %s header (something like Yes or No expected).", header);
+	free(word);
+	return RET_ERROR;
+}
+
+/* Parse a decimal number within [minval, maxval] into *result_p.
+ * Rejects empty values, non-digit characters (reported as octal escape)
+ * and out-of-range values with a message naming <name>. */
+retvalue config_getnumber(struct configiterator *iter, const char *name, long long *result_p, long long minval, long long maxval) {
+	char *word = NULL;
+	retvalue r;
+	long long value;
+	char *e;
+
+	r = config_getword(iter, &word);
+	if (r == RET_NOTHING) {
+		configparser_errorlast(iter,
+"Unexpected end of line (%s number expected).", name);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	value = strtoll(word, &e, 10);
+	if (e == word) {
+		fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Expected %s number but got '%s'\n",
+			config_filename(iter), config_markerline(iter),
+			config_markercolumn(iter), name, word);
+		free(word);
+		return RET_ERROR;
+	}
+	/* strtoll never returns e == NULL, the check is merely defensive */
+	if (e != NULL && *e != '\0') {
+		/* show the offending byte as a three-digit octal escape */
+		unsigned char digit1, digit2, digit3;
+		digit1 = ((unsigned char)(*e))&0x7;
+		digit2 = (((unsigned char)(*e)) >> 3)&0x7;
+		digit3 = (((unsigned char)(*e)) >> 6)&0x7;
+		fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Unexpected character \\%01hhu%01hhu%01hhu in %s number '%s'\n",
+			config_filename(iter), config_markerline(iter),
+			config_markercolumn(iter) + (int)(e-word),
+			digit3, digit2, digit1,
+			name, word);
+		free(word);
+		return RET_ERROR;
+	}
+	/* NOTE(review): LLONG_MAX/LLONG_MIN stand in for strtoll overflow
+	 * (ERANGE); a literal LLONG_MAX input is thus also rejected even
+	 * when maxval == LLONG_MAX - checking errno would distinguish them */
+	if (value == LLONG_MAX || value > maxval) {
+		fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Too large %s number '%s'\n",
+			config_filename(iter), config_markerline(iter),
+			config_markercolumn(iter), name, word);
+		free(word);
+		return RET_ERROR;
+	}
+	if (value == LLONG_MIN || value < minval) {
+		fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Too small %s number '%s'\n",
+			config_filename(iter), config_markerline(iter),
+			config_markercolumn(iter), name, word);
+		free(word);
+		return RET_ERROR;
+	}
+	free(word);
+	*result_p = value;
+	return RET_OK;
+}
+
+/* Read the remainder of the current line (after skipping leading
+ * whitespace) into a freshly allocated string with trailing whitespace
+ * stripped.  Returns RET_NOTHING for an empty/whitespace-only rest. */
+static retvalue config_getline(struct configiterator *iter, /*@out@*/char **result_p) {
+	size_t size = 0, len = 0;
+	char *value = NULL, *nv;
+	int c;
+
+	c = config_nextnonspace(iter);
+	if (c == EOF)
+		return RET_NOTHING;
+	iter->markerline = iter->line;
+	iter->markercolumn = iter->column;
+	do {
+		/* grow in 128 byte steps, always leaving room for '\0' */
+		if (len + 2 >= size) {
+			nv = realloc(value, size+128);
+			if (FAILEDTOALLOC(nv)) {
+				free(value);
+				return RET_ERROR_OOM;
+			}
+			size += 128;
+			value = nv;
+		}
+		value[len] = c;
+		len++;
+		c = config_nextchar(iter);
+	} while (c != '\n');
+	assert (len > 0);
+	assert (len < size);
+	/* the first stored character is non-space, so len stays > 0 */
+	while (len > 0 && (value[len-1] == ' ' || value[len-1] == '\t'
+				|| value[len-1] == '\r'))
+		len--;
+	assert (len > 0);
+	value[len] = '\0';
+	/* shrink to fit; keep the larger buffer if realloc fails */
+	nv = realloc(value, len+1);
+	if (nv == NULL)
+		*result_p = value;
+	else
+		*result_p = nv;
+	return RET_OK;
+}
+
+/* Collect all (continuation) lines of the current header into *result,
+ * one list entry per line; empty lines become empty strings. */
+retvalue config_getlines(struct configiterator *iter, struct strlist *result) {
+	struct strlist collected;
+	char *line;
+	retvalue r;
+
+	strlist_init(&collected);
+	do {
+		r = config_getline(iter, &line);
+		if (!RET_WAS_ERROR(r)) {
+			if (r == RET_NOTHING)
+				r = strlist_add_dup(&collected, "");
+			else
+				r = strlist_add(&collected, line);
+		}
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&collected);
+			return r;
+		}
+	} while (config_nextline(iter));
+	strlist_move(result, &collected);
+	return RET_OK;
+}
diff --git a/configparser.h b/configparser.h
new file mode 100644
index 0000000..c9c3d18
--- /dev/null
+++ b/configparser.h
@@ -0,0 +1,305 @@
+#ifndef REPREPRO_CONFIGPARSER_H
+#define REPREPRO_CONFIGPARSER_H
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_CHECKS_H
+#include "checks.h"
+#endif
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+/* opaque parser state, defined in configparser.c */
+struct configiterator;
+
+/* callback parsing one header value: (privdata, headername, item, iter) */
+typedef retvalue configsetfunction(void *, const char *, void *, struct configiterator *);
+/* callback creating a new item: (privdata, last item, new item out) */
+typedef retvalue configinitfunction(void *, void *, void **);
+/* callback finalizing an item: (privdata, item, last out, complete, iter) */
+typedef retvalue configfinishfunction(void *, void *, void **, bool, struct configiterator *);
+
+retvalue linkedlistfinish(void *, void *, void **, bool, struct configiterator *);
+
+/* one recognized header within a chunk, see the CF/CFr macros below */
+struct configfield {
+	const char *name;
+	size_t namelen;
+	/* privdata, allocated struct, iterator */
+	configsetfunction *setfunc;
+	bool required;
+};
+
+/* name/value pair for config_getconstant/config_getflags tables */
+struct constant {
+	const char *name;
+	int value;
+};
+
+/* build a configfield entry; CFr marks the field as required */
+#define CFr(name, sname, field) {name, sizeof(name)-1, configparser_ ## sname ## _set_ ## field, true}
+#define CF(name, sname, field) {name, sizeof(name)-1, configparser_ ## sname ## _set_ ## field, false}
+
+/* position accessors for error reporting */
+/*@observer@*/const char *config_filename(const struct configiterator *) __attribute__((pure));
+unsigned int config_line(const struct configiterator *) __attribute__((pure));
+unsigned int config_column(const struct configiterator *) __attribute__((pure));
+unsigned int config_firstline(const struct configiterator *) __attribute__((pure));
+unsigned int config_markerline(const struct configiterator *) __attribute__((pure));
+unsigned int config_markercolumn(const struct configiterator *) __attribute__((pure));
+/* value parsers, implemented in configparser.c */
+retvalue config_getflags(struct configiterator *, const char *, const struct constant *, bool *, bool, const char *);
+int config_nextnonspaceinline(struct configiterator *iter);
+retvalue config_getlines(struct configiterator *, struct strlist *);
+retvalue config_getwords(struct configiterator *, struct strlist *);
+retvalue config_getall(struct configiterator *iter, /*@out@*/char **result_p);
+retvalue config_getword(struct configiterator *, /*@out@*/char **);
+retvalue config_getwordinline(struct configiterator *, /*@out@*/char **);
+retvalue config_geturl(struct configiterator *, const char *, /*@out@*/char **);
+retvalue config_getonlyword(struct configiterator *, const char *, checkfunc, /*@out@*/char **);
+retvalue config_getuniqwords(struct configiterator *, const char *, checkfunc, struct strlist *);
+retvalue config_getinternatomlist(struct configiterator *, const char *, enum atom_type, checkfunc, struct atomlist *);
+retvalue config_getatom(struct configiterator *, const char *, enum atom_type, atom_t *);
+retvalue config_getatomlist(struct configiterator *, const char *, enum atom_type, struct atomlist *);
+retvalue config_getatomsublist(struct configiterator *, const char *, enum atom_type, struct atomlist *, const struct atomlist *, const char *);
+retvalue config_getsplitatoms(struct configiterator *, const char *, enum atom_type, struct atomlist *, struct atomlist *);
+retvalue config_getsplitwords(struct configiterator *, const char *, struct strlist *, struct strlist *);
+retvalue config_gettruth(struct configiterator *, const char *, bool *);
+retvalue config_getnumber(struct configiterator *, const char *, long long *, long long /*minvalue*/, long long /*maxvalue*/);
+retvalue config_getconstant(struct configiterator *, const struct constant *, int *);
+/* typed wrapper around config_getconstant (GNU statement expression) */
+#define config_getenum(iter, type, constants, result) ({int _val;retvalue _r = config_getconstant(iter, type ## _ ## constants, &_val);*(result) = (enum type)_val;_r;})
+retvalue config_completeword(struct configiterator *, char, /*@out@*/char **);
+retvalue config_gettimespan(struct configiterator *, const char *, /*@out@*/unsigned long *);
+retvalue config_getscript(struct configiterator *, const char *, /*@out@*/char **);
+retvalue config_getsignwith(struct configiterator *, const char *, struct strlist *);
+void config_overline(struct configiterator *);
+bool config_nextline(struct configiterator *);
+/* main driver: parse <filename> chunk-wise using the field table */
+retvalue configfile_parse(const char * /*filename*/, bool /*ignoreunknown*/, configinitfunction, configfinishfunction, const char *chunkname, const struct configfield *, size_t, void *);
+
+/* generate an init callback appending a zeroed struct sname to a
+ * singly linked list rooted at the privdata pointer */
+#define CFlinkedlistinit(sname) \
+static retvalue configparser_ ## sname ## _init(void *rootptr, void *lastitem, void **newptr) { \
+	struct sname *n, **root_p = rootptr, *last = lastitem; \
+	n = calloc(1, sizeof(struct sname)); \
+	if (n == NULL) \
+		return RET_ERROR_OOM; \
+	if (last == NULL) \
+		*root_p = n; \
+	else \
+		last->next = n; \
+	*newptr = n; \
+	return RET_OK; \
+}
+/* the following CF*SETPROC macros generate per-field setter callbacks
+ * matching configsetfunction, each delegating to one config_get* parser */
+#define CFtimespanSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_gettimespan(iter, name, &item->field); \
+}
+/* single word, validated by the given checker function */
+#define CFcheckvalueSETPROC(sname, field, checker) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getonlyword(iter, name, checker, &item->field); \
+}
+#define CFvalueSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getonlyword(iter, name, NULL, &item->field); \
+}
+#define CFurlSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_geturl(iter, name, &item->field); \
+}
+#define CFscriptSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getscript(iter, name, &item->field); \
+}
+/* also records that the field was explicitly set via field_set */
+#define CFlinelistSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	item->field ## _set = true; \
+	return config_getlines(iter, &item->field); \
+}
+#define CFstrlistSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getwords(iter, &item->field); \
+}
+#define CFsignwithSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getsignwith(iter, name, &item->field); \
+}
+/* setter for a list of unique words validated by checker; an empty
+ * field is an error.  (Format specifiers use %u as config_line() and
+ * config_column() return unsigned int, matching configparser.c.) */
+#define CFcheckuniqstrlistSETPROC(sname, field, checker) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	retvalue r; \
+	r = config_getuniqwords(iter, name, checker, &item->field); \
+	if (r == RET_NOTHING) { \
+		fprintf(stderr, \
+"Error parsing %s, line %u, column %u:\n" \
+" An empty %s-field is not allowed.\n", config_filename(iter), \
+				config_line(iter), \
+				config_column(iter), \
+				name); \
+		r = RET_ERROR; \
+	} \
+	return r; \
+}
+/* setter registering the words as new atoms of the given type */
+#define CFinternatomsSETPROC(sname, field, checker, type) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	retvalue r; \
+	r = config_getinternatomlist(iter, name, type, checker, &item->field); \
+	if (r == RET_NOTHING) { \
+		fprintf(stderr, \
+"Error parsing %s, line %u, column %u:\n" \
+" An empty %s-field is not allowed.\n", config_filename(iter), \
+				config_line(iter), \
+				config_column(iter), \
+				name); \
+		r = RET_ERROR; \
+	} \
+	return r; \
+}
+/* setter for a list of already known atoms; records field_set */
+#define CFatomlistSETPROC(sname, field, type) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	retvalue r; \
+	item->field ## _set = true; \
+	r = config_getatomlist(iter, name, type, &item->field); \
+	if (r == RET_NOTHING) { \
+		fprintf(stderr, \
+"Error parsing %s, line %u, column %u:\n" \
+" An empty %s-field is not allowed.\n", config_filename(iter), \
+				config_line(iter), \
+				config_column(iter), \
+				name); \
+		r = RET_ERROR; \
+	} \
+	return r; \
+}
+#define CFatomSETPROC(sname, field, type) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getatom(iter, name, type, &item->field); \
+}
+/* setter restricted to atoms already listed in item->superset, which
+ * must have been filled by an earlier header (superset_header) */
+#define CFatomsublistSETPROC(sname, field, type, superset, superset_header) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	retvalue r; \
+	item->field ## _set = true; \
+	if (item->superset.count == 0) { \
+		fprintf(stderr, \
+"Error parsing %s, line %u, column %u:\n" \
+" A '%s'-field is only allowed after a '%s'-field.\n", config_filename(iter), \
+				config_line(iter), \
+				config_column(iter), \
+				name, superset_header); \
+		return RET_ERROR; \
+	} \
+	r = config_getatomsublist(iter, name, type, &item->field, \
+			&item->superset, superset_header); \
+	if (r == RET_NOTHING) { \
+		fprintf(stderr, \
+"Error parsing %s, line %u, column %u:\n" \
+" An empty %s-field is not allowed.\n", config_filename(iter), \
+				config_line(iter), \
+				config_column(iter), \
+				name); \
+		r = RET_ERROR; \
+	} \
+	return r; \
+}
+#define CFuniqstrlistSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getuniqwords(iter, name, NULL, &item->field); \
+}
+/* like CFuniqstrlistSETPROC but also sets name_set; note the macro
+ * argument 'name' is substituted into the parameter 'const char *name'
+ * as well, so both refer to the same identifier after expansion */
+#define CFuniqstrlistSETPROCset(sname, name) \
+static retvalue configparser_ ## sname ## _set_ ## name (UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	item->name ## _set = true; \
+	return config_getuniqwords(iter, name, NULL, &item->name); \
+}
+#define CFtruthSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	item->field ## _set = true; \
+	return config_gettruth(iter, name, &item->field); \
+}
+/* variant where header name and struct field differ; the macro
+ * argument 'name' also renames the function parameter (see above) */
+#define CFtruthSETPROC2(sname, name, field) \
+static retvalue configparser_ ## sname ## _set_ ## name(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_gettruth(iter, name, &item->field); \
+}
+/* NOTE(review): the parameter is wrapped in UNUSED() yet passed to
+ * config_getnumber - presumably UNUSED only adds an attribute and the
+ * identifier stays usable; verify against the UNUSED definition */
+#define CFnumberSETPROC(sname, minval, maxval, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getnumber(iter, name, &item->field, minval, maxval); \
+}
+/* store the whole (multi-line) header value verbatim */
+#define CFallSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return config_getall(iter, &item->field); \
+}
+#define CFfilterlistSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return filterlist_load(&item->field, iter); \
+}
+#define CFexportmodeSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	return exportmode_set(&item->field, iter); \
+}
+/* open-coded setter headers: CFUSETPROC ignores privdata and name,
+ * CFuSETPROC keeps privdata, CFSETPROC keeps both */
+#define CFUSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *thisdata_ ## sname, struct configiterator *iter)
+#define CFuSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(void *privdata_ ## sname, UNUSED(const char *name), void *thisdata_ ## sname, struct configiterator *iter)
+#define CFSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(void *privdata_ ## sname, const char *headername, void *thisdata_ ## sname, struct configiterator *iter)
+/* typed local aliases for the void pointers inside such a setter body */
+#define CFSETPROCVARS(sname, item, mydata) struct sname *item = thisdata_ ## sname; struct read_ ## sname ## _data *mydata = privdata_ ## sname
+#define CFSETPROCVAR(sname, item) struct sname *item = thisdata_ ## sname
+
+/* headers and variable aliases for init/finish callbacks */
+#define CFstartparse(sname) static retvalue startparse ## sname(UNUSED(void *dummyprivdata), UNUSED(void *lastdata), void **result_p_ ##sname)
+#define CFstartparseVAR(sname, r) struct sname **r = (void*)result_p_ ## sname
+#define CFfinishparse(sname) static retvalue finishparse ## sname(void *privdata_ ## sname, void *thisdata_ ## sname, void **lastdata_p_ ##sname, bool complete, struct configiterator *iter)
+#define CFfinishparseVARS(sname, this, last, mydata) struct sname *this = thisdata_ ## sname, **last = (void*)lastdata_p_ ## sname; struct read_ ## sname ## _data *mydata = privdata_ ## sname
+/* variant only binding 'this'; the other arguments stay unused */
+#define CFUfinishparseVARS(sname, this, last, mydata) struct sname *this = thisdata_ ## sname
+/* parse a set of checksum names (md5, sha1, ...) into a bool array */
+#define CFhashesSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	retvalue r; \
+	item->field ## _set = true; \
+	r = config_getflags(iter, name, hashnames, item->field, false, \
+			"(allowed values: md5, sha1, sha256, and sha512)"); \
+	if (!RET_IS_OK(r)) \
+		return r; \
+	return RET_OK; \
+}
+
+// TODO: better error reporting:
+/* compile the header value as a package selection formula */
+#define CFtermSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	char *formula; \
+	retvalue r; \
+	r = config_getall(iter, &formula); \
+	if (! RET_IS_OK(r)) \
+		return r; \
+	r = term_compilefortargetdecision(&item->field, formula); \
+	free(formula); \
+	return r; \
+}
+/* like CFtermSETPROC, but additionally records field_set */
+#define CFtermSSETPROC(sname, field) \
+static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \
+	struct sname *item = data; \
+	char *formula; \
+	retvalue r; \
+	r = config_getall(iter, &formula); \
+	if (! RET_IS_OK(r)) \
+		return r; \
+	r = term_compilefortargetdecision(&item->field, formula); \
+	free(formula); \
+	item->field ## _set = true; \
+	return r; \
+}
+
+// TODO: decide which should get better checking, which might allow escaping spaces:
+/* directory and file values currently share the plain word parser */
+#define CFdirSETPROC CFvalueSETPROC
+#define CFfileSETPROC CFvalueSETPROC
+#define config_getfileinline config_getwordinline
+
+/* expand +b/, +o/, +c/ style prefixes; may consume its second argument */
+char *configfile_expandname(const char *, /*@only@*//*@null@*/char *);
+#endif /* REPREPRO_CONFIGPARSER_H */
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..878c0f5
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,170 @@
+dnl
+dnl Process this file with autoconf to produce a configure script
+dnl
+
+AC_INIT(reprepro, 5.4.4)
+AC_CONFIG_SRCDIR(main.c)
+AC_CONFIG_AUX_DIR(ac)
+AM_INIT_AUTOMAKE([-Wall -Werror -Wno-portability])
+dnl NOTE(review): AM_CONFIG_HEADER is obsolete in current automake;
+dnl AC_CONFIG_HEADERS is the replacement - verify minimum tool versions
+AM_CONFIG_HEADER(config.h)
+
+dnl only set default CFLAGS if the user did not provide any
+if test "${CFLAGS+set}" != set ; then
+	CFLAGS="-Wall -O2 -g -Wmissing-prototypes -Wstrict-prototypes -Wshadow -Wsign-compare -Wlogical-op"
+fi
+
+AM_MAINTAINER_MODE
+AC_GNU_SOURCE
+
+AC_PROG_CC_C99
+AC_PROG_INSTALL
+AC_SYS_LARGEFILE
+
+AC_C_BIGENDIAN()
+AC_HEADER_STDBOOL
+AC_CHECK_FUNCS([closefrom strndup dprintf tdestroy])
+dnl require at least one of mkostemp/mkstemp
+found_mktemp=no
+AC_CHECK_FUNCS([mkostemp mkstemp],[found_mktemp=yes ; break],)
+if test "$found_mktemp" = "no" ; then
+	AC_MSG_ERROR([Missing mkstemp or mkostemp])
+fi
+AC_CHECK_FUNC([vasprintf],,[AC_MSG_ERROR([Could not find vasprintf implementation!])])
+
+DBLIBS=""
+# the only way to find out which is compileable is to look into db.h:
+
+AC_CHECK_HEADER(db.h,,[AC_MSG_ERROR(["no db.h found"])])
+
+AC_CHECK_LIB(db, db_create, [DBLIBS="-ldb $DBLIBS"
+	],[AC_MSG_ERROR(["no libdb found"])],[$DBLIBS])
+AC_SUBST([DBLIBS])
+
+AC_CHECK_LIB(z,gzopen,,[AC_MSG_ERROR(["no zlib found"])],)
+
+dnl libgpgme is enabled by default; --without-libgpgme disables it,
+dnl a path argument points at its installation prefix
+AC_ARG_WITH(libgpgme,
+[  --with-libgpgme=path|yes|no    Give path to prefix libgpgme was installed with],[dnl
+	case "$withval" in
+	no)
+		;;
+	yes)
+		AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found"])])
+		AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found"])],)
+		AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["no libgpgme found (need at least 0.4.1)"])],)
+		;;
+	*)
+		CPPFLAGS="$CPPFLAGS -I$withval/include"
+		LIBS="$LIBS -L$withval/lib"
+		AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found"])])
+		AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found"])],)
+		AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["no libgpgme found (need at least 0.4.1)"])],)
+		;;
+	esac
+],[dnl default is to behave like yes (for libgpgme only)
+	AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found (to disable run with --without-libgpgme)"])])
+	AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found (to disable run with --without-libgpgme)"])],)
+	AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["did not find libgpgme version 0.4.1 or later (to disable run with --without-libgpgme)"])],)
+])
+
+dnl libbz2 is optional: missing is only an error when explicitly requested
+AC_ARG_WITH(libbz2,
+[  --with-libbz2=path|yes|no    Give path to prefix libbz2 was installed with],[dnl
+	case "$withval" in
+	no)
+		;;
+	yes)
+		AC_CHECK_LIB(bz2,BZ2_bzCompressInit,,[AC_MSG_ERROR(["no libbz2 found, despite being told to use it"])],)
+		;;
+	*)
+		AC_CHECK_LIB(bz2,BZ2_bzCompressInit,[dnl
+			AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBBZ2))
+			LIBS="$LIBS -L$withval/lib -lbz2"
+			CPPFLAGS="$CPPFLAGS -I$withval/include"
+		],[AC_MSG_ERROR(["no libbz2 found, despite being told to use it"])],[-L$withval/lib])
+		;;
+	esac
+],[dnl without --with-libbz2 we look for it but not finding it is no error:
+	AC_CHECK_LIB(bz2,BZ2_bzCompressInit,,[AC_MSG_WARN(["no libbz2 found, compiling without"])],)
+])
+
+dnl liblzma handling mirrors libbz2 above
+AC_ARG_WITH(liblzma,
+[  --with-liblzma=path|yes|no    Give path to prefix liblzma was installed with],[dnl
+	case "$withval" in
+	no)
+		;;
+	yes)
+		AC_CHECK_LIB(lzma,lzma_easy_encoder,,[AC_MSG_ERROR(["no liblzma found, despite being told to use it"])],)
+		;;
+	*)
+		AC_CHECK_LIB(lzma,lzma_easy_encoder,[dnl
+			AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBLZMA))
+			LIBS="$LIBS -L$withval/lib -llzma"
+			CPPFLAGS="$CPPFLAGS -I$withval/include"
+		],[AC_MSG_ERROR(["no liblzma found, despite being told to use it"])],[-L$withval/lib])
+		;;
+	esac
+],[
+	AC_CHECK_LIB(lzma,lzma_easy_encoder,,[AC_MSG_WARN(["no liblzma found, compiling without"])],)
+])
+
+dnl libarchive is optional; ARCHIVELIBS/ARCHIVECPP are substituted into
+dnl the Makefiles, HAVE_LIBARCHIVE gates the .deb-reading code paths
+ARCHIVELIBS=""
+ARCHIVECPP=""
+AH_TEMPLATE([HAVE_LIBARCHIVE],[Defined if libarchive is available])
+AC_ARG_WITH(libarchive,
+[  --with-libarchive=path|yes|no    Give path to prefix libarchive was installed with],[dnl
+	case "$withval" in
+	no)
+		;;
+	yes)
+		AC_CHECK_LIB(archive,archive_read_new,[dnl
+			AC_CHECK_HEADER(archive.h,[dnl
+				AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1)
+				ARCHIVELIBS="-larchive"
+			],[AC_MSG_ERROR([Could not find archive.h])])
+		],[AC_MSG_ERROR([Could not find libarchive])])
+		;;
+	*)
+		AC_CHECK_LIB(archive,archive_read_new,[dnl
+			mysave_CPPFLAGS="$CPPFLAGS"
+			CPPFLAGS="-I$withval/include $CPPFLAGS"
+			AC_CHECK_HEADER(archive.h,[dnl
+				AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1)
+				ARCHIVELIBS="-L$withval/lib -larchive"
+				ARCHIVECPP="-I$withval/include"
+			],[AC_MSG_ERROR([Could not find archive.h])])
+			CPPFLAGS="$mysave_CPPFLAGS"
+		],[AC_MSG_ERROR([Could not find libarchive])],[-L$withval/lib])
+		;;
+	esac
+],[dnl without --with-libarchive we look for it but not finding it is no error:
+	AC_CHECK_LIB(archive,archive_read_new,[dnl
+		AC_CHECK_HEADER(archive.h,[dnl
+			AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1)
+			ARCHIVELIBS="-larchive"
+		],)
+	],)
+])
+dnl link a specific static libarchive archive instead of -larchive
+AC_ARG_WITH(static-libarchive,
+[  --with-static-libarchive=.a-file    static libarchive library to be linked against],
+[	case "$withval" in
+	no|yes)	AC_MSG_ERROR([--with-static-libarchive needs an .a file as parameter])
+		;;
+	*)
+		AC_CHECK_LIB(c,archive_read_new,[dnl
+			mysave_CPPFLAGS="$CPPFLAGS"
+			CPPFLAGS="$ARCHIVECPP $CPPFLAGS"
+			AC_CHECK_HEADER(archive.h,[dnl
+				AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE))
+				ARCHIVELIBS="$withval"
+			],[AC_MSG_ERROR([Could not find archive.h])])
+			CPPFLAGS="$mysave_CPPFLAGS"
+		],[AC_MSG_ERROR([Error linking against $withval])],[$withval])
+	esac
+])
+AM_CONDITIONAL([HAVE_LIBARCHIVE],[test -n "$ARCHIVELIBS"])
+AC_SUBST([ARCHIVELIBS])
+AC_SUBST([ARCHIVECPP])
+
+dnl
+dnl Create makefiles
+dnl
+
+AC_CONFIG_FILES([Makefile docs/Makefile tests/Makefile])
+AC_OUTPUT
diff --git a/contents.c b/contents.c
new file mode 100644
index 0000000..22ef046
--- /dev/null
+++ b/contents.c
@@ -0,0 +1,408 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include "error.h"
+#include "strlist.h"
+#include "mprintf.h"
+#include "dirs.h"
+#include "names.h"
+#include "release.h"
+#include "distribution.h"
+#include "filelist.h"
+#include "files.h"
+#include "ignore.h"
+#include "configparser.h"
+#include "package.h"
+
/* options are zeroed when called; when an error is returned,
 * contentsoptions_done is called by the caller */
/* Parse the value of a "Contents:" field from conf/distributions into
 * distribution->contents.  The value is a list of flags: compressions
 * (".", ".gz", ".bz2", ".xz"), package type selection ("udebs",
 * "ddebs", "nodebs") and layout ("percomponent", "allcomponents",
 * "compatsymlink", "nocompatsymlink").  "0", "1" and "2" are only
 * accepted for compatibility with the pre-3.0 rate argument. */
retvalue contentsoptions_parse(struct distribution *distribution, struct configiterator *iter) {
	enum contentsflags {
		cf_disable, cf_dummy, cf_udebs, cf_nodebs,
		cf_uncompressed, cf_gz, cf_bz2, cf_xz,
		cf_percomponent, cf_allcomponents,
		cf_compatsymlink, cf_nocompatsymlink,
		cf_ddebs,
		cf_COUNT
	};
	bool flags[cf_COUNT];
	/* mapping of the keywords in the config file to the flags above */
	static const struct constant contentsflags[] = {
		{"0", cf_disable},
		{"1", cf_dummy},
		{"2", cf_dummy},
		{"udebs", cf_udebs},
		{"nodebs", cf_nodebs},
		{"percomponent", cf_percomponent},
		{"allcomponents", cf_allcomponents},
		{"compatsymlink", cf_compatsymlink},
		{"nocompatsymlink", cf_nocompatsymlink},
		{".xz", cf_xz},
		{".bz2", cf_bz2},
		{".gz", cf_gz},
		{".", cf_uncompressed},
		{"ddebs", cf_ddebs},
		{NULL, -1}
	};
	retvalue r;

	/* the mere presence of the field enables Contents generation */
	distribution->contents.flags.enabled = true;

	memset(flags, 0, sizeof(flags));
	r = config_getflags(iter, "Contents", contentsflags, flags,
			IGNORABLE(unknownfield), "");
	if (r == RET_ERROR_UNKNOWNFIELD)
		/* hint at the 2.x -> 3.x syntax change */
		(void)fputs(
"Note that the format of the Contents field has changed with reprepro 3.0.0.\n"
"There is no longer a number needed (nor possible) there.\n", stderr);
	if (RET_WAS_ERROR(r))
		return r;
	/* "1"/"2" used to be an update rate, "0" used to disable it */
	if (flags[cf_dummy]) {
		(void)fputs(
"Warning: Contents headers in conf/distribution no longer need an\n"
"rate argument. Ignoring the number there, this might cause a error\n"
"future versions.\n", stderr);
	} else if (flags[cf_disable]) {
		(void)fputs(
"Warning: Contents headers in conf/distribution no longer need an\n"
"rate argument. Treating the '0' as sign to not activate Contents-\n"
"-generation, but it will cause an error in future version.\n", stderr);
		distribution->contents.flags.enabled = false;
	}
	/* allcomponents writes to the place the compat symlinks would
	 * live, so those combinations are contradictory */
	if (flags[cf_allcomponents] && flags[cf_compatsymlink]) {
		fprintf(stderr, "Cannot have allcomponents and compatsymlink in the same Contents line!\n");
		return RET_ERROR;
	}
	if (flags[cf_allcomponents] && flags[cf_nocompatsymlink]) {
		fprintf(stderr, "Cannot have allcomponents and nocompatsymlink in the same Contents line!\n");
		return RET_ERROR;
	}

#ifndef HAVE_LIBBZ2
	/* requested compressions not compiled in are warned about, not fatal */
	if (flags[cf_bz2]) {
		fprintf(stderr,
"Warning: Ignoring request to generate .bz2'ed Contents files.\n"
"(bzip2 support disabled at build time.)\n"
"Request was in %s in the Contents header ending in line %u\n",
				config_filename(iter), config_line(iter));
		flags[cf_bz2] = false;
	}
#endif
#ifndef HAVE_LIBLZMA
	if (flags[cf_xz]) {
		fprintf(stderr,
"Warning: Ignoring request to generate .xz'ed Contents files.\n"
"(xz support disabled at build time.)\n"
"Request was in %s in the Contents header ending in line %u\n",
				config_filename(iter), config_line(iter));
		flags[cf_xz] = false;
	}
#endif
	/* translate the compression flags into the compression bit set */
	distribution->contents.compressions = 0;
	if (flags[cf_uncompressed])
		distribution->contents.compressions |= IC_FLAG(ic_uncompressed);
	if (flags[cf_gz])
		distribution->contents.compressions |= IC_FLAG(ic_gzip);
#ifdef HAVE_LIBBZ2
	if (flags[cf_bz2])
		distribution->contents.compressions |= IC_FLAG(ic_bzip2);
#endif
#ifdef HAVE_LIBLZMA
	if (flags[cf_xz])
		distribution->contents.compressions |= IC_FLAG(ic_xz);
#endif
	distribution->contents.flags.udebs = flags[cf_udebs];
	distribution->contents.flags.ddebs = flags[cf_ddebs];
	distribution->contents.flags.nodebs = flags[cf_nodebs];
	if (flags[cf_allcomponents])
		distribution->contents.flags.allcomponents = true;
	else
		/* default is now off */
		distribution->contents.flags.allcomponents = false;
	if (flags[cf_percomponent])
		distribution->contents.flags.percomponent = true;
	else if (flags[cf_allcomponents])
		/* if allcomponents is specified, default is off */
		distribution->contents.flags.percomponent = false;
	else
		/* otherwise default is on */
		distribution->contents.flags.percomponent = true;
	/* compat symlink is only possible if there are no files
	 * created there, and on by default unless explicitly specified */
	if (distribution->contents.flags.allcomponents)
		distribution->contents.flags.compatsymlink = false;
	else if (flags[cf_compatsymlink])
		distribution->contents.flags.compatsymlink = true;
	else if (flags[cf_nocompatsymlink])
		distribution->contents.flags.compatsymlink = false;
	else {
		assert(distribution->contents.flags.percomponent);
		distribution->contents.flags.compatsymlink = true;
	}
	/* at least one of the two layouts must be active */
	assert(distribution->contents.flags.percomponent ||
	       distribution->contents.flags.allcomponents);
	return RET_OK;
}
+
+static retvalue addpackagetocontents(struct package *package, void *data) {
+ struct filelist_list *contents = data;
+
+ return filelist_addpackage(contents, package);
+}
+
/* Write the Contents file of one single target, i.e.
 * <component>/Contents[-udeb|-ddeb]-<architecture>.  If symlink is
 * true, additionally register a compatibility symlink in the top
 * directory ("Contents-<arch>" resp. "sContents-"/"dContents-")
 * pointing at it. */
static retvalue gentargetcontents(struct target *target, struct release *release, bool onlyneeded, bool symlink) {
	retvalue result, r;
	char *contentsfilename;
	struct filetorelease *file;
	struct filelist_list *contents;
	struct package_cursor iterator;
	const char *suffix;
	const char *symlink_prefix;

	/* a target whose packages changed always needs a fresh file */
	if (onlyneeded && target->saved_wasmodified)
		onlyneeded = false;

	switch (target->packagetype) {
	case pt_ddeb:
		symlink_prefix = "d";
		suffix = "-ddeb";
		break;
	case pt_udeb:
		symlink_prefix = "s";
		suffix = "-udeb";
		break;
	default:
		/* plain debs: no suffix, plain Contents-<arch> symlink */
		symlink_prefix = "";
		suffix = "";
	}

	contentsfilename = mprintf("%s/Contents%s-%s",
			atoms_components[target->component],
			suffix,
			atoms_architectures[target->architecture]);
	if (FAILEDTOALLOC(contentsfilename))
		return RET_ERROR_OOM;

	if (symlink) {
		char *symlinkas = mprintf("%sContents-%s",
				symlink_prefix,
				atoms_architectures[target->architecture]);
		if (FAILEDTOALLOC(symlinkas)) {
			free(contentsfilename);
			return RET_ERROR_OOM;
		}
		r = release_startlinkedfile(release, contentsfilename,
				symlinkas,
				target->distribution->contents.compressions,
				onlyneeded, &file);
		free(symlinkas);
	} else
		r = release_startfile(release, contentsfilename,
				target->distribution->contents.compressions,
				onlyneeded, &file);
	/* RET_NOTHING: the file is already there and not needed anew */
	if (!RET_IS_OK(r)) {
		free(contentsfilename);
		return r;
	}
	if (verbose > 0) {
		printf(" generating %s...\n", contentsfilename);
	}
	free(contentsfilename);

	r = filelist_init(&contents);
	if (RET_WAS_ERROR(r)) {
		release_abortfile(file);
		return r;
	}
	/* collect the file lists of all packages in this target */
	result = package_openiterator(target, READONLY, true, &iterator);
	if (RET_IS_OK(result)) {
		while (package_next(&iterator)) {
			r = addpackagetocontents(&iterator.current, contents);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				break;
		}
		r = package_closeiterator(&iterator);
		RET_ENDUPDATE(result, r);
	}
	/* on any error abort the partially written file */
	if (!RET_WAS_ERROR(result))
		result = filelist_write(contents, file);
	if (RET_WAS_ERROR(result))
		release_abortfile(file);
	else
		result = release_finishfile(release, file);
	filelist_free(contents);
	return result;
}
+
/* Generate the Contents files for one architecture and package type:
 * per-component files (via gentargetcontents) when percomponent is
 * set, and/or one combined [ud]Contents-<architecture> over all
 * configured components when allcomponents is set. */
static retvalue genarchcontents(struct distribution *distribution, architecture_t architecture, packagetype_t type, struct release *release, bool onlyneeded) {
	retvalue result = RET_NOTHING, r;
	char *contentsfilename;
	struct filetorelease *file;
	struct filelist_list *contents;
	const struct atomlist *components;
	struct target *target;
	bool combinedonlyifneeded;
	const char *prefix;
	const char *symlink_prefix;

	/* pick the component list and the file/symlink name prefixes;
	 * explicit Contents*Components settings take precedence over
	 * the distribution's own component lists */
	if (type == pt_ddeb) {
		if (distribution->contents_components_set)
			components = &distribution->contents_dcomponents;
		else
			components = &distribution->ddebcomponents;
		prefix = "d";
		symlink_prefix = "d";
	} else if (type == pt_udeb) {
		if (distribution->contents_components_set)
			components = &distribution->contents_ucomponents;
		else
			components = &distribution->udebcomponents;
		prefix = "u";
		symlink_prefix = "s";
	} else {
		if (distribution->contents_components_set)
			components = &distribution->contents_components;
		else
			components = &distribution->components;
		prefix = "";
		symlink_prefix = "";
	}

	if (components->count == 0)
		return RET_NOTHING;

	combinedonlyifneeded = onlyneeded;

	for (target=distribution->targets; target!=NULL; target=target->next) {
		if (target->architecture != architecture
				|| target->packagetype != type
				|| !atomlist_in(components, target->component))
			continue;
		/* the combined file must be regenerated as soon as any
		 * contributing target was modified */
		if (onlyneeded && target->saved_wasmodified)
			combinedonlyifneeded = false;
		if (distribution->contents.flags.percomponent) {
			/* only the first component's file gets the
			 * top-level compatibility symlink */
			r = gentargetcontents(target, release, onlyneeded,
					distribution->contents.
					flags.compatsymlink &&
					!distribution->contents.
					flags.allcomponents &&
					target->component
					== components->atoms[0]);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				return r;
		}
	}

	if (!distribution->contents.flags.allcomponents) {
		if (!distribution->contents.flags.compatsymlink) {
			/* warn about a stale file or symlink left over
			 * from an earlier configuration */
			char *symlinkas = mprintf("%sContents-%s",
					symlink_prefix,
					atoms_architectures[architecture]);
			if (FAILEDTOALLOC(symlinkas))
				return RET_ERROR_OOM;
			release_warnoldfileorlink(release, symlinkas,
					distribution->contents.compressions);
			free(symlinkas);
		}
		return RET_OK;
	}

	/* combined Contents file over all selected components */
	contentsfilename = mprintf("%sContents-%s",
			prefix,
			atoms_architectures[architecture]);
	if (FAILEDTOALLOC(contentsfilename))
		return RET_ERROR_OOM;
	r = release_startfile(release, contentsfilename,
			distribution->contents.compressions,
			combinedonlyifneeded, &file);
	/* RET_NOTHING: the file is already there and not needed anew */
	if (!RET_IS_OK(r)) {
		free(contentsfilename);
		return r;
	}
	if (verbose > 0) {
		printf(" generating %s...\n", contentsfilename);
	}
	free(contentsfilename);

	r = filelist_init(&contents);
	if (RET_WAS_ERROR(r)) {
		release_abortfile(file);
		return r;
	}
	r = package_foreach_c(distribution,
			components, architecture, type,
			addpackagetocontents, contents);
	if (!RET_WAS_ERROR(r))
		r = filelist_write(contents, file);
	if (RET_WAS_ERROR(r))
		release_abortfile(file);
	else
		r = release_finishfile(release, file);
	filelist_free(contents);
	RET_UPDATE(result, r);
	return result;
}
+
+retvalue contents_generate(struct distribution *distribution, struct release *release, bool onlyneeded) {
+ retvalue result, r;
+ int i;
+ const struct atomlist *architectures;
+
+ if (distribution->contents.compressions == 0)
+ distribution->contents.compressions = IC_FLAG(ic_gzip);
+
+ result = RET_NOTHING;
+ if (distribution->contents_architectures_set) {
+ architectures = &distribution->contents_architectures;
+ } else {
+ architectures = &distribution->architectures;
+ }
+ for (i = 0 ; i < architectures->count ; i++) {
+ architecture_t architecture = architectures->atoms[i];
+
+ if (architecture == architecture_source)
+ continue;
+
+ if (!distribution->contents.flags.nodebs) {
+ r = genarchcontents(distribution,
+ architecture, pt_deb,
+ release, onlyneeded);
+ RET_UPDATE(result, r);
+ }
+ if (distribution->contents.flags.udebs) {
+ r = genarchcontents(distribution,
+ architecture, pt_udeb,
+ release, onlyneeded);
+ RET_UPDATE(result, r);
+ }
+ if (distribution->contents.flags.ddebs) {
+ r = genarchcontents(distribution,
+ architecture, pt_ddeb,
+ release, onlyneeded);
+ RET_UPDATE(result, r);
+ }
+ }
+ return result;
+}
diff --git a/contents.h b/contents.h
new file mode 100644
index 0000000..b8a214d
--- /dev/null
+++ b/contents.h
@@ -0,0 +1,33 @@
#ifndef REPREPRO_CONTENTS_H
#define REPREPRO_CONTENTS_H

#ifndef REPREPRO_STRLIST_H
#include "strlist.h"
#endif
#ifndef REPREPRO_DATABASE_H
#include "database.h"
#endif
#ifndef REPREPRO_RELEASE_H
#include "release.h"
#endif

/* Per-distribution configuration for Contents file generation,
 * filled in by contentsoptions_parse from the Contents: field. */
struct contentsoptions {
	struct {
		bool enabled;		/* generate Contents files at all */
		bool udebs;		/* also generate Contents for udebs */
		bool nodebs;		/* skip Contents for plain debs */
		bool percomponent;	/* <component>/Contents-<arch> files */
		bool allcomponents;	/* combined Contents-<arch> files */
		bool compatsymlink;	/* top-level compatibility symlinks */
		bool ddebs;		/* also generate Contents for ddebs */
	} flags;
	/* bit set of compressions (IC_FLAG values) to produce */
	compressionset compressions;
};

struct distribution;
struct configiterator;

/* parse the value of a Contents: field of conf/distributions */
retvalue contentsoptions_parse(struct distribution *, struct configiterator *);
/* write all configured Contents files of a distribution */
retvalue contents_generate(struct distribution *, struct release *, bool /*onlyneeded*/);

#endif
diff --git a/copypackages.c b/copypackages.c
new file mode 100644
index 0000000..428ce13
--- /dev/null
+++ b/copypackages.c
@@ -0,0 +1,1038 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2008,2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <string.h>
+#include <strings.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "indexfile.h"
+#include "files.h"
+#include "target.h"
+#include "terms.h"
+#include "termdecide.h"
+#include "dpkgversions.h"
+#include "tracking.h"
+#include "filecntl.h"
+#include "mprintf.h"
+#include "globmatch.h"
+#include "package.h"
+#include "copypackages.h"
+
/* Packages selected for copying, grouped by the pair of destination
 * and source target they belong to. */
struct target_package_list {
	struct target_package_list *next;
	/* where the packages will be added */
	struct target *target;
	/* where they come from (needed when moving) */
	struct target *fromtarget;
	/* the packages queued for this pair, ordered by name/version */
	struct selectedpackage {
		/*@null@*/struct selectedpackage *next;
		char *name;
		char *version;
		char *sourcename;
		char *sourceversion;
		/* index entry to be written into the destination target */
		char *control;
		/* checksums of the files as recorded at the source */
		struct checksumsarray origfiles;
		/* pool filekeys the package will reference */
		struct strlist filekeys;
		architecture_t architecture;
	} *packages;
};

/* everything collected by the copy_by_* gatherers, to be applied
 * by packagelist_add afterwards */
struct package_list {
	/*@null@*/struct target_package_list *targets;
};
+
/* Two-level comparison with strcmp semantics: order by s1 vs s2 and
 * break ties by t1 vs t2. */
static int cascade_strcmp(const char *s1, const char *s2, const char *t1, const char *t2) {
	int c = strcmp(s1, s2);

	return (c != 0) ? c : strcmp(t1, t2);
}
+
+static retvalue list_newpackage(struct package_list *list, struct target *desttarget, struct target *fromtarget, const char *sourcename, const char *sourceversion, const char *packagename, const char *packageversion, /*@out@*/struct selectedpackage **package_p) {
+ struct target_package_list *t, **t_p;
+ struct selectedpackage *package, **p_p;
+ int c;
+
+ t_p = &list->targets;
+ while (*t_p != NULL && (*t_p)->target != desttarget && (*t_p)->fromtarget != fromtarget)
+ t_p = &(*t_p)->next;
+ if (*t_p == NULL) {
+ t = zNEW(struct target_package_list);
+ if (FAILEDTOALLOC(t))
+ return RET_ERROR_OOM;
+ t->target = desttarget;
+ t->fromtarget = fromtarget;
+ t->next = *t_p;
+ *t_p = t;
+ } else
+ t = *t_p;
+
+ p_p = &t->packages;
+ while (*p_p != NULL && (c = cascade_strcmp(packagename, (*p_p)->name, packageversion, (*p_p)->version)) < 0)
+ p_p = &(*p_p)->next;
+ if (*p_p != NULL && c == 0) {
+ // TODO: improve this message..., or some context elsewhere
+ fprintf(stderr, "Multiple occurrences of package '%s' with version '%s'!\n",
+ packagename, packageversion);
+ return RET_ERROR_EXIST;
+ }
+ package = zNEW(struct selectedpackage);
+ if (FAILEDTOALLOC(package))
+ return RET_ERROR_OOM;
+ package->name = strdup(packagename);
+ if (FAILEDTOALLOC(package->name)) {
+ free(package);
+ return RET_ERROR_OOM;
+ }
+ package->version = strdup(packageversion);
+ if (FAILEDTOALLOC(package->version)) {
+ free(package->name);
+ free(package);
+ return RET_ERROR_OOM;
+ }
+ package->sourcename = strdup(sourcename);
+ if (FAILEDTOALLOC(package->sourcename)) {
+ free(package->name);
+ free(package->version);
+ free(package);
+ return RET_ERROR_OOM;
+ }
+ package->sourceversion = strdup(sourceversion);
+ if (FAILEDTOALLOC(package->sourceversion)) {
+ free(package->name);
+ free(package->version);
+ free(package->sourcename);
+ free(package);
+ return RET_ERROR_OOM;
+ }
+ package->next = *p_p;
+ *p_p = package;
+ *package_p = package;
+ return RET_OK;
+}
+
+static void package_free(/*@only@*/struct selectedpackage *package) {
+ if (package == NULL)
+ return;
+
+ free(package->name);
+ free(package->version);
+ free(package->sourcename);
+ free(package->sourceversion);
+ free(package->control);
+ checksumsarray_done(&package->origfiles);
+ strlist_done(&package->filekeys);
+ free(package);
+}
+
/* Remove a queued package from the list again and free it.  The
 * package must actually be in the list; falling through to the end
 * without finding it triggers the deliberately always-false assert. */
static void list_cancelpackage(struct package_list *list, /*@only@*/struct selectedpackage *package) {
	struct target_package_list *target;
	struct selectedpackage **p_p;

	assert (package != NULL);

	for (target = list->targets ; target != NULL ; target = target->next) {
		p_p = &target->packages;
		while (*p_p != NULL && *p_p != package)
			p_p = &(*p_p)->next;
		if (*p_p == package) {
			/* unlink and free it */
			*p_p = package->next;
			package_free(package);
			return;
		}
	}
	/* not found: programming error (package is non-NULL here) */
	assert (package == NULL);
}
+
/* Compute everything needed to add 'package' to desttarget (install
 * control chunk, filekeys, checksums) and queue it in the list.
 * Fails if a needed file is not yet in the pool or is recorded there
 * with different checksums. */
static retvalue list_prepareadd(struct package_list *list, struct target *desttarget, struct target *fromtarget, struct package *package) {
	struct selectedpackage *new SETBUTNOTUSED(= NULL);
	retvalue r;
	int i;

	assert (desttarget->packagetype == package->target->packagetype);

	/* make sure version, architecture and source are extracted */
	r = package_getversion(package);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	r = package_getarchitecture(package);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	r = package_getsource(package);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	r = list_newpackage(list, desttarget, fromtarget,
			package->source, package->sourceversion,
			package->name, package->version, &new);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	assert (new != NULL);

	/* compute the data as it will look in the destination target */
	new->architecture = package->architecture;
	r = desttarget->getinstalldata(desttarget, package,
			&new->control, &new->filekeys, &new->origfiles);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r)) {
		list_cancelpackage(list, new);
		return r;
	}
	/* verify all referenced files against the pool database */
	assert (new->filekeys.count == new->origfiles.names.count);
	for (i = 0 ; i < new->filekeys.count ; i++) {
		const char *newfilekey = new->filekeys.values[i];
		const char *oldfilekey = new->origfiles.names.values[i];
		const struct checksums *checksums = new->origfiles.checksums[i];

		r = files_canadd(newfilekey, checksums);
		/* normally it should just already have that file,
		 * in which case we have nothing to do: */
		if (r == RET_NOTHING)
			continue;
		/* otherwise try to cope with it */
		if (r == RET_ERROR_WRONG_MD5) {
			if (strcmp(newfilekey, oldfilekey) == 0) {
				fprintf(stderr,
"Error: package %s version %s lists different checksums than in the pool!\n",
					new->name, new->version);
			} else {
				fprintf(stderr,
"Error: package %s version %s needs '%s' which previously was '%s',\n"
"but the new file is already listed with different checksums!\n",
					new->name, new->version,
					newfilekey, oldfilekey);
			}
		}
		if (RET_WAS_ERROR(r)) {
			list_cancelpackage(list, new);
			return r;
		}
		assert (RET_IS_OK(r));
		/* RET_OK from files_canadd: the filekey is still unknown */
		if (strcmp(newfilekey, oldfilekey) == 0) {
			fprintf(stderr,
"Error: package %s version %s lists file %s not yet in the pool!\n",
				new->name, new->version, newfilekey);
			list_cancelpackage(list, new);
			return RET_ERROR_MISSING;
		}
		// TODO:
		// check new
		// - if exists and other checksums delete
		// - if exists and correct checksums use
		// otherwise check old
		// - if exists and other checksums bail out
		// - if exists and correct checksum, hardlink/copy
		fprintf(stderr,
"Error: cannot yet deal with files changing their position\n"
"(%s vs %s in %s version %s)\n",
			newfilekey, oldfilekey,
			new->name, new->version);
		list_cancelpackage(list, new);
		return RET_ERROR_MISSING;
	}
	return RET_OK;
}
+
+static retvalue package_add(struct distribution *into, /*@null@*/trackingdb tracks, struct target *target, const struct selectedpackage *package, /*@null@*/ struct distribution *from, /*@null@*/trackingdb fromtracks, struct target *fromtarget, bool remove_source) {
+ struct trackingdata trackingdata;
+ retvalue r;
+
+ if (verbose >= 1) {
+ printf("Adding '%s' '%s' to '%s'.\n",
+ package->name, package->version,
+ target->identifier);
+ }
+
+ r = files_expectfiles(&package->filekeys,
+ package->origfiles.checksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+ if (tracks != NULL) {
+ r = trackingdata_summon(tracks, package->sourcename,
+ package->version, &trackingdata);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ r = target_addpackage(target,
+ into->logger,
+ package->name, package->version,
+ package->control,
+ &package->filekeys, true,
+ (tracks != NULL)?
+ &trackingdata:NULL,
+ package->architecture,
+ NULL, from != NULL ? from->codename : NULL);
+ RET_UPDATE(into->status, r);
+
+ if (tracks != NULL) {
+ retvalue r2;
+
+ r2 = trackingdata_finish(tracks, &trackingdata);
+ RET_ENDUPDATE(r, r2);
+ }
+
+ if (!RET_WAS_ERROR(r) && remove_source) {
+ if (fromtracks != NULL) {
+ r = trackingdata_summon(fromtracks, package->sourcename,
+ package->version, &trackingdata);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ r = target_removepackage(fromtarget,
+ from->logger,
+ package->name, package->version,
+ (tracks != NULL) ? &trackingdata : NULL);
+ RET_UPDATE(from->status, r);
+ if (fromtracks != NULL) {
+ retvalue r2;
+
+ r2 = trackingdata_finish(fromtracks, &trackingdata);
+ RET_ENDUPDATE(r, r2);
+ }
+ }
+ return r;
+}
+
+static retvalue packagelist_add(struct distribution *into, const struct package_list *list, /*@null@*/struct distribution *from, bool remove_source) {
+ retvalue result, r;
+ struct target_package_list *tpl;
+ struct selectedpackage *package;
+ trackingdb tracks, fromtracks = NULL;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: packagelist_add(into.codename=%s, from.codename=%s) called.\n",
+ into->codename, from != NULL ? from->codename : NULL);
+
+ r = distribution_prepareforwriting(into);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ if (remove_source) {
+ r = distribution_prepareforwriting(from);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ if (into->tracking != dt_NONE) {
+ r = tracking_initialize(&tracks, into, false);
+ if (RET_WAS_ERROR(r))
+ return r;
+ } else
+ tracks = NULL;
+
+ if (from->tracking != dt_NONE) {
+ r = tracking_initialize(&fromtracks, from, false);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ result = RET_NOTHING;
+ for (tpl = list->targets; tpl != NULL ; tpl = tpl->next) {
+ struct target *target = tpl->target;
+ struct target *fromtarget = tpl->fromtarget;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: Processing add/move from '%s' to '%s'...\n",
+ fromtarget != NULL ? fromtarget->identifier : NULL, target->identifier);
+
+ r = target_initpackagesdb(target, READWRITE);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+
+ if (remove_source) {
+ r = target_initpackagesdb(fromtarget, READWRITE);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(r)) {
+ (void)target_closepackagesdb(target);
+ break;
+ }
+ }
+
+ for (package = tpl->packages; package != NULL ;
+ package = package->next) {
+ r = package_add(into, tracks, target,
+ package, from, fromtracks, fromtarget, remove_source);
+ RET_UPDATE(result, r);
+ }
+ if (remove_source) {
+ r = target_closepackagesdb(fromtarget);
+ RET_UPDATE(into->status, r);
+ RET_ENDUPDATE(result, r);
+ }
+ r = target_closepackagesdb(target);
+ RET_UPDATE(into->status, r);
+ RET_ENDUPDATE(result, r);
+ }
+ r = tracking_done(fromtracks, from);
+ RET_ENDUPDATE(result, r);
+ r = tracking_done(tracks, into);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+static retvalue copy_by_func(struct package_list *list, struct distribution *into, struct distribution *from, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, retvalue action(struct package_list*, struct target *, struct target *, void *), void *data) {
+ retvalue result, r;
+ struct target *origtarget, *desttarget;
+
+ result = RET_NOTHING;
+ for (origtarget = from->targets ; origtarget != NULL ;
+ origtarget = origtarget->next) {
+ if (!target_matches(origtarget,
+ components, architectures, packagetypes))
+ continue;
+ desttarget = distribution_gettarget(into,
+ origtarget->component,
+ origtarget->architecture,
+ origtarget->packagetype);
+ if (desttarget == NULL) {
+ if (verbose > 2)
+ printf(
+"Not looking into '%s' as no matching target in '%s'!\n",
+ origtarget->identifier,
+ into->codename);
+ continue;
+ }
+ r = action(list, desttarget, origtarget, data);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(result))
+ return result;
+ }
+ return result;
+}
+
/* Argument vector for the by_source collector: argv[0] is the source
 * package name, argv[1..argc-1] are optional source versions.
 * found[i] records which arguments matched at least one package. */
struct namelist {
	int argc;
	const char **argv;
	bool *warnedabout;	/* set to NULL by copy_by_source */
	bool *found;
};
+
/* Queue the explicitly listed packages (data is an array of struct
 * nameandversion terminated by a NULL name) that exist in fromtarget.
 * Arguments listed more than once are warned about once and then
 * ignored; found is set for every argument that matched. */
static retvalue by_name(struct package_list *list, struct target *desttarget, struct target *fromtarget, void *data) {
	struct nameandversion *nameandversion = data;
	struct nameandversion *prev;
	retvalue result, r;

	result = RET_NOTHING;
	for (struct nameandversion *d = nameandversion; d->name != NULL ; d++) {
		struct package package;

		/* was the same name (and version) listed before d? */
		for (prev = nameandversion ; prev < d ; prev++) {
			if (strcmp(prev->name, d->name) == 0 && strcmp2(prev->version, d->version) == 0)
				break;
		}
		if (prev < d) {
			if (verbose >= 0 && ! prev->warnedabout) {
				if (d->version == NULL) {
					fprintf(stderr,
"Hint: '%s' was listed multiple times, ignoring all but first!\n",
						d->name);
				} else {
					fprintf(stderr,
"Hint: '%s=%s' was listed multiple times, ignoring all but first!\n",
						d->name, d->version);
				}
			}
			prev->warnedabout = true;
			/* do not complain second is missing if we ignore it: */
			d->found = true;
			continue;
		}

		r = package_get(fromtarget, d->name, d->version, &package);
		/* not in this target, maybe in another one */
		if (r == RET_NOTHING)
			continue;
		RET_ENDUPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		r = list_prepareadd(list, desttarget, fromtarget, &package);
		package_done(&package);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		d->found = true;
	}
	return result;
}
+
+static void packagelist_done(struct package_list *list) {
+ struct target_package_list *target;
+ struct selectedpackage *package;
+
+ while ((target = list->targets) != NULL) {
+ list->targets = target->next;
+
+ while ((package = target->packages) != NULL) {
+ target->packages = package->next;
+
+ package_free(package);
+ }
+ free(target);
+ }
+}
+
/* Copy (or, with remove_source, move) the packages listed in
 * nameandversion (terminated by a NULL name) from 'from' to 'into',
 * restricted to the given components/architectures/packagetypes.
 * Arguments that matched nothing are reported to stderr. */
retvalue copy_by_name(struct distribution *into, struct distribution *from, struct nameandversion *nameandversion, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, bool remove_source) {
	struct package_list list;
	retvalue r;

	for (struct nameandversion *d = nameandversion; d->name != NULL; d++) {
		d->found = false;
		d->warnedabout = false;
	}

	/* first collect everything to be done, then apply it */
	memset(&list, 0, sizeof(list));
	r = copy_by_func(&list, into, from, components,
			architectures, packagetypes, by_name, nameandversion);
	if (verbose >= 0 && !RET_WAS_ERROR(r)) {
		bool first = true;

		/* list the arguments nothing was found for */
		for (struct nameandversion *d = nameandversion; d->name != NULL; d++) {
			if (d->found)
				continue;
			if (first)
				(void)fputs(
"Will not copy as not found: ", stderr);
			else
				(void)fputs(", ", stderr);
			first = false;
			(void)fputs(d->name, stderr);
			if (d->version != NULL) {
				(void)fputs("=", stderr);
				(void)fputs(d->version, stderr);
			}
		}
		if (!first) {
			(void)fputc('.', stderr);
			(void)fputc('\n', stderr);
		}
	}
	if (!RET_IS_OK(r))
		return r;
	r = packagelist_add(into, &list, from, remove_source);
	packagelist_done(&list);
	return r;
}
+
/* Queue all packages of fromtarget built from source package
 * d->argv[0]; when d->argc > 1 only those whose source version equals
 * one of d->argv[1..].  d->found[0] and d->found[i] record which
 * arguments were matched. */
static retvalue by_source(struct package_list *list, struct target *desttarget, struct target *fromtarget, void *data) {
	struct namelist *d = data;
	struct package_cursor iterator;
	retvalue result, r;

	assert (d->argc > 0);

	r = package_openiterator(fromtarget, READONLY, true, &iterator);
	assert (r != RET_NOTHING);
	if (!RET_IS_OK(r))
		return r;
	result = RET_NOTHING;
	while (package_next(&iterator)) {
		int i;

		r = package_getsource(&iterator.current);
		if (r == RET_NOTHING)
			continue;
		if (RET_WAS_ERROR(r)) {
			result = r;
			break;
		}
		/* only include if source name matches */
		if (strcmp(iterator.current.source, d->argv[0]) != 0) {
			continue;
		}
		i = 0;
		if (d->argc > 1) {
			int c;

			/* look for a requested source version that
			 * matches; i ends up 0 when none does */
			i = d->argc;
			while (--i > 0) {
				r = dpkgversions_cmp(
						iterator.current.sourceversion,
						d->argv[i], &c);
				assert (r != RET_NOTHING);
				if (RET_WAS_ERROR(r)) {
					(void)package_closeiterator(&iterator);
					return r;
				}
				if (c == 0)
					break;
			}
			/* there are source versions specified and
			 * the source version of this package differs */
			if (i == 0) {
				continue;
			}
		}
		r = list_prepareadd(list, desttarget, fromtarget, &iterator.current);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		/* mark both the source name and the matched version */
		d->found[0] = true;
		d->found[i] = true;
	}
	r = package_closeiterator(&iterator);
	RET_ENDUPDATE(result, r);
	return result;
}
+
/* Copy (or, with remove_source, move) all packages built from source
 * package argv[0] — and, when argc > 1, only those with one of the
 * source versions argv[1..] — from 'from' to 'into'. */
retvalue copy_by_source(struct distribution *into, struct distribution *from, int argc, const char **argv, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, bool remove_source) {
	struct package_list list;
	/* found[0]: source name matched; found[i]: version argv[i] matched */
	struct namelist names = { argc, argv, NULL, nzNEW(argc, bool) };
	retvalue r;

	if (FAILEDTOALLOC(names.found)) {
		free(names.found);
		return RET_ERROR_OOM;
	}
	memset(&list, 0, sizeof(list));
	// TODO: implement fast way by looking at source tracking
	// (also allow copying .changes and .logs)
	r = copy_by_func(&list, into, from, components, architectures,
			packagetypes, by_source, &names);
	if (argc == 1 && !RET_WAS_ERROR(r) && verbose >= 0) {
		assert(names.found != NULL);

		if (!names.found[0]) {
			assert (r == RET_NOTHING);
			fprintf(stderr,
"Nothing to do as no package with source '%s' found!\n",
				argv[0]);
			free(names.found);
			return RET_NOTHING;
		}
	} else if (!RET_WAS_ERROR(r) && verbose >= 0) {
		int i;
		bool first = true, anything = false;

		/* did any of the requested source versions match? */
		for (i = 1 ; i < argc ; i++) {
			if (names.found[i])
				anything = true;
		}
		if (!anything) {
			assert (r == RET_NOTHING);
			fprintf(stderr,
"Nothing to do as no packages with source '%s' and a requested source version found!\n",
				argv[0]);
			free(names.found);
			return RET_NOTHING;
		}
		/* report the source versions not found */
		for (i = 1 ; i < argc ; i++) {
			if (names.found[i])
				continue;
			if (first)
				(void)fputs(
"Will not copy as not found: ", stderr);
			else
				(void)fputs(", ", stderr);
			first = false;
			(void)fputs(argv[i], stderr);
		}
		if (!first) {
			(void)fputc('.', stderr);
			(void)fputc('\n', stderr);
		}
		if (verbose > 5) {
			(void)fputs("Found versions are: ", stderr);
			first = true;
			for (i = 1 ; i < argc ; i++) {
				if (!names.found[i])
					continue;
				if (!first)
					(void)fputs(", ", stderr);
				first = false;
				(void)fputs(argv[i], stderr);
			}
			(void)fputc('.', stderr);
			(void)fputc('\n', stderr);
		}
	}
	free(names.found);
	if (!RET_IS_OK(r))
		return r;
	r = packagelist_add(into, &list, from, remove_source);
	packagelist_done(&list);
	return r;
}
+
+/* queue every package of <fromtarget> that satisfies the compiled
+ * formula passed as <data> to be copied into <desttarget> */
+static retvalue by_formula(struct package_list *list, struct target *desttarget, struct target *fromtarget, void *data) {
+	term *filter = data;
+	struct package_cursor cursor;
+	retvalue rc, r;
+
+	r = package_openiterator(fromtarget, READONLY, true, &cursor);
+	assert (r != RET_NOTHING);
+	if (!RET_IS_OK(r))
+		return r;
+	rc = RET_NOTHING;
+	while (package_next(&cursor)) {
+		r = term_decidepackage(filter, &cursor.current,
+				desttarget);
+		if (RET_WAS_ERROR(r)) {
+			rc = r;
+			break;
+		}
+		if (r == RET_NOTHING)
+			continue;
+		r = list_prepareadd(list, desttarget, fromtarget,
+				&cursor.current);
+		RET_UPDATE(rc, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	r = package_closeiterator(&cursor);
+	RET_ENDUPDATE(rc, r);
+	return rc;
+}
+
+/* queue every package of <fromtarget> whose name matches the shell
+ * glob passed as <data> to be copied into <desttarget> */
+static retvalue by_glob(struct package_list *list, struct target *desttarget, struct target *fromtarget, void *data) {
+	const char *pattern = data;
+	struct package_cursor cursor;
+	retvalue rc, r;
+
+	r = package_openiterator(fromtarget, READONLY, true, &cursor);
+	assert (r != RET_NOTHING);
+	if (!RET_IS_OK(r))
+		return r;
+	rc = RET_NOTHING;
+	while (package_next(&cursor)) {
+		if (globmatch(cursor.current.name, pattern)) {
+			r = list_prepareadd(list, desttarget, fromtarget,
+					&cursor.current);
+			RET_UPDATE(rc, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+	}
+	r = package_closeiterator(&cursor);
+	RET_ENDUPDATE(rc, r);
+	return rc;
+}
+
+/* copy all packages whose name matches <glob> from <from> into <into>;
+ * if remove_source is true they are removed from <from> afterwards */
+retvalue copy_by_glob(struct distribution *into, struct distribution *from, const char *glob, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, bool remove_source) {
+	struct package_list queue;
+	retvalue r;
+
+	memset(&queue, 0, sizeof(queue));
+	r = copy_by_func(&queue, into, from, components, architectures,
+			packagetypes, by_glob, (void*)glob);
+	if (!RET_IS_OK(r))
+		return r;
+	r = packagelist_add(into, &queue, from, remove_source);
+	packagelist_done(&queue);
+	return r;
+}
+
+/* copy all packages satisfying the formula <filter> from <from> into
+ * <into>; if remove_source is true they are removed from <from> */
+retvalue copy_by_formula(struct distribution *into, struct distribution *from, const char *filter, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, bool remove_source) {
+	struct package_list queue;
+	term *condition;
+	retvalue r;
+
+	memset(&queue, 0, sizeof(queue));
+
+	/* compile the filter expression once, then match every package */
+	r = term_compilefortargetdecision(&condition, filter);
+	if (!RET_IS_OK(r))
+		return r;
+	r = copy_by_func(&queue, into, from, components, architectures,
+			packagetypes, by_formula, condition);
+	term_free(condition);
+	if (!RET_IS_OK(r))
+		return r;
+	r = packagelist_add(into, &queue, from, remove_source);
+	packagelist_done(&queue);
+	return r;
+}
+
+/* filter callback: accept a package iff its name is one of the
+ * names listed in the namelist passed as <privdata> */
+static retvalue choose_by_name(struct package *package, void *privdata) {
+	const struct namelist *wanted = privdata;
+	int i;
+
+	for (i = 0 ; i < wanted->argc ; i++) {
+		if (strcmp(package->name, wanted->argv[i]) == 0)
+			return RET_OK;
+	}
+	return RET_NOTHING;
+}
+
+/* filter callback: accept a package iff its source name equals
+ * l->argv[0] and, when source versions were given (argc > 1), its
+ * source version equals one of l->argv[1..argc-1]. */
+static retvalue choose_by_source(struct package *package, void *privdata) {
+	const struct namelist *l = privdata;
+	retvalue r;
+
+	r = package_getsource(package);
+	if (!RET_IS_OK(r))
+		return r;
+
+	assert (l->argc > 0);
+	/* only include if source name matches */
+	if (strcmp(package->source, l->argv[0]) != 0) {
+		return RET_NOTHING;
+	}
+	if (l->argc > 1) {
+		int i, c;
+
+		/* scan argv[argc-1] down to argv[1] for an equal version;
+		 * i ends at 0 when none matched */
+		i = l->argc;
+		while (--i > 0) {
+			r = dpkgversions_cmp(package->sourceversion,
+					l->argv[i], &c);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r)) {
+				return r;
+			}
+			if (c == 0)
+				break;
+		}
+		/* there are source versions specified and
+		 * the source version of this package differs */
+		if (i == 0) {
+			return RET_NOTHING;
+		}
+	}
+	return RET_OK;
+}
+
+/* filter callback: accept a package iff it satisfies the compiled
+ * term passed as <privdata> */
+static retvalue choose_by_condition(struct package *package, void *privdata) {
+	return term_decidepackage((term *)privdata, package,
+			package->target);
+}
+
+/* filter callback: accept a package iff its name matches the shell
+ * glob passed as <privdata> */
+static retvalue choose_by_glob(struct package *package, void *privdata) {
+	const char *pattern = privdata;
+
+	return globmatch(package->name, pattern) ? RET_OK : RET_NOTHING;
+}
+
+/* Add the packages named in argv[0..argc-1] from the raw index file
+ * <filename> into the single target of <into> selected by component,
+ * architecture and packagetype. Prints a detailed diagnostic when the
+ * distribution has no such target. */
+retvalue copy_from_file(struct distribution *into, component_t component, architecture_t architecture, packagetype_t packagetype, const char *filename, int argc, const char **argv) {
+	struct indexfile *i;
+	retvalue result, r;
+	struct target *target;
+	struct package_list list;
+	struct namelist d = {argc, argv, NULL, NULL};
+	struct package package;
+
+	assert (atom_defined(architecture));
+	assert (atom_defined(component));
+	assert (atom_defined(packagetype));
+
+	memset(&list, 0, sizeof(list));
+	target = distribution_gettarget(into,
+			component, architecture, packagetype);
+	if (target == NULL) {
+		/* explain which of the three selectors did not match */
+		if (!atomlist_in(&into->architectures, architecture)) {
+			fprintf(stderr,
+"Distribution '%s' does not contain architecture '%s!'\n",
+					into->codename,
+					atoms_architectures[architecture]);
+		}
+		if (packagetype == pt_ddeb) {
+			if (!atomlist_in(&into->ddebcomponents, component)) {
+				fprintf(stderr,
+"Distribution '%s' does not contain ddeb component '%s!'\n",
+					into->codename,
+					atoms_components[component]);
+			}
+		} else if (packagetype != pt_udeb) {
+			if (!atomlist_in(&into->components, component)) {
+				fprintf(stderr,
+"Distribution '%s' does not contain component '%s!'\n",
+					into->codename,
+					atoms_components[component]);
+			}
+		} else {
+			if (!atomlist_in(&into->udebcomponents, component)) {
+				fprintf(stderr,
+"Distribution '%s' does not contain udeb component '%s!'\n",
+					into->codename,
+					atoms_components[component]);
+			}
+		}
+		/* -A source needing -T dsc and vice versa already checked
+		 * in main.c */
+		fprintf(stderr,
+"No matching part of distribution '%s' found!\n",
+				into->codename);
+		return RET_ERROR;
+	}
+	result = indexfile_open(&i, filename, c_none);
+	if (!RET_IS_OK(result))
+		return result;
+	result = RET_NOTHING;
+	setzero(struct package, &package);
+	/* queue every index entry whose name was requested */
+	while (indexfile_getnext(i, &package, target, false)) {
+		r = choose_by_name(&package, &d);
+		if (RET_IS_OK(r))
+			r = list_prepareadd(&list, target, NULL, &package);
+		package_done(&package);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(result))
+			break;
+	}
+	r = indexfile_close(i);
+	RET_ENDUPDATE(result, r);
+	if (RET_IS_OK(result))
+		result = packagelist_add(into, &list, NULL, false);
+	packagelist_done(&list);
+	return result;
+}
+
+/* decides whether a package is selected: RET_OK take it,
+ * RET_NOTHING skip it, error abort */
+typedef retvalue chooseaction(struct package *, void *);
+
+/* Read the exported index files of snapshot <snapshotname> of <into>
+ * and re-add every package for which <action> returns RET_OK.
+ * As the compression used back then is unknown, each index is tried
+ * uncompressed, then as .gz, then (if built with libbz2) as .bz2. */
+static retvalue restore_from_snapshot(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, chooseaction action, void *d) {
+	retvalue result, r;
+	struct package_list list;
+	struct target *target;
+	char *basedir;
+	enum compression compression;
+	struct distribution pseudo_from; // just stores the codename
+
+	basedir = calc_snapshotbasedir(into->codename, snapshotname);
+	if (FAILEDTOALLOC(basedir))
+		return RET_ERROR_OOM;
+
+	memset(&list, 0, sizeof(list));
+	result = RET_NOTHING;
+	for (target = into->targets ; target != NULL ;
+			target = target->next) {
+		struct package package;
+		char *filename;
+		struct indexfile *i;
+
+		if (!target_matches(target,
+				components, architectures, packagetypes))
+			continue;
+
+		/* we do not know what compressions where used back then
+		 * and not even how the file was named, just look for
+		 * how the file is named now and try all readable
+		 * compressions */
+
+		compression = c_none;
+		filename = calc_dirconcat3(
+				basedir, target->relativedirectory,
+				target->exportmode->filename);
+		if (filename != NULL && !isregularfile(filename)) {
+			/* no uncompressed file found, try .gz */
+			free(filename);
+			compression = c_gzip;
+			filename = mprintf("%s/%s/%s.gz",
+					basedir, target->relativedirectory,
+					target->exportmode->filename);
+		}
+#ifdef HAVE_LIBBZ2
+		if (filename != NULL && !isregularfile(filename)) {
+			/* no uncompressed or .gz file found, try .bz2 */
+			free(filename);
+			compression = c_bzip2;
+			filename = mprintf("%s/%s/%s.bz2",
+					basedir, target->relativedirectory,
+					target->exportmode->filename);
+		}
+#endif
+		if (filename != NULL && !isregularfile(filename)) {
+			free(filename);
+			fprintf(stderr,
+"Could not find '%s/%s/%s' nor '%s/%s/%s.gz',\n"
+"ignoring that part of the snapshot.\n",
+				basedir, target->relativedirectory,
+				target->exportmode->filename,
+				basedir, target->relativedirectory,
+				target->exportmode->filename);
+			continue;
+		}
+		if (FAILEDTOALLOC(filename)) {
+			result = RET_ERROR_OOM;
+			break;
+		}
+		result = indexfile_open(&i, filename, compression);
+		if (!RET_IS_OK(result))
+			break;
+		setzero(struct package, &package);
+		/* queue all entries <action> selects */
+		while (indexfile_getnext(i, &package, target, false)) {
+			result = action(&package, d);
+			if (RET_IS_OK(result))
+				result = list_prepareadd(&list,
+						target, NULL, &package);
+			package_done(&package);
+			if (RET_WAS_ERROR(result))
+				break;
+		}
+		r = indexfile_close(i);
+		RET_ENDUPDATE(result, r);
+		free(filename);
+		if (RET_WAS_ERROR(result))
+			break;
+	}
+	free(basedir);
+	if (RET_WAS_ERROR(result))
+		return result;
+	/* packagelist_add only looks at the codename of the source */
+	memset(&pseudo_from, 0, sizeof(struct distribution));
+	pseudo_from.codename = (char*)snapshotname;
+	r = packagelist_add(into, &list, &pseudo_from, false);
+	packagelist_done(&list);
+	return r;
+}
+
+/* restore packages listed by name in argv from a snapshot */
+retvalue restore_by_name(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, int argc, const char **argv) {
+	struct namelist namefilter = { argc, argv, NULL, NULL };
+
+	return restore_from_snapshot(into, components, architectures,
+			packagetypes, snapshotname,
+			choose_by_name, &namefilter);
+}
+
+/* restore packages selected by source name/version from a snapshot */
+retvalue restore_by_source(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, int argc, const char **argv) {
+	struct namelist sourcefilter = { argc, argv, NULL, NULL };
+
+	return restore_from_snapshot(into, components, architectures,
+			packagetypes, snapshotname,
+			choose_by_source, &sourcefilter);
+}
+
+/* restore packages satisfying the formula <filter> from a snapshot */
+retvalue restore_by_formula(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, const char *filter) {
+	term *condition;
+	retvalue r;
+
+	r = term_compilefortargetdecision(&condition, filter);
+	if (!RET_IS_OK(r))
+		return r;
+	r = restore_from_snapshot(into, components, architectures,
+			packagetypes, snapshotname,
+			choose_by_condition, condition);
+	term_free(condition);
+	return r;
+}
+
+/* restore packages whose name matches <glob> from a snapshot */
+retvalue restore_by_glob(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, const char *glob) {
+	return restore_from_snapshot(into, components, architectures,
+			packagetypes, snapshotname,
+			choose_by_glob, (void*)glob);
+}
diff --git a/copypackages.h b/copypackages.h
new file mode 100644
index 0000000..0f4771f
--- /dev/null
+++ b/copypackages.h
@@ -0,0 +1,28 @@
+#ifndef REPREPRO_COPYPACKAGES_H
+#define REPREPRO_COPYPACKAGES_H
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+/* one package requested on the command line, with optional version,
+ * plus bookkeeping about what was already reported/seen */
+struct nameandversion {
+	const char *name;
+	const char /*@null@*/ *version;
+	bool warnedabout;
+	bool found;
+};
+
+/* copy packages between distributions; the three atomlists restrict
+ * components, architectures and packagetypes; the final bool requests
+ * removal from the source distribution (i.e. a move) */
+retvalue copy_by_name(struct distribution * /*into*/, struct distribution * /*from*/, struct nameandversion *, const struct atomlist *, const struct atomlist *, const struct atomlist *, bool);
+retvalue copy_by_source(struct distribution * /*into*/, struct distribution * /*from*/, int, const char **, const struct atomlist *, const struct atomlist *, const struct atomlist *, bool);
+retvalue copy_by_formula(struct distribution * /*into*/, struct distribution * /*from*/, const char *formula, const struct atomlist *, const struct atomlist *, const struct atomlist *, bool);
+retvalue copy_by_glob(struct distribution * /*into*/, struct distribution * /*from*/, const char * /*glob*/, const struct atomlist *, const struct atomlist *, const struct atomlist *, bool);
+
+/* add packages named in the argument list from a raw index file */
+retvalue copy_from_file(struct distribution * /*into*/, component_t, architecture_t, packagetype_t, const char * /*filename*/ , int, const char **);
+
+/* note that snapshotname must live till logger_wait has run */
+retvalue restore_by_name(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, int, const char **);
+retvalue restore_by_source(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, int, const char **);
+retvalue restore_by_formula(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, const char *filter);
+retvalue restore_by_glob(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, const char * /*glob*/);
+
+#endif
diff --git a/database.c b/database.c
new file mode 100644
index 0000000..b01c8ed
--- /dev/null
+++ b/database.c
@@ -0,0 +1,2709 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007,2008,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <db.h>
+
+#include "globals.h"
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "names.h"
+#include "database.h"
+#include "dirs.h"
+#include "filecntl.h"
+#include "files.h"
+#include "filelist.h"
+#include "reference.h"
+#include "tracking.h"
+#include "dpkgversions.h"
+#include "distribution.h"
+#include "database_p.h"
+#include "chunks.h"
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+/* e.g. "bdb5.3.28": identifies the libdb this binary was built against,
+ * stored in db/version for compatibility checks */
+#define LIBDB_VERSION_STRING "bdb" TOSTRING(DB_VERSION_MAJOR) "." TOSTRING(DB_VERSION_MINOR) "." TOSTRING(DB_VERSION_PATCH)
+#define CLEARDBT(dbt) { memset(&dbt, 0, sizeof(dbt)); }
+/* store a NUL-terminated string (including its NUL byte) in a DBT */
+#define SETDBT(dbt, datastr) {const char *my = datastr; memset(&dbt, 0, sizeof(dbt)); dbt.data = (void *)my; dbt.size = strlen(my) + 1;}
+/* store a buffer of explicit size in a DBT */
+#define SETDBTl(dbt, datastr, datasize) {const char *my = datastr; memset(&dbt, 0, sizeof(dbt)); dbt.data = (void *)my; dbt.size = datasize;}
+
+/* state of the single database this process may have open */
+static bool rdb_initialized, rdb_used, rdb_locked, rdb_verbose;
+static int rdb_dircreationdepth;
+static bool rdb_nopackages, rdb_readonly;
+/* version strings read from (and written back to) the db/version file */
+static /*@null@*/ char *rdb_version, *rdb_lastsupportedversion,
+	*rdb_dbversion, *rdb_lastsupporteddbversion;
+static DB_ENV *rdb_env = NULL;
+
+struct table *rdb_checksums, *rdb_contents;
+struct table *rdb_references;
+
+/* list of currently opened tables, by file name and sub-database name */
+struct opened_tables {
+	struct opened_tables *next;
+	const char *name;
+	const char *subname;
+};
+
+struct opened_tables *opened_tables = NULL;
+
+/* release the cached version strings and mark the database state
+ * as uninitialized; no-op when nothing was initialized */
+static void database_free(void) {
+	if (!rdb_initialized)
+		return;
+	free(rdb_version);
+	free(rdb_lastsupportedversion);
+	free(rdb_dbversion);
+	free(rdb_lastsupporteddbversion);
+	rdb_version = NULL;
+	rdb_lastsupportedversion = NULL;
+	rdb_dbversion = NULL;
+	rdb_lastsupporteddbversion = NULL;
+	rdb_initialized = false;
+}
+
+/* return a newly allocated path of <filename> inside the db directory,
+ * or NULL on allocation failure */
+static inline char *dbfilename(const char *filename) {
+	return calc_dirconcat(global.dbdir, filename);
+}
+
+/* create and open the Berkeley DB environment in global.dbdir,
+ * storing it in rdb_env; returns RET_OK or RET_ERROR */
+static retvalue database_openenv(void) {
+	int dbret;
+
+	dbret = db_env_create(&rdb_env, 0);
+	if (dbret != 0) {
+		fprintf(stderr, "db_env_create: %s\n", db_strerror(dbret));
+		return RET_ERROR;
+	}
+
+	// DB_INIT_LOCK is needed to open multiple databases in one file (e.g. for move command)
+	dbret = rdb_env->open(rdb_env, global.dbdir,
+		DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_INIT_LOCK, 0664);
+	if (dbret != 0) {
+		rdb_env->err(rdb_env, dbret, "environment open: %s", global.dbdir);
+		return RET_ERROR;
+	}
+
+	return RET_OK;
+}
+
+/* close the Berkeley DB environment and reset rdb_env */
+static void database_closeenv(void) {
+	int dbret = rdb_env->close(rdb_env, 0);
+
+	if (dbret != 0)
+		fprintf(stderr, "Error: DB_ENV->close: %s\n",
+				db_strerror(dbret));
+	rdb_env = NULL;
+}
+
+/**********************/
+/* lock file handling */
+/**********************/
+
+/* Create db/lockfile (retrying up to <waitforlock> times if it already
+ * exists, sleeping 10 seconds between attempts) and open the database
+ * environment.
+ * Returns RET_OK with the lock held and rdb_locked set; on any error
+ * no lock is held and rdb_locked is false. */
+static retvalue database_lock(size_t waitforlock) {
+	char *lockfile;
+	int fd;
+	retvalue r;
+	size_t tries = 0;
+
+	assert (!rdb_locked);
+	rdb_dircreationdepth = 0;
+	r = dir_create_needed(global.dbdir, &rdb_dircreationdepth);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	lockfile = dbfilename("lockfile");
+	if (FAILEDTOALLOC(lockfile))
+		return RET_ERROR_OOM;
+	/* O_EXCL makes creation fail when the file already exists, which
+	 * is how a concurrently running instance is detected */
+	fd = open(lockfile, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW|O_NOCTTY,
+			S_IRUSR|S_IWUSR);
+	while (fd < 0) {
+		int e = errno;
+		if (e == EEXIST) {
+			if (tries < waitforlock && ! interrupted()) {
+				unsigned int timetosleep = 10;
+				if (verbose >= 0)
+					printf(
+"Could not acquire lock: %s already exists!\nWaiting 10 seconds before trying again.\n",
+						lockfile);
+				while (timetosleep > 0)
+					timetosleep = sleep(timetosleep);
+				tries++;
+				fd = open(lockfile, O_WRONLY|O_CREAT|O_EXCL
+						|O_NOFOLLOW|O_NOCTTY,
+						S_IRUSR|S_IWUSR);
+				continue;
+
+			}
+			fprintf(stderr,
+"The lock file '%s' already exists. There might be another instance with the\n"
+"same database dir running. To avoid locking overhead, only one process\n"
+"can access the database at the same time. Do not delete the lock file unless\n"
+"you are sure no other version is still running!\n", lockfile);
+
+		} else
+			fprintf(stderr,
+"Error %d creating lock file '%s': %s!\n",
+					e, lockfile, strerror(e));
+		free(lockfile);
+		return RET_ERRNO(e);
+	}
+	// TODO: do some more locking of this file to avoid problems
+	// with the non-atomity of O_EXCL with nfs-filesystems...
+	if (close(fd) != 0) {
+		int e = errno;
+		fprintf(stderr,
+"(Late) Error %d creating lock file '%s': %s!\n",
+				e, lockfile, strerror(e));
+		(void)unlink(lockfile);
+		free(lockfile);
+		return RET_ERRNO(e);
+	}
+	rdb_locked = true;
+
+	r = database_openenv();
+	if (RET_WAS_ERROR(r)) {
+		/* roll the lock back on failure.
+		 * (previously lockfile was already freed at this point,
+		 * causing a use after free and a double free, and
+		 * rdb_locked stayed set) */
+		(void)unlink(lockfile);
+		free(lockfile);
+		rdb_locked = false;
+		return r;
+	}
+	free(lockfile);
+	return RET_OK;
+}
+
+/* close the environment, delete db/lockfile and remove any directories
+ * database_lock created; counterpart of database_lock */
+static void releaselock(void) {
+	char *lockfile;
+
+	assert (rdb_locked);
+
+	database_closeenv();
+	lockfile = dbfilename("lockfile");
+	if (lockfile != NULL) {
+		if (unlink(lockfile) != 0) {
+			int e = errno;
+			/* only report; retrying the unlink (as the old
+			 * code did) cannot succeed after a failure */
+			fprintf(stderr,
+"Error %d deleting lock file '%s': %s!\n",
+					e, lockfile, strerror(e));
+		}
+		free(lockfile);
+	}
+	/* even if allocating the file name failed, still reset the lock
+	 * state and remove created directories (the old code returned
+	 * early and left rdb_locked set) */
+	dir_remove_new(global.dbdir, rdb_dircreationdepth);
+	rdb_locked = false;
+}
+
+static retvalue writeversionfile(void);
+
+/* close all open tables, write back the db/version file, drop the
+ * lock and free cached state; returns the worst result encountered */
+retvalue database_close(void) {
+	retvalue result = RET_OK, r;
+
+	if (rdb_references != NULL) {
+		r = table_close(rdb_references);
+		RET_UPDATE(result, r);
+		rdb_references = NULL;
+	}
+	if (rdb_checksums != NULL) {
+		r = table_close(rdb_checksums);
+		RET_UPDATE(result, r);
+		rdb_checksums = NULL;
+	}
+	if (rdb_contents != NULL) {
+		r = table_close(rdb_contents);
+		RET_UPDATE(result, r);
+		rdb_contents = NULL;
+	}
+	r = writeversionfile();
+	RET_UPDATE(result, r);
+	if (rdb_locked)
+		releaselock();
+	database_free();
+	return result;
+}
+
+/* set *exists_p to whether <filename> exists as a regular file inside
+ * the db directory; only fails on allocation error */
+static retvalue database_hasdatabasefile(const char *filename, /*@out@*/bool *exists_p) {
+	char *path = dbfilename(filename);
+
+	if (FAILEDTOALLOC(path))
+		return RET_ERROR_OOM;
+	*exists_p = isregularfile(path);
+	free(path);
+	return RET_OK;
+}
+
+/* logical table flavours; types[] below maps each to the libdb access
+ * method, the dup/compare flags are applied in database_opentable */
+enum database_type {
+	dbt_QUERY,
+	dbt_BTREE, dbt_BTREEDUP, dbt_BTREEPAIRS, dbt_BTREEVERSIONS,
+	dbt_HASH,
+	dbt_COUNT /* must be last */
+};
+static const uint32_t types[dbt_COUNT] = {
+	DB_UNKNOWN,
+	DB_BTREE, DB_BTREE, DB_BTREE, DB_BTREE,
+	DB_HASH
+};
+
+/* duplicate-data comparators; libdb >= 6 changed the callback signature
+ * to take an extra location pointer */
+static int debianversioncompare(UNUSED(DB *db), const DBT *a, const DBT *b);
+#if DB_VERSION_MAJOR >= 6
+static int paireddatacompare(UNUSED(DB *db), const DBT *a, const DBT *b, size_t *locp);
+#else
+static int paireddatacompare(UNUSED(DB *db), const DBT *a, const DBT *b);
+#endif
+
+/* Open (sub)table <subtable> of database file <filename> with the
+ * access method and flags implied by <type>, storing the handle in
+ * *result. Returns RET_NOTHING when the file does not exist and
+ * DB_CREATE was not requested, RET_DBERR on libdb errors. */
+static retvalue database_opentable(const char *filename, /*@null@*/const char *subtable, enum database_type type, uint32_t flags, /*@out@*/DB **result) {
+	DB *table;
+	int dbret;
+
+	dbret = db_create(&table, rdb_env, 0);
+	if (dbret != 0) {
+		fprintf(stderr, "db_create: %s\n", db_strerror(dbret));
+		return RET_DBERR(dbret);
+	}
+	/* sorted duplicates for pair/version tables, unsorted for DUP */
+	if (type == dbt_BTREEPAIRS || type == dbt_BTREEVERSIONS) {
+		dbret = table->set_flags(table, DB_DUPSORT);
+		if (dbret != 0) {
+			table->err(table, dbret, "db_set_flags(DB_DUPSORT):");
+			(void)table->close(table, 0);
+			return RET_DBERR(dbret);
+		}
+	} else if (type == dbt_BTREEDUP) {
+		dbret = table->set_flags(table, DB_DUP);
+		if (dbret != 0) {
+			table->err(table, dbret, "db_set_flags(DB_DUP):");
+			(void)table->close(table, 0);
+			return RET_DBERR(dbret);
+		}
+	}
+	if (type == dbt_BTREEPAIRS) {
+		dbret = table->set_dup_compare(table, paireddatacompare);
+		if (dbret != 0) {
+			table->err(table, dbret, "db_set_dup_compare:");
+			(void)table->close(table, 0);
+			return RET_DBERR(dbret);
+		}
+	}
+	if (type == dbt_BTREEVERSIONS) {
+		dbret = table->set_dup_compare(table, debianversioncompare);
+		if (dbret != 0) {
+			table->err(table, dbret, "db_set_dup_compare:");
+			(void)table->close(table, 0);
+			return RET_DBERR(dbret);
+		}
+	}
+
+/* DB->open gained a transaction argument in libdb 4 */
+#if DB_VERSION_MAJOR == 5 || DB_VERSION_MAJOR == 6
+#define DB_OPEN(database, filename, name, type, flags) \
+	database->open(database, NULL, filename, name, type, flags, 0664)
+#else
+#if DB_VERSION_MAJOR == 4
+#define DB_OPEN(database, filename, name, type, flags) \
+	database->open(database, NULL, filename, name, type, flags, 0664)
+#else
+#if DB_VERSION_MAJOR == 3
+#define DB_OPEN(database, filename, name, type, flags) \
+	database->open(database, filename, name, type, flags, 0664)
+#else
+#error Unexpected DB_VERSION_MAJOR!
+#endif
+#endif
+#endif
+	dbret = DB_OPEN(table, filename, subtable, types[type], flags);
+	if (dbret == ENOENT && !ISSET(flags, DB_CREATE)) {
+		(void)table->close(table, 0);
+		return RET_NOTHING;
+	}
+	if (dbret != 0) {
+		if (subtable != NULL)
+			table->err(table, dbret, "db_open(%s:%s)[%d]",
+					filename, subtable, dbret);
+		else
+			table->err(table, dbret, "db_open(%s)[%d]",
+					filename, dbret);
+		(void)table->close(table, 0);
+		return RET_DBERR(dbret);
+	}
+	*result = table;
+	return RET_OK;
+}
+
+/* List the names of all sub-databases inside database file <filename>
+ * into *result. Returns RET_OK with at least one entry, RET_NOTHING
+ * for an empty file, errors otherwise.
+ * NOTE(review): the error paths inside the loop close the table while
+ * the cursor is still open — libdb expects cursors to be closed first;
+ * confirm whether this needs a c_close before table->close. */
+retvalue database_listsubtables(const char *filename, struct strlist *result) {
+	DB *table;
+	DBC *cursor;
+	DBT key, data;
+	int dbret;
+	retvalue ret, r;
+	struct strlist ids;
+
+	r = database_opentable(filename, NULL,
+			dbt_QUERY, DB_RDONLY, &table);
+	if (!RET_IS_OK(r))
+		return r;
+
+	cursor = NULL;
+	if ((dbret = table->cursor(table, NULL, &cursor, 0)) != 0) {
+		table->err(table, dbret, "cursor(%s):", filename);
+		(void)table->close(table, 0);
+		return RET_ERROR;
+	}
+	CLEARDBT(key);
+	CLEARDBT(data);
+
+	strlist_init(&ids);
+
+	ret = RET_NOTHING;
+	/* each key is a sub-database name (stored with trailing NUL) */
+	while ((dbret=cursor->c_get(cursor, &key, &data, DB_NEXT)) == 0) {
+		char *identifier = strndup(key.data, key.size);
+		if (FAILEDTOALLOC(identifier)) {
+			(void)table->close(table, 0);
+			strlist_done(&ids);
+			return RET_ERROR_OOM;
+		}
+		r = strlist_add(&ids, identifier);
+		if (RET_WAS_ERROR(r)) {
+			(void)table->close(table, 0);
+			strlist_done(&ids);
+			return r;
+		}
+		ret = RET_OK;
+		CLEARDBT(key);
+		CLEARDBT(data);
+	}
+
+	if (dbret != 0 && dbret != DB_NOTFOUND) {
+		table->err(table, dbret, "c_get(%s):", filename);
+		(void)table->close(table, 0);
+		strlist_done(&ids);
+		return RET_DBERR(dbret);
+	}
+	if ((dbret = cursor->c_close(cursor)) != 0) {
+		table->err(table, dbret, "c_close(%s):", filename);
+		(void)table->close(table, 0);
+		strlist_done(&ids);
+		return RET_DBERR(dbret);
+	}
+
+	dbret = table->close(table, 0);
+	if (dbret != 0) {
+		table->err(table, dbret, "close(%s):", filename);
+		strlist_done(&ids);
+		return RET_DBERR(dbret);
+	} else {
+		strlist_move(result, &ids);
+		return ret;
+	}
+}
+
+/* remove sub-database <subtable> from database file <table>;
+ * RET_NOTHING when the file does not exist */
+retvalue database_dropsubtable(const char *table, const char *subtable) {
+	char *filename;
+	DB *db;
+	int dbret;
+	retvalue result;
+
+	filename = dbfilename(table);
+	if (FAILEDTOALLOC(filename))
+		return RET_ERROR_OOM;
+
+	dbret = db_create(&db, NULL, 0);
+	if (dbret != 0) {
+		fprintf(stderr, "db_create: %s %s\n",
+				filename, db_strerror(dbret));
+		free(filename);
+		return RET_DBERR(dbret);
+	}
+	/* DB->remove also destroys the handle, whatever the outcome */
+	dbret = db->remove(db, filename, subtable, 0);
+	if (dbret == 0)
+		result = RET_OK;
+	else if (dbret == ENOENT)
+		result = RET_NOTHING;
+	else {
+		fprintf(stderr, "Error removing '%s' from %s!\n",
+				subtable, filename);
+		result = RET_DBERR(dbret);
+	}
+	free(filename);
+	return result;
+}
+
+/* check whether <identifier> names a target of one of the configured
+ * distributions; marks a matching target's existed flag as side effect */
+static inline bool targetisdefined(const char *identifier, struct distribution *distributions) {
+	struct distribution *d;
+	struct target *t;
+
+	for (d = distributions ; d != NULL ; d = d->next) {
+		for (t = d->targets ; t != NULL ; t = t->next) {
+			if (strcmp(t->identifier, identifier) != 0)
+				continue;
+			t->existed = true;
+			return true;
+		}
+	}
+	return false;
+}
+
+/* Check the list of package database identifiers against the
+ * configured distributions: error (unless ignored) on databases no
+ * configured target claims, warn about architectures that are new
+ * relative to the existing databases, and (unless readonly) create
+ * databases for targets that do not have one yet. */
+static retvalue warnidentifiers(const struct strlist *identifiers, struct distribution *distributions, bool readonly) {
+	struct distribution *d;
+	struct target *t;
+	const char *identifier;
+	retvalue r;
+	int i;
+
+	for (i = 0; i < identifiers->count ; i++) {
+		identifier = identifiers->values[i];
+
+		/* also sets t->existed on the matching target */
+		if (targetisdefined(identifier, distributions))
+			continue;
+
+		fprintf(stderr,
+"Error: packages database contains unused '%s' database.\n", identifier);
+		if (ignored[IGN_undefinedtarget] == 0) {
+			(void)fputs(
+"This usually means you removed some component, architecture or even\n"
+"a whole distribution from conf/distributions.\n"
+"In that case you most likely want to call reprepro clearvanished to get rid\n"
+"of the databases belonging to those removed parts.\n"
+"(Another reason to get this error is using conf/ and db/ directories\n"
+" belonging to different reprepro repositories).\n",
+				stderr);
+		}
+		if (IGNORABLE(undefinedtarget)) {
+			(void)fputs(
+"Ignoring as --ignore=undefinedtarget given.\n",
+				stderr);
+			ignored[IGN_undefinedtarget]++;
+			continue;
+		}
+
+		(void)fputs(
+"To ignore use --ignore=undefinedtarget.\n",
+			stderr);
+		return RET_ERROR;
+	}
+	if (readonly)
+		return RET_OK;
+	for (d = distributions ; d != NULL ; d = d->next) {
+		bool architecture_existed[d->architectures.count];
+		bool have_old = false;
+
+		/* check for new architectures */
+		memset(architecture_existed, 0, sizeof(architecture_existed));
+
+		for (t = d->targets; t != NULL ; t = t->next) {
+			int o;
+
+			if (!t->existed)
+				continue;
+
+			o = atomlist_ofs(&d->architectures,
+					t->architecture);
+			assert (o >= 0);
+			if (o >= 0) {
+				architecture_existed[o] = true;
+				/* only warn about new ones if there
+				 * is at least one old one, otherwise
+				 * it's just a new distribution */
+				have_old = true;
+			}
+		}
+		for (i = 0 ; have_old && i < d->architectures.count ; i++) {
+			architecture_t a;
+
+			if (architecture_existed[i])
+				continue;
+
+			a = d->architectures.atoms[i];
+
+			fprintf(stderr,
+"New architecture '%s' in '%s'. Perhaps you want to call\n"
+"reprepro flood '%s' '%s'\n"
+"to populate it with architecture 'all' packages from other architectures.\n",
+				atoms_architectures[a], d->codename,
+				d->codename, atoms_architectures[a]);
+		}
+
+		/* create databases, so we know next time what is new */
+		for (t = d->targets; t != NULL ; t = t->next) {
+			if (t->existed)
+				continue;
+			/* create database now, to test it can be created
+			 * early, and to know when new architectures
+			 * arrive in the future. */
+			r = target_initpackagesdb(t, READWRITE);
+			if (RET_WAS_ERROR(r))
+				return r;
+			r = target_closepackagesdb(t);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+	}
+	return RET_OK;
+}
+
+/* Check the codenames found in the tracking database against the
+ * configured distributions; error (unless --ignore=undefinedtracking)
+ * when a tracking database belongs to no distribution with tracking
+ * enabled. */
+static retvalue warnunusedtracking(const struct strlist *codenames, const struct distribution *distributions) {
+	const char *codename;
+	const struct distribution *d;
+	int i;
+
+	for (i = 0; i < codenames->count ; i++) {
+		codename = codenames->values[i];
+
+		d = distributions;
+		while (d != NULL && strcmp(d->codename, codename) != 0)
+			d = d->next;
+		if (d != NULL && d->tracking != dt_NONE)
+			continue;
+
+		fprintf(stderr,
+"Error: tracking database contains unused '%s' database.\n", codename);
+		if (ignored[IGN_undefinedtracking] == 0) {
+			/* distinguish vanished distribution from
+			 * tracking being switched off */
+			if (d == NULL)
+				(void)fputs(
+"This either means you removed a distribution from the distributions config\n"
+"file without calling clearvanished (or at least removealltracks), you\n"
+"experienced a bug in retrack in versions < 3.0.0, you found a new bug or your\n"
+"config does not belong to this database.\n",
+					stderr);
+			else
+				(void)fputs(
+"This either means you removed the Tracking: options from this distribution without\n"
+"calling removealltracks for it, or your config does not belong to this database.\n",
+					stderr);
+		}
+		if (IGNORABLE(undefinedtracking)) {
+			(void)fputs(
+"Ignoring as --ignore=undefinedtracking given.\n",
+				stderr);
+			ignored[IGN_undefinedtracking]++;
+			continue;
+		}
+
+		(void)fputs("To ignore use --ignore=undefinedtracking.\n",
+				stderr);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Read one line (at most 19 characters) from <f> into a newly
+ * allocated string stored in *result, stripping CR/LF terminators.
+ * Returns RET_OK, RET_ERROR on empty file or empty line, RET_ERRNO on
+ * read errors, RET_ERROR_OOM on allocation failure. */
+static retvalue readline(/*@out@*/char **result, FILE *f, const char *versionfilename) {
+	char buffer[21];
+	size_t l;
+
+	if (fgets(buffer, 20, f) == NULL) {
+		int e = errno;
+		/* the old code tested errno == 0 to recognize end of
+		 * file, but errno is not reset by a successful fgets,
+		 * so a stale value made EOF look like a read error;
+		 * ask the stream directly instead */
+		if (feof(f)) {
+			fprintf(stderr,
+"Error reading '%s': unexpected empty file\n",
+				versionfilename);
+			return RET_ERROR;
+		} else {
+			fprintf(stderr, "Error reading '%s': %s(errno is %d)\n",
+					versionfilename, strerror(e), e);
+			return RET_ERRNO(e);
+		}
+	}
+	l = strlen(buffer);
+	/* strip trailing CR/LF */
+	while (l > 0 && (buffer[l-1] == '\r' || buffer[l-1] == '\n')) {
+		buffer[--l] = '\0';
+	}
+	if (l == 0) {
+		fprintf(stderr, "Error reading '%s': unexpected empty line.\n",
+				versionfilename);
+		return RET_ERROR;
+	}
+	*result = strdup(buffer);
+	if (FAILEDTOALLOC(*result))
+		return RET_ERROR_OOM;
+	return RET_OK;
+}
+
+/* Read db/version (four lines: creating reprepro version, minimum
+ * reprepro version able to read the data, creating libdb version,
+ * minimum libdb version able to read the data) into the rdb_* globals
+ * and verify this binary can handle the database.
+ * Returns RET_NOTHING when the file does not exist (new database if
+ * <nopackagesyet>), RET_ERROR when the database is too new. */
+static retvalue readversionfile(bool nopackagesyet) {
+	char *versionfilename;
+	FILE *f;
+	retvalue r;
+	int c;
+
+	versionfilename = dbfilename("version");
+	if (FAILEDTOALLOC(versionfilename))
+		return RET_ERROR_OOM;
+	f = fopen(versionfilename, "r");
+	if (f == NULL) {
+		int e = errno;
+
+		if (e != ENOENT) {
+			fprintf(stderr, "Error opening '%s': %s(errno is %d)\n",
+					versionfilename, strerror(e), e);
+			free(versionfilename);
+			return RET_ERRNO(e);
+		}
+		free(versionfilename);
+		if (nopackagesyet) {
+			/* set to default for new packages.db files: */
+			rdb_version = strdup(VERSION);
+			if (FAILEDTOALLOC(rdb_version))
+				return RET_ERROR_OOM;
+		} else
+			rdb_version = NULL;
+		rdb_lastsupportedversion = NULL;
+		rdb_dbversion = NULL;
+		rdb_lastsupporteddbversion = NULL;
+		return RET_NOTHING;
+	}
+	/* first line is the version creating this database */
+	r = readline(&rdb_version, f, versionfilename);
+	if (RET_WAS_ERROR(r)) {
+		(void)fclose(f);
+		free(versionfilename);
+		return r;
+	}
+	/* second line says which versions of reprepro will be able to cope
+	 * with this database */
+	r = readline(&rdb_lastsupportedversion, f, versionfilename);
+	if (RET_WAS_ERROR(r)) {
+		(void)fclose(f);
+		free(versionfilename);
+		return r;
+	}
+	/* next line is the version of the underlying database library */
+	r = readline(&rdb_dbversion, f, versionfilename);
+	if (RET_WAS_ERROR(r)) {
+		(void)fclose(f);
+		free(versionfilename);
+		return r;
+	}
+	/* and then the minimum version of this library needed. */
+	r = readline(&rdb_lastsupporteddbversion, f, versionfilename);
+	if (RET_WAS_ERROR(r)) {
+		(void)fclose(f);
+		free(versionfilename);
+		return r;
+	}
+	(void)fclose(f);
+	free(versionfilename);
+
+	/* ensure we can understand it */
+
+	r = dpkgversions_cmp(VERSION, rdb_lastsupportedversion, &c);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (c < 0) {
+		fprintf(stderr,
+"According to %s/version this database was created with a future version\n"
+"and uses features this version cannot understand. Aborting...\n",
+				global.dbdir);
+		return RET_ERROR;
+	}
+
+	/* ensure it's a libdb database: */
+
+	if (strncmp(rdb_dbversion, "bdb", 3) != 0) {
+		fprintf(stderr,
+"According to %s/version this database was created with a yet unsupported\n"
+"database library. Aborting...\n",
+				global.dbdir);
+		return RET_ERROR;
+	}
+	if (strncmp(rdb_lastsupporteddbversion, "bdb", 3) != 0) {
+		fprintf(stderr,
+"According to %s/version this database was created with a yet unsupported\n"
+"database library. Aborting...\n",
+				global.dbdir);
+		return RET_ERROR;
+	}
+	/* compare the "bdbX.Y.Z" strings as debian versions */
+	r = dpkgversions_cmp(LIBDB_VERSION_STRING,
+			rdb_lastsupporteddbversion, &c);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (c < 0) {
+		/* NOTE(review): the message prints rdb_dbversion + 3
+		 * while the comparison used rdb_lastsupporteddbversion;
+		 * confirm which version is meant here */
+		fprintf(stderr,
+"According to %s/version this database was created with a future version\n"
+"%s of libdb. The libdb version this binary is linked against cannot yet\n"
+"handle this format. Aborting...\n",
+				global.dbdir, rdb_dbversion + 3);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+static retvalue writeversionfile(void) {
+ char *versionfilename, *finalversionfilename;
+ FILE *f;
+ int i, e;
+
+ versionfilename = dbfilename("version.new");
+ if (FAILEDTOALLOC(versionfilename))
+ return RET_ERROR_OOM;
+ f = fopen(versionfilename, "w");
+ if (f == NULL) {
+ e = errno;
+ fprintf(stderr, "Error creating '%s': %s(errno is %d)\n",
+ versionfilename, strerror(e), e);
+ free(versionfilename);
+ return RET_ERRNO(e);
+ }
+ if (rdb_version == NULL)
+ (void)fputs("0\n", f);
+ else {
+ (void)fputs(rdb_version, f);
+ (void)fputc('\n', f);
+ }
+ if (rdb_lastsupportedversion == NULL) {
+ (void)fputs("3.3.0\n", f);
+ } else {
+ int c;
+ retvalue r;
+
+ r = dpkgversions_cmp(rdb_lastsupportedversion,
+ "3.3.0", &c);
+ if (!RET_IS_OK(r) || c < 0)
+ (void)fputs("3.3.0\n", f);
+ else {
+ (void)fputs(rdb_lastsupportedversion, f);
+ (void)fputc('\n', f);
+ }
+ }
+ if (rdb_dbversion == NULL)
+ fprintf(f, "bdb%d.%d.%d\n", DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH);
+ else {
+ (void)fputs(rdb_dbversion, f);
+ (void)fputc('\n', f);
+ }
+ if (rdb_lastsupporteddbversion == NULL)
+ fprintf(f, "bdb%d.%d.0\n", DB_VERSION_MAJOR, DB_VERSION_MINOR);
+ else {
+ (void)fputs(rdb_lastsupporteddbversion, f);
+ (void)fputc('\n', f);
+ }
+
+ e = ferror(f);
+
+ if (e != 0) {
+ fprintf(stderr, "Error writing '%s': %s(errno is %d)\n",
+ versionfilename, strerror(e), e);
+ (void)fclose(f);
+ unlink(versionfilename);
+ free(versionfilename);
+ return RET_ERRNO(e);
+ }
+ if (fclose(f) != 0) {
+ e = errno;
+ fprintf(stderr, "Error writing '%s': %s(errno is %d)\n",
+ versionfilename, strerror(e), e);
+ unlink(versionfilename);
+ free(versionfilename);
+ return RET_ERRNO(e);
+ }
+ finalversionfilename = dbfilename("version");
+ if (FAILEDTOALLOC(finalversionfilename)) {
+ unlink(versionfilename);
+ free(versionfilename);
+ return RET_ERROR_OOM;
+ }
+
+ i = rename(versionfilename, finalversionfilename);
+ if (i != 0) {
+ e = errno;
+ fprintf(stderr, "Error %d moving '%s' to '%s': %s\n",
+ e, versionfilename, finalversionfilename,
+ strerror(e));
+ (void)unlink(versionfilename);
+ free(versionfilename);
+ free(finalversionfilename);
+ return RET_ERRNO(e);
+ }
+ free(finalversionfilename);
+ free(versionfilename);
+ return RET_OK;
+}
+
+static retvalue createnewdatabase(struct distribution *distributions) {
+ struct distribution *d;
+ struct target *t;
+ retvalue result = RET_NOTHING, r;
+
+ for (d = distributions ; d != NULL ; d = d->next) {
+ for (t = d->targets ; t != NULL ; t = t->next) {
+ r = target_initpackagesdb(t, READWRITE);
+ RET_UPDATE(result, r);
+ if (RET_IS_OK(r)) {
+ r = target_closepackagesdb(t);
+ RET_UPDATE(result, r);
+ }
+ }
+ }
+ r = writeversionfile();
+ RET_UPDATE(result, r);
+ return result;
+}
+
/* Initialize a database.
 * - if not fast, make all kind of checks for consistency (TO BE IMPLEMENTED),
 * - if readonly, do not create but return with RET_NOTHING
 * - lock database, waiting a given amount of time if already locked
 * On success the rdb_* globals describe the opened database; on any
 * error the lock is released and all database state freed again. */
retvalue database_create(struct distribution *alldistributions, bool fast, bool nopackages, bool allowunused, bool readonly, size_t waitforlock, bool verbosedb) {
	retvalue r;
	bool packagesfileexists, trackingfileexists, nopackagesyet;

	/* the database layer is a singleton; opening twice is a bug */
	if (rdb_initialized || rdb_used) {
		fputs("Internal Error: database initialized a 2nd time!\n",
				stderr);
		return RET_ERROR_INTERNAL;
	}

	/* nothing to read and nothing may be created: nothing to do */
	if (readonly && !isdir(global.dbdir)) {
		if (verbose >= 0)
			fprintf(stderr,
"Exiting without doing anything, as there is no database yet that could result in other actions.\n");
		return RET_NOTHING;
	}

	rdb_initialized = true;
	rdb_used = true;

	r = database_lock(waitforlock);
	assert (r != RET_NOTHING);
	if (!RET_IS_OK(r)) {
		database_free();
		return r;
	}
	rdb_readonly = readonly;
	rdb_verbose = verbosedb;

	r = database_hasdatabasefile("packages.db", &packagesfileexists);
	if (RET_WAS_ERROR(r)) {
		releaselock();
		database_free();
		return r;
	}
	r = database_hasdatabasefile("tracking.db", &trackingfileexists);
	if (RET_WAS_ERROR(r)) {
		releaselock();
		database_free();
		return r;
	}

	/* neither file present: this is a brand-new database */
	nopackagesyet = !packagesfileexists && !trackingfileexists;

	r = readversionfile(nopackagesyet);
	if (RET_WAS_ERROR(r)) {
		releaselock();
		database_free();
		return r;
	}

	/* caller does not want package databases at all: done here */
	if (nopackages) {
		rdb_nopackages = true;
		return RET_OK;
	}

	if (nopackagesyet) {
		// TODO: handle readonly, but only once packages files may no
		// longer be generated when it is active...

		r = createnewdatabase(alldistributions);
		if (RET_WAS_ERROR(r)) {
			database_close();
			return r;
		}
	}

	/* after this point we should call database_close,
	 * as other stuff was handled,
	 * so writing the version file cannot harm (and not doing so could) */

	/* warn about package databases no distribution references */
	if (!allowunused && !fast && packagesfileexists) {
		struct strlist identifiers;

		r = database_listpackages(&identifiers);
		if (RET_WAS_ERROR(r)) {
			database_close();
			return r;
		}
		if (r == RET_NOTHING)
			strlist_init(&identifiers);
		r = warnidentifiers(&identifiers,
				alldistributions, readonly);
		if (RET_WAS_ERROR(r)) {
			strlist_done(&identifiers);
			database_close();
			return r;
		}
		strlist_done(&identifiers);
	}
	/* warn about tracking data of unknown distributions */
	if (!allowunused && !fast && trackingfileexists) {
		struct strlist codenames;

		r = tracking_listdistributions(&codenames);
		if (RET_WAS_ERROR(r)) {
			database_close();
			return r;
		}
		if (RET_IS_OK(r)) {
			r = warnunusedtracking(&codenames, alldistributions);
			if (RET_WAS_ERROR(r)) {
				strlist_done(&codenames);
				database_close();
				return r;
			}
			strlist_done(&codenames);
		}
	}

	return RET_OK;
}
+
+/****************************************************************************
+ * Stuff string parts *
+ ****************************************************************************/
+
/* prefix for all BerkeleyDB error reports printed via err()/errx() below */
static const char databaseerror[] = "Internal error of the underlying BerkeleyDB database:\n";
+
+/****************************************************************************
+ * Stuff to handle data in tables *
+ ****************************************************************************
+ There is nothing that cannot be solved by another layer of indirection, except
+ too many levels of indirection. (Source forgotten) */
+
/* A libdb cursor together with the DB_NEXT* mode it iterates with.
 * r caches the first error hit during iteration so cursor_close()
 * can report it to the caller. */
struct cursor {
	DBC *cursor;
	uint32_t flags;	/* DB_NEXT / DB_NEXT_DUP / ... used by cursor_next */
	retvalue r;	/* sticky error value, RET_OK while all is well */
};

/* An open table. berkeleydb may be NULL for a readonly table whose
 * database file does not exist (treated as an empty table);
 * sec_berkeleydb is an optional secondary index over the same data. */
struct table {
	char *name, *subname;
	DB *berkeleydb;
	DB *sec_berkeleydb;
	bool readonly, verbose;
	uint32_t flags;	/* the DB_* flags the table was opened with */
};
+
+static void table_printerror(struct table *table, int dbret, const char *action) {
+ char *error_msg;
+
+ switch (dbret) {
+ case DB_MALFORMED_KEY:
+ error_msg = "DB_MALFORMED_KEY: Primary key does not contain the separator '|'.";
+ break;
+ case RET_ERROR_OOM:
+ error_msg = "RET_ERROR_OOM: Out of memory.";
+ break;
+ default:
+ error_msg = NULL;
+ break;
+ }
+
+ if (error_msg == NULL) {
+ if (table->subname != NULL)
+ table->berkeleydb->err(table->berkeleydb, dbret,
+ "%sWithin %s subtable %s at %s",
+ databaseerror, table->name, table->subname,
+ action);
+ else
+ table->berkeleydb->err(table->berkeleydb, dbret,
+ "%sWithin %s at %s",
+ databaseerror, table->name, action);
+ } else {
+ if (table->subname != NULL)
+ table->berkeleydb->errx(table->berkeleydb,
+ "%sWithin %s subtable %s at %s: %s",
+ databaseerror, table->name, table->subname,
+ action, error_msg);
+ else
+ table->berkeleydb->errx(table->berkeleydb,
+ "%sWithin %s at %s: %s",
+ databaseerror, table->name, action, error_msg);
+ }
+}
+
+static void print_opened_tables(FILE *stream) {
+ if (opened_tables == NULL) {
+ fprintf(stream, "No tables are opened.\n");
+ } else {
+ fprintf(stream, "Opened tables:\n");
+ for (struct opened_tables *iter = opened_tables; iter != NULL; iter = iter->next) {
+ fprintf(stream, " * %s - '%s'\n", iter->name, iter->subname);
+ }
+ }
+}
+
/* Close a table: close the secondary and primary libdb handles,
 * unregister the table from the global opened_tables list and free it.
 * Safe to call with NULL (returns RET_NOTHING). */
retvalue table_close(struct table *table) {
	struct opened_tables *prev = NULL;
	int dbret;
	retvalue result = RET_OK;

	/* NOTE(review): passing NULL to a %s conversion is undefined
	 * behavior on some libcs — confirm verbose>=15 tracing is only
	 * used on glibc, or guard the arguments. */
	if (verbose >= 15)
		fprintf(stderr, "trace: table_close(table.name=%s, table.subname=%s) called.\n",
				table == NULL ? NULL : table->name, table == NULL ? NULL : table->subname);
	if (table == NULL)
		return RET_NOTHING;
	if (table->sec_berkeleydb != NULL) {
		dbret = table->sec_berkeleydb->close(table->sec_berkeleydb, 0);
		if (dbret != 0) {
			fprintf(stderr, "db_sec_close(%s, %s): %s\n",
					table->name, table->subname,
					db_strerror(dbret));
			result = RET_DBERR(dbret);
		}
	}
	/* a NULL primary handle means the file was missing in readonly mode */
	if (table->berkeleydb == NULL) {
		assert (table->readonly);
		dbret = 0;
	} else
		dbret = table->berkeleydb->close(table->berkeleydb, 0);
	if (dbret != 0) {
		fprintf(stderr, "db_close(%s, %s): %s\n",
				table->name, table->subname,
				db_strerror(dbret));
		result = RET_DBERR(dbret);
	}

	/* unlink this table's entry from the opened_tables list */
	for (struct opened_tables *iter = opened_tables; iter != NULL; iter = iter->next) {
		if(strcmp2(iter->name, table->name) == 0 && strcmp2(iter->subname, table->subname) == 0) {
			if (prev == NULL) {
				opened_tables = iter->next;
			} else {
				prev->next = iter->next;
			}
			free(iter);
			break;
		}
		prev = iter;
	}

	if (verbose >= 25)
		print_opened_tables(stderr);

	free(table->name);
	free(table->subname);
	free(table);
	return result;
}
+
+retvalue table_getrecord(struct table *table, bool secondary, const char *key, char **data_p, size_t *datalen_p) {
+ int dbret;
+ DBT Key, Data;
+ DB *db;
+
+ assert (table != NULL);
+ if (table->berkeleydb == NULL) {
+ assert (table->readonly);
+ return RET_NOTHING;
+ }
+
+ SETDBT(Key, key);
+ CLEARDBT(Data);
+ Data.flags = DB_DBT_MALLOC;
+
+ if (secondary)
+ db = table->sec_berkeleydb;
+ else
+ db = table->berkeleydb;
+ dbret = db->get(db, NULL, &Key, &Data, 0);
+ // TODO: find out what error code means out of memory...
+ if (dbret == DB_NOTFOUND)
+ return RET_NOTHING;
+ if (dbret != 0) {
+ table_printerror(table, dbret, "get");
+ return RET_DBERR(dbret);
+ }
+ if (FAILEDTOALLOC(Data.data))
+ return RET_ERROR_OOM;
+ if (Data.size <= 0 ||
+ ((const char*)Data.data)[Data.size-1] != '\0') {
+ if (table->subname != NULL)
+ fprintf(stderr,
+"Database %s(%s) returned corrupted (not null-terminated) data!\n",
+ table->name, table->subname);
+ else
+ fprintf(stderr,
+"Database %s returned corrupted (not null-terminated) data!\n",
+ table->name);
+ free(Data.data);
+ return RET_ERROR;
+ }
+ *data_p = Data.data;
+ if (datalen_p != NULL)
+ *datalen_p = Data.size-1;
+ return RET_OK;
+}
+
/* Look up the record under key whose data starts with the given value,
 * i.e. a pair stored as "value\0data\0". On success *data_p points just
 * behind the value part, into memory owned by libdb (only valid until
 * the next database operation). */
retvalue table_getpair(struct table *table, const char *key, const char *value, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) {
	int dbret;
	DBT Key, Data;
	size_t valuelen = strlen(value);

	assert (table != NULL);
	/* a file-less readonly table contains nothing */
	if (table->berkeleydb == NULL) {
		assert (table->readonly);
		return RET_NOTHING;
	}

	SETDBT(Key, key);
	/* DB_GET_BOTH matches on the value prefix including its NUL */
	SETDBTl(Data, value, valuelen + 1);

	dbret = table->berkeleydb->get(table->berkeleydb, NULL,
			&Key, &Data, DB_GET_BOTH);
	if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY)
		return RET_NOTHING;
	if (dbret != 0) {
		table_printerror(table, dbret, "get(BOTH)");
		return RET_DBERR(dbret);
	}
	if (FAILEDTOALLOC(Data.data))
		return RET_ERROR_OOM;
	/* the record must contain value, its NUL, data and a final NUL */
	if (Data.size < valuelen + 2 ||
	    ((const char*)Data.data)[Data.size-1] != '\0') {
		if (table->subname != NULL)
			fprintf(stderr,
"Database %s(%s) returned corrupted (not paired) data!",
				table->name, table->subname);
		else
			fprintf(stderr,
"Database %s returned corrupted (not paired) data!",
				table->name);
		return RET_ERROR;
	}
	*data_p = ((const char*)Data.data) + valuelen + 1;
	*datalen_p = Data.size - valuelen - 2;
	return RET_OK;
}
+
/* Look up key and return a pointer to the record's data in memory owned
 * by libdb (only valid until the next database operation).
 * With data_p == NULL this is a pure existence check. */
retvalue table_gettemprecord(struct table *table, const char *key, const char **data_p, size_t *datalen_p) {
	int dbret;
	DBT Key, Data;

	assert (table != NULL);
	/* a file-less readonly table contains nothing */
	if (table->berkeleydb == NULL) {
		assert (table->readonly);
		return RET_NOTHING;
	}

	SETDBT(Key, key);
	CLEARDBT(Data);

	dbret = table->berkeleydb->get(table->berkeleydb, NULL,
			&Key, &Data, 0);
	// TODO: find out what error code means out of memory...
	if (dbret == DB_NOTFOUND)
		return RET_NOTHING;
	if (dbret != 0) {
		table_printerror(table, dbret, "get");
		return RET_DBERR(dbret);
	}
	if (FAILEDTOALLOC(Data.data))
		return RET_ERROR_OOM;
	/* caller only wanted to know whether the record exists */
	if (data_p == NULL) {
		assert (datalen_p == NULL);
		return RET_OK;
	}
	/* every stored record carries a trailing NUL byte */
	if (Data.size <= 0 ||
	    ((const char*)Data.data)[Data.size-1] != '\0') {
		if (table->subname != NULL)
			fprintf(stderr,
"Database %s(%s) returned corrupted (not null-terminated) data!\n",
				table->name, table->subname);
		else
			fprintf(stderr,
"Database %s returned corrupted (not null-terminated) data!\n",
				table->name);
		return RET_ERROR;
	}
	*data_p = Data.data;
	if (datalen_p != NULL)
		*datalen_p = Data.size - 1;
	return RET_OK;
}
+
+retvalue table_checkrecord(struct table *table, const char *key, const char *data) {
+ int dbret;
+ DBT Key, Data;
+ DBC *cursor;
+ retvalue r;
+
+ SETDBT(Key, key);
+ SETDBT(Data, data);
+ dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor, 0);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "cursor");
+ return RET_DBERR(dbret);
+ }
+ dbret=cursor->c_get(cursor, &Key, &Data, DB_GET_BOTH);
+ if (dbret == 0) {
+ r = RET_OK;
+ } else if (dbret == DB_NOTFOUND) {
+ r = RET_NOTHING;
+ } else {
+ table_printerror(table, dbret, "c_get");
+ (void)cursor->c_close(cursor);
+ return RET_DBERR(dbret);
+ }
+ dbret = cursor->c_close(cursor);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_close");
+ return RET_DBERR(dbret);
+ }
+ return r;
+}
+
+retvalue table_removerecord(struct table *table, const char *key, const char *data) {
+ int dbret;
+ DBT Key, Data;
+ DBC *cursor;
+ retvalue r;
+
+ SETDBT(Key, key);
+ SETDBT(Data, data);
+ dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor, 0);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "cursor");
+ return RET_DBERR(dbret);
+ }
+ dbret=cursor->c_get(cursor, &Key, &Data, DB_GET_BOTH);
+
+ if (dbret == 0)
+ dbret = cursor->c_del(cursor, 0);
+
+ if (dbret == 0) {
+ r = RET_OK;
+ } else if (dbret == DB_NOTFOUND) {
+ r = RET_NOTHING;
+ } else {
+ table_printerror(table, dbret, "c_get");
+ (void)cursor->c_close(cursor);
+ return RET_DBERR(dbret);
+ }
+ dbret = cursor->c_close(cursor);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_close");
+ return RET_DBERR(dbret);
+ }
+ return r;
+}
+
+bool table_recordexists(struct table *table, const char *key) {
+ retvalue r;
+
+ r = table_gettemprecord(table, key, NULL, NULL);
+ return RET_IS_OK(r);
+}
+
+retvalue table_addrecord(struct table *table, const char *key, const char *data, size_t datalen, bool ignoredups) {
+ int dbret;
+ DBT Key, Data;
+
+ assert (table != NULL);
+ assert (!table->readonly && table->berkeleydb != NULL);
+
+ SETDBT(Key, key);
+ SETDBTl(Data, data, datalen + 1);
+ dbret = table->berkeleydb->put(table->berkeleydb, NULL,
+ &Key, &Data, ISSET(table->flags, DB_DUPSORT) ? DB_NODUPDATA : 0);
+ if (dbret != 0 && !(ignoredups && dbret == DB_KEYEXIST)) {
+ table_printerror(table, dbret, "put");
+ return RET_DBERR(dbret);
+ }
+ if (table->verbose) {
+ if (table->subname != NULL)
+ printf("db: '%s' added to %s(%s).\n",
+ key, table->name, table->subname);
+ else
+ printf("db: '%s' added to %s.\n",
+ key, table->name);
+ }
+ return RET_OK;
+}
+
/* Store a record of data_size bytes (which must already include the
 * terminating NUL byte) under key.
 * allowoverwrite: replace an existing record instead of failing;
 * nooverwrite: silently keep an existing record (returns RET_NOTHING). */
retvalue table_adduniqsizedrecord(struct table *table, const char *key, const char *data, size_t data_size, bool allowoverwrite, bool nooverwrite) {
	int dbret;
	DBT Key, Data;

	assert (table != NULL);
	assert (!table->readonly && table->berkeleydb != NULL);
	assert (data_size > 0 && data[data_size-1] == '\0');

	SETDBT(Key, key);
	SETDBTl(Data, data, data_size);
	dbret = table->berkeleydb->put(table->berkeleydb, NULL,
			&Key, &Data, allowoverwrite?0:DB_NOOVERWRITE);
	if (nooverwrite && dbret == DB_KEYEXIST) {
		/* if nooverwrite is set, do nothing and ignore: */
		return RET_NOTHING;
	}
	if (dbret != 0) {
		table_printerror(table, dbret, "put(uniq)");
		return RET_DBERR(dbret);
	}
	if (table->verbose) {
		if (table->subname != NULL)
			printf("db: '%s' added to %s(%s).\n",
					key, table->name, table->subname);
		else
			printf("db: '%s' added to %s.\n",
					key, table->name);
	}
	return RET_OK;
}
/* Store a NUL-terminated string record under key; fails (with a
 * database error) if a record under this key already exists. */
retvalue table_adduniqrecord(struct table *table, const char *key, const char *data) {
	if (verbose >= 15)
		fprintf(stderr, "trace: table_adduniqrecord(table={name: %s, subname: %s}, key=%s) called.\n",
				table->name, table->subname, key);
	/* store including the terminating NUL byte */
	return table_adduniqsizedrecord(table, key, data, strlen(data)+1,
			false, false);
}
+
+retvalue table_deleterecord(struct table *table, const char *key, bool ignoremissing) {
+ int dbret;
+ DBT Key;
+
+ assert (table != NULL);
+ assert (!table->readonly && table->berkeleydb != NULL);
+
+ SETDBT(Key, key);
+ dbret = table->berkeleydb->del(table->berkeleydb, NULL, &Key, 0);
+ if (dbret != 0) {
+ if (dbret == DB_NOTFOUND && ignoremissing)
+ return RET_NOTHING;
+ table_printerror(table, dbret, "del");
+ if (dbret == DB_NOTFOUND)
+ return RET_ERROR_MISSING;
+ else
+ return RET_DBERR(dbret);
+ }
+ if (table->verbose) {
+ if (table->subname != NULL)
+ printf("db: '%s' removed from %s(%s).\n",
+ key, table->name, table->subname);
+ else
+ printf("db: '%s' removed from %s.\n",
+ key, table->name);
+ }
+ return RET_OK;
+}
+
+retvalue table_replacerecord(struct table *table, const char *key, const char *data) {
+ retvalue r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: table_replacerecord(table={name: %s, subname: %s}, key=%s) called.\n",
+ table->name, table->subname, key);
+ r = table_deleterecord(table, key, false);
+ if (r != RET_ERROR_MISSING && RET_WAS_ERROR(r))
+ return r;
+ return table_adduniqrecord(table, key, data);
+}
+
/* Allocate a cursor over the secondary index if the table has one,
 * otherwise over the primary database. flags is the DB_NEXT* mode later
 * used by cursor_next(). Returns RET_NOTHING (with *cursor_p set to
 * NULL) for a readonly table whose database file does not exist. */
static retvalue newcursor(struct table *table, uint32_t flags, struct cursor **cursor_p) {
	DB *berkeleydb;
	struct cursor *cursor;
	int dbret;

	if (verbose >= 15)
		fprintf(stderr, "trace: newcursor(table={name: %s, subname: %s}) called.\n",
				table->name, table->subname);

	/* iterate the secondary index when one is attached */
	if (table->sec_berkeleydb == NULL) {
		berkeleydb = table->berkeleydb;
	} else {
		berkeleydb = table->sec_berkeleydb;
	}

	if (berkeleydb == NULL) {
		assert (table->readonly);
		*cursor_p = NULL;
		return RET_NOTHING;
	}

	cursor = zNEW(struct cursor);
	if (FAILEDTOALLOC(cursor))
		return RET_ERROR_OOM;

	cursor->cursor = NULL;
	cursor->flags = flags;
	cursor->r = RET_OK;
	dbret = berkeleydb->cursor(berkeleydb, NULL,
			&cursor->cursor, 0);
	if (dbret != 0) {
		table_printerror(table, dbret, "cursor");
		free(cursor);
		return RET_DBERR(dbret);
	}
	*cursor_p = cursor;
	return RET_OK;
}
+
+retvalue table_newglobalcursor(struct table *table, bool duplicate, struct cursor **cursor_p) {
+ retvalue r;
+
+ r = newcursor(table, duplicate ? DB_NEXT : DB_NEXT_NODUP, cursor_p);
+ if (r == RET_NOTHING) {
+ // table_newglobalcursor returned RET_OK when table->berkeleydb == NULL. Is that return value wanted?
+ r = RET_OK;
+ }
+ return r;
+}
+
+static inline retvalue parse_data(struct table *table, DBT Key, DBT Data, /*@null@*//*@out@*/const char **key_p, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) {
+ if (Key.size <= 0 || Data.size <= 0 ||
+ ((const char*)Key.data)[Key.size-1] != '\0' ||
+ ((const char*)Data.data)[Data.size-1] != '\0') {
+ if (table->subname != NULL)
+ fprintf(stderr,
+"Database %s(%s) returned corrupted (not null-terminated) data!",
+ table->name, table->subname);
+ else
+ fprintf(stderr,
+"Database %s returned corrupted (not null-terminated) data!",
+ table->name);
+ return RET_ERROR;
+ }
+ if (key_p != NULL)
+ *key_p = Key.data;
+ *data_p = Data.data;
+ if (datalen_p != NULL)
+ *datalen_p = Data.size - 1;
+ return RET_OK;
+}
+
/* Split a record stored as "value\0data\0" into its two parts.
 * On success *value_p points at the value, *data_p just behind its NUL,
 * and *datalen_p is the data length without the final NUL. All pointers
 * reference libdb-owned memory. */
static inline retvalue parse_pair(struct table *table, DBT Key, DBT Data, /*@null@*//*@out@*/const char **key_p, /*@out@*/const char **value_p, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) {
	/*@dependant@*/ const char *separator;

	if (Key.size == 0 || Data.size == 0 ||
	    ((const char*)Key.data)[Key.size-1] != '\0' ||
	    ((const char*)Data.data)[Data.size-1] != '\0') {
		if (table->subname != NULL)
			fprintf(stderr,
"Database %s(%s) returned corrupted (not null-terminated) data!",
				table->name, table->subname);
		else
			fprintf(stderr,
"Database %s returned corrupted (not null-terminated) data!",
				table->name);
		return RET_ERROR;
	}
	/* the NUL between value and data; must exist before the final NUL */
	separator = memchr(Data.data, '\0', Data.size-1);
	if (separator == NULL) {
		if (table->subname != NULL)
			fprintf(stderr,
"Database %s(%s) returned corrupted data!\n",
				table->name, table->subname);
		else
			fprintf(stderr,
"Database %s returned corrupted data!\n",
				table->name);
		return RET_ERROR;
	}
	if (key_p != NULL)
		*key_p = Key.data;
	*value_p = Data.data;
	*data_p = separator + 1;
	/* subtract both NUL bytes from the remaining length */
	*datalen_p = Data.size - (separator - (const char*)Data.data) - 2;
	return RET_OK;
}
+
/* Create a DB_NEXT_DUP cursor positioned on the (skip+1)-th record
 * stored under key, returning pointers to that record's key and data
 * (libdb-owned memory). Returns RET_NOTHING if the key does not exist
 * or has fewer than skip+1 records. */
retvalue table_newduplicatecursor(struct table *table, const char *key, long long skip, struct cursor **cursor_p, const char **key_p, const char **data_p, size_t *datalen_p) {
	struct cursor *cursor;
	int dbret;
	DBT Key, Data;
	retvalue r;

	r = newcursor(table, DB_NEXT_DUP, &cursor);
	if(!RET_IS_OK(r)) {
		return r;
	}
	/* position on the first record with this key */
	SETDBT(Key, key);
	CLEARDBT(Data);
	dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_SET);
	if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) {
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return RET_NOTHING;
	}
	if (dbret != 0) {
		table_printerror(table, dbret, "c_get(DB_SET)");
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return RET_DBERR(dbret);
	}

	/* advance over the first skip duplicates */
	while (skip > 0) {
		CLEARDBT(Key);
		CLEARDBT(Data);

		dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, cursor->flags);
		if (dbret == DB_NOTFOUND) {
			(void)cursor->cursor->c_close(cursor->cursor);
			free(cursor);
			return RET_NOTHING;
		}
		if (dbret != 0) {
			table_printerror(table, dbret, "c_get(DB_NEXT_DUP)");
			(void)cursor->cursor->c_close(cursor->cursor);
			free(cursor);
			return RET_DBERR(dbret);
		}

		skip--;
	}

	r = parse_data(table, Key, Data, key_p, data_p, datalen_p);
	if (RET_WAS_ERROR(r)) {
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return r;
	}
	*cursor_p = cursor;
	return RET_OK;
}
+
/* Create a DB_NEXT_DUP cursor positioned on the first record stored
 * under key, splitting it into value/data parts (see parse_pair).
 * Returns RET_NOTHING if the key does not exist.
 * NOTE(review): unlike table_newduplicatecursor this writes the cursor
 * through cursor_p before positioning, so on the failure paths below
 * *cursor_p is left pointing at freed memory — callers must ignore it
 * on any non-OK return; confirm all callers do. */
retvalue table_newduplicatepairedcursor(struct table *table, const char *key, struct cursor **cursor_p, const char **value_p, const char **data_p, size_t *datalen_p) {
	struct cursor *cursor;
	int dbret;
	DBT Key, Data;
	retvalue r;

	r = newcursor(table, DB_NEXT_DUP, cursor_p);
	if(!RET_IS_OK(r)) {
		return r;
	}
	cursor = *cursor_p;
	/* position on the first record with this key */
	SETDBT(Key, key);
	CLEARDBT(Data);
	dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_SET);
	if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) {
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return RET_NOTHING;
	}
	if (dbret != 0) {
		table_printerror(table, dbret, "c_get(DB_SET)");
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return RET_DBERR(dbret);
	}
	r = parse_pair(table, Key, Data, NULL, value_p, data_p, datalen_p);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r)) {
		(void)cursor->cursor->c_close(cursor->cursor);
		free(cursor);
		return r;
	}

	*cursor_p = cursor;
	return RET_OK;
}
+
+retvalue table_newpairedcursor(struct table *table, const char *key, const char *value, struct cursor **cursor_p, const char **data_p, size_t *datalen_p) {
+ struct cursor *cursor;
+ int dbret;
+ DBT Key, Data;
+ retvalue r;
+ size_t valuelen = strlen(value);
+
+ /* cursor_next is not allowed with this type: */
+ r = newcursor(table, DB_GET_BOTH, cursor_p);
+ if(!RET_IS_OK(r)) {
+ return r;
+ }
+ cursor = *cursor_p;
+ SETDBT(Key, key);
+ SETDBTl(Data, value, valuelen + 1);
+ dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_GET_BOTH);
+ if (dbret != 0) {
+ if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) {
+ table_printerror(table, dbret, "c_get(DB_GET_BOTH)");
+ r = RET_DBERR(dbret);
+ } else
+ r = RET_NOTHING;
+ (void)cursor->cursor->c_close(cursor->cursor);
+ free(cursor);
+ return r;
+ }
+ if (Data.size < valuelen + 2 ||
+ ((const char*)Data.data)[Data.size-1] != '\0') {
+ if (table->subname != NULL)
+ fprintf(stderr,
+"Database %s(%s) returned corrupted (not paired) data!",
+ table->name, table->subname);
+ else
+ fprintf(stderr,
+"Database %s returned corrupted (not paired) data!",
+ table->name);
+ (void)cursor->cursor->c_close(cursor->cursor);
+ free(cursor);
+ return RET_ERROR;
+ }
+ if (data_p != NULL)
+ *data_p = ((const char*)Data.data) + valuelen + 1;
+ if (datalen_p != NULL)
+ *datalen_p = Data.size - valuelen - 2;
+ *cursor_p = cursor;
+ return RET_OK;
+}
+
+retvalue cursor_close(struct table *table, struct cursor *cursor) {
+ int dbret;
+ retvalue r;
+
+ if (cursor == NULL)
+ return RET_OK;
+
+ r = cursor->r;
+ dbret = cursor->cursor->c_close(cursor->cursor);
+ cursor->cursor = NULL;
+ free(cursor);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_close");
+ RET_UPDATE(r, RET_DBERR(dbret));
+ }
+ return r;
+}
+
/* Advance the cursor one step using the DB_NEXT* mode it was created
 * with, filling Key/Data with libdb-owned memory.
 * Returns false at the end of iteration or on error; an error is
 * additionally remembered in cursor->r for cursor_close() to return. */
static bool cursor_next(struct table *table, struct cursor *cursor, DBT *Key, DBT *Data) {
	int dbret;

	/* NULL cursor: file-less readonly table, nothing to iterate */
	if (cursor == NULL)
		return false;

	CLEARDBT(*Key);
	CLEARDBT(*Data);

	dbret = cursor->cursor->c_get(cursor->cursor, Key, Data,
			cursor->flags);
	if (dbret == DB_NOTFOUND)
		return false;

	if (dbret != 0) {
		table_printerror(table, dbret,
			(cursor->flags==DB_NEXT)
			? "c_get(DB_NEXT)"
			: (cursor->flags==DB_NEXT_DUP)
			? "c_get(DB_NEXT_DUP)"
			: "c_get(DB_???NEXT)");
		cursor->r = RET_DBERR(dbret);
		return false;
	}
	return true;
}
+
+bool cursor_nexttempdata(struct table *table, struct cursor *cursor, const char **key, const char **data, size_t *len_p) {
+ DBT Key, Data;
+ bool success;
+ retvalue r;
+
+ success = cursor_next(table, cursor, &Key, &Data);
+ if (!success)
+ return false;
+ r = parse_data(table, Key, Data, key, data, len_p);
+ if (RET_WAS_ERROR(r)) {
+ cursor->r = r;
+ return false;
+ }
+ return true;
+}
+
+bool cursor_nextpair(struct table *table, struct cursor *cursor, /*@null@*/const char **key_p, const char **value_p, const char **data_p, size_t *datalen_p) {
+ DBT Key, Data;
+ bool success;
+ retvalue r;
+
+ success = cursor_next(table, cursor, &Key, &Data);
+ if (!success)
+ return false;
+ r = parse_pair(table, Key, Data, key_p, value_p, data_p, datalen_p);
+ if (RET_WAS_ERROR(r)) {
+ cursor->r = r;
+ return false;
+ }
+ return true;
+}
+
+retvalue cursor_replace(struct table *table, struct cursor *cursor, const char *data, size_t datalen) {
+ DBT Key, Data;
+ int dbret;
+
+ assert (cursor != NULL);
+ assert (!table->readonly);
+
+ CLEARDBT(Key);
+ SETDBTl(Data, data, datalen + 1);
+
+ dbret = cursor->cursor->c_put(cursor->cursor, &Key, &Data, DB_CURRENT);
+
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_put(DB_CURRENT)");
+ return RET_DBERR(dbret);
+ }
+ return RET_OK;
+}
+
+retvalue cursor_delete(struct table *table, struct cursor *cursor, const char *key, const char *value) {
+ int dbret;
+
+ assert (cursor != NULL);
+ assert (!table->readonly);
+
+ dbret = cursor->cursor->c_del(cursor->cursor, 0);
+
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_del");
+ return RET_DBERR(dbret);
+ }
+ if (table->verbose) {
+ if (value != NULL)
+ if (table->subname != NULL)
+ printf("db: '%s' '%s' removed from %s(%s).\n",
+ key, value,
+ table->name, table->subname);
+ else
+ printf("db: '%s' '%s' removed from %s.\n",
+ key, value, table->name);
+ else
+ if (table->subname != NULL)
+ printf("db: '%s' removed from %s(%s).\n",
+ key, table->name, table->subname);
+ else
+ printf("db: '%s' removed from %s.\n",
+ key, table->name);
+ }
+ return RET_OK;
+}
+
+static bool table_isempty(struct table *table) {
+ DBC *cursor;
+ DBT Key, Data;
+ int dbret;
+
+ dbret = table->berkeleydb->cursor(table->berkeleydb, NULL,
+ &cursor, 0);
+ if (dbret != 0) {
+ table_printerror(table, dbret, "cursor");
+ return true;
+ }
+ CLEARDBT(Key);
+ CLEARDBT(Data);
+
+ dbret = cursor->c_get(cursor, &Key, &Data, DB_NEXT);
+ if (dbret == DB_NOTFOUND) {
+ (void)cursor->c_close(cursor);
+ return true;
+ }
+ if (dbret != 0) {
+ table_printerror(table, dbret, "c_get(DB_NEXT)");
+ (void)cursor->c_close(cursor);
+ return true;
+ }
+ dbret = cursor->c_close(cursor);
+ if (dbret != 0)
+ table_printerror(table, dbret, "c_close");
+ return false;
+}
+
+retvalue database_haspackages(const char *identifier) {
+ struct table *packages;
+ retvalue r;
+ bool empty;
+
+ r = database_openpackages(identifier, true, &packages);
+ if (RET_WAS_ERROR(r))
+ return r;
+ empty = table_isempty(packages);
+ (void)table_close(packages);
+ return empty?RET_NOTHING:RET_OK;
+}
+
+/****************************************************************************
+ * Open the different types of tables with their needed flags: *
+ ****************************************************************************/
+/* Open the table 'subtable' within database file 'filename'; when
+ * secondary_filename is not NULL, also open the matching secondary index
+ * table there.  Each (file, subtable) pair may only be opened once; a
+ * global list of opened tables is kept to detect double opens (which
+ * trigger bugs in the underlying BerkeleyDB).  On success *table_p owns
+ * the returned table (release with table_close). */
+static retvalue database_table_secondary(const char *filename, const char *subtable, enum database_type type, uint32_t flags,
+		const char *secondary_filename, enum database_type secondary_type, /*@out@*/struct table **table_p) {
+	struct table *table;
+	struct opened_tables *opened_table;
+	retvalue r;
+
+	if (verbose >= 15)
+		fprintf(stderr, "trace: database_table_secondary(filename=%s, subtable=%s, type=%i, flags=%u, secondary_filename=%s, secondary_type=%i) called.\n",
+			filename, subtable, type, flags, secondary_filename, secondary_type);
+
+	/* refuse opening the same (file, subtable) pair a second time */
+	for (struct opened_tables *iter = opened_tables; iter != NULL; iter = iter->next) {
+		if(strcmp2(iter->name, filename) == 0 && strcmp2(iter->subname, subtable) == 0) {
+			fprintf(stderr,
+"Internal Error: Trying to open table '%s' from file '%s' multiple times.\n"
+"This should normally not happen (to avoid triggering bugs in the underlying BerkeleyDB)\n",
+				subtable, filename);
+			return RET_ERROR;
+		}
+	}
+
+	table = zNEW(struct table);
+	if (FAILEDTOALLOC(table))
+		return RET_ERROR_OOM;
+	/* TODO: is filename always an static constant? then we could drop the dup */
+	table->name = strdup(filename);
+	if (FAILEDTOALLOC(table->name)) {
+		free(table);
+		return RET_ERROR_OOM;
+	}
+	if (subtable != NULL) {
+		table->subname = strdup(subtable);
+		if (FAILEDTOALLOC(table->subname)) {
+			free(table->name);
+			free(table);
+			return RET_ERROR_OOM;
+		}
+	} else
+		table->subname = NULL;
+	table->readonly = ISSET(flags, DB_RDONLY);
+	table->verbose = rdb_verbose;
+	table->flags = flags;
+	r = database_opentable(filename, subtable, type, flags,
+			&table->berkeleydb);
+	if (RET_WAS_ERROR(r)) {
+		free(table->subname);
+		free(table->name);
+		free(table);
+		return r;
+	}
+	if (r == RET_NOTHING) {
+		if (ISSET(flags, DB_RDONLY)) {
+			/* sometimes we don't want a return here, when? */
+			table->berkeleydb = NULL;
+			r = RET_OK;
+		} else {
+			free(table->subname);
+			free(table->name);
+			free(table);
+			return r;
+		}
+
+	}
+
+	if (secondary_filename != NULL) {
+		r = database_opentable(secondary_filename, subtable, secondary_type, flags,
+				&table->sec_berkeleydb);
+		if (RET_WAS_ERROR(r)) {
+			/* the primary handle may have been left NULL above
+			 * (read-only open of a missing database), so guard
+			 * the close to avoid a NULL dereference: */
+			if (table->berkeleydb != NULL)
+				(void)table->berkeleydb->close(table->berkeleydb, 0);
+			free(table->subname);
+			free(table->name);
+			free(table);
+			return r;
+		}
+		if (r == RET_NOTHING) {
+			if (ISSET(flags, DB_RDONLY)) {
+				/* sometimes we don't want a return here, when? */
+				table->sec_berkeleydb = NULL;
+				r = RET_OK;
+			} else {
+				if (table->berkeleydb != NULL)
+					(void)table->berkeleydb->close(table->berkeleydb, 0);
+				free(table->subname);
+				free(table->name);
+				free(table);
+				return r;
+			}
+
+		}
+	}
+
+	opened_table = zNEW(struct opened_tables);
+	if (FAILEDTOALLOC(opened_table)) {
+		/* NOTE(review): the already opened berkeleydb handles are not
+		 * closed on this OOM path — presumably process shutdown cleans
+		 * up, but confirm. */
+		free(table->subname);
+		free(table->name);
+		free(table);
+		return RET_ERROR_OOM;
+	}
+	opened_table->name = table->name;
+	opened_table->subname = table->subname;
+	opened_table->next = opened_tables;
+	opened_tables = opened_table;
+
+	if (verbose >= 25)
+		print_opened_tables(stderr);
+
+	*table_p = table;
+	return r;
+}
+
+/* Convenience wrapper around database_table_secondary for the common
+ * case of a table without a secondary index. */
+static retvalue database_table(const char *filename, const char *subtable, enum database_type type, uint32_t flags, /*@out@*/struct table **table_p) {
+	return database_table_secondary(filename, subtable, type, flags, NULL, 0, table_p);
+}
+
+/* Open (creating it when missing) the global references table into
+ * rdb_references.  On error rdb_references is reset to NULL. */
+retvalue database_openreferences(void) {
+	retvalue result;
+
+	assert (rdb_references == NULL);
+	result = database_table("references.db", "references",
+			dbt_BTREEDUP, DB_CREATE, &rdb_references);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result)) {
+		rdb_references = NULL;
+		return result;
+	}
+	rdb_references->verbose = false;
+	return RET_OK;
+}
+
+/* BerkeleyDB btree comparison callback ordering 'package|version' keys by
+ * descending Debian version.  BerkeleyDB offers no way for a comparator
+ * to report failure, so malformed keys simply compare as -1. */
+static int debianversioncompare(UNUSED(DB *db), const DBT *a, const DBT *b) {
+	const char *a_version;
+	const char *b_version;
+	int versioncmp;
+	// There is no way to indicate an error to the caller
+	// Thus return -1 in case of an error
+	retvalue r = -1;
+
+	if (a->size == 0 || ((char*)a->data)[a->size-1] != '\0') {
+		/* the '*' precision of %.*s must be an int; DBT.size is an
+		 * unsigned 32-bit type, so cast explicitly */
+		fprintf(stderr, "Database value '%.*s' empty or not NULL terminated.\n", (int)a->size, (char*)a->data);
+		return r;
+	}
+	if (b->size == 0 || ((char*)b->data)[b->size-1] != '\0') {
+		fprintf(stderr, "Database value '%.*s' empty or not NULL terminated.\n", (int)b->size, (char*)b->data);
+		return r;
+	}
+
+	/* the version is the part after the '|' separator */
+	a_version = strchr(a->data, '|');
+	if (a_version == NULL) {
+		fprintf(stderr, "Database value '%s' malformed. It should be 'package|version'.\n", (char*)a->data);
+		return r;
+	}
+	a_version++;
+	b_version = strchr(b->data, '|');
+	if (b_version == NULL) {
+		fprintf(stderr, "Database value '%s' malformed. It should be 'package|version'.\n", (char*)b->data);
+		return r;
+	}
+	b_version++;
+
+	r = dpkgversions_cmp(a_version, b_version, &versioncmp);
+	if (RET_WAS_ERROR(r)) {
+		fprintf(stderr, "Parse errors processing versions.\n");
+		return r;
+	}
+
+	/* negated so that newer versions sort first */
+	return -versioncmp;
+}
+
+/* only compare the first 0-terminated part of the data */
+static int paireddatacompare(UNUSED(DB *db), const DBT *a, const DBT *b
+#if DB_VERSION_MAJOR >= 6
+#warning Berkeley DB >= 6.0 is not yet tested and highly experimental
+	, UNUSED(size_t *locp)
+	/* "The locp parameter is currently unused, and must be set to NULL or corruption can occur."
+	 * What the ...! How am I supposed to handle that? */
+#endif
+) {
+	/* compare at most the shorter of the two sizes; since stored keys
+	 * carry a terminating NUL, strncmp stops at the first key's end */
+	if (a->size < b->size)
+		return strncmp(a->data, b->data, a->size);
+	else
+		return strncmp(a->data, b->data, b->size);
+}
+
+/* Open the tracking table of the given codename, read-only or
+ * read-write (creating it when writing). */
+retvalue database_opentracking(const char *codename, bool readonly, struct table **table_p) {
+	struct table *tracking;
+	retvalue result;
+
+	if (rdb_nopackages) {
+		(void)fputs(
+"Internal Error: Accessing packages database while that was not prepared!\n",
+			stderr);
+		return RET_ERROR;
+	}
+
+	result = database_table("tracking.db", codename,
+			dbt_BTREEPAIRS, readonly?DB_RDONLY:DB_CREATE, &tracking);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	*table_p = tracking;
+	return RET_OK;
+}
+
+/* BerkeleyDB secondary-index callback: derive the secondary key (the
+ * package name, i.e. everything before the '|' separator) from a
+ * 'package|version' primary key. */
+static int get_package_name(DB *secondary, const DBT *pkey, const DBT *pdata, DBT *skey) {
+	const char *separator;
+	size_t length;
+
+	separator = memchr(pkey->data, '|', pkey->size);
+	if (unlikely(separator == NULL)) {
+		return DB_MALFORMED_KEY;
+	}
+
+	/* proper pointer difference instead of casting both addresses
+	 * through size_t */
+	length = (size_t)(separator - (const char *)pkey->data);
+	skey->flags = DB_DBT_APPMALLOC;
+	skey->data = strndup(pkey->data, length);
+	if (FAILEDTOALLOC(skey->data)) {
+		/* NOTE(review): RET_ERROR_OOM is a retvalue, while BerkeleyDB
+		 * expects one of its own error codes here; any non-zero value
+		 * aborts the operation, but confirm this is intended. */
+		return RET_ERROR_OOM;
+	}
+	/* include the terminating NUL in the secondary key */
+	skey->size = length + 1;
+	return 0;
+}
+
+/* Convert a pre-new-layout packages.db (one btree per identifier keyed by
+ * package name only) into the current layout keyed by 'name|version' with
+ * a packagenames.db secondary index.  The old file is first moved aside
+ * to packages.legacy.db, copied table by table, and deleted on success. */
+static retvalue database_translate_legacy_packages(void) {
+	struct cursor *databases_cursor, *cursor;
+	struct table *legacy_databases, *legacy_table, *packages;
+	const char *chunk, *packagename;
+	char *identifier, *key, *legacy_filename, *packages_filename, *packageversion;
+	retvalue r, result;
+	int ret, e;
+	size_t chunk_len;
+	DBT Key, Data;
+
+	if (verbose >= 15)
+		fprintf(stderr, "trace: database_translate_legacy_packages() called.\n");
+
+	if (!isdir(global.dbdir)) {
+		fprintf(stderr, "Cannot find directory '%s'!\n", global.dbdir);
+		return RET_ERROR;
+	}
+
+	packages_filename = dbfilename("packages.db");
+	if (FAILEDTOALLOC(packages_filename))
+		return RET_ERROR_OOM;
+	legacy_filename = dbfilename("packages.legacy.db");
+	if (FAILEDTOALLOC(legacy_filename)) {
+		free(packages_filename);
+		return RET_ERROR_OOM;
+	}
+	ret = rename(packages_filename, legacy_filename);
+	if (ret != 0) {
+		e = errno;
+		fprintf(stderr, "error %d renaming %s to %s: %s\n",
+			e, packages_filename, legacy_filename, strerror(e));
+		free(packages_filename);
+		free(legacy_filename);
+		/* return a proper retvalue instead of a raw errno */
+		return RET_ERRNO((e != 0)?e:EINVAL);
+	}
+	if (verbose >= 15)
+		fprintf(stderr, "trace: Moved '%s' to '%s'.\n", packages_filename, legacy_filename);
+	free(packages_filename);
+
+	r = database_table("packages.legacy.db", NULL, dbt_BTREE, DB_RDONLY, &legacy_databases);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		free(legacy_filename);
+		return r;
+	}
+
+	r = table_newglobalcursor(legacy_databases, true, &databases_cursor);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(legacy_databases);
+		free(legacy_filename);
+		return r;
+	}
+	result = RET_NOTHING;
+	// Iterate over all databases inside the packages.db file.
+	while (cursor_next(legacy_databases, databases_cursor, &Key, &Data)) {
+		identifier = strndup(Key.data, Key.size);
+		if (FAILEDTOALLOC(identifier)) {
+			RET_UPDATE(result, RET_ERROR_OOM);
+			break;
+		}
+		if (verbose >= 15)
+			fprintf(stderr, "Converting table '%s' to new layout...\n", identifier);
+
+		r = database_table("packages.legacy.db", identifier, dbt_BTREE, DB_RDONLY, &legacy_table);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			free(identifier);
+			RET_UPDATE(result, r);
+			break;
+		}
+
+		r = table_newglobalcursor(legacy_table, true, &cursor);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			(void)table_close(legacy_table);
+			free(identifier);
+			RET_UPDATE(result, r);
+			break;
+		}
+
+		r = database_openpackages(identifier, false, &packages);
+		free(identifier);
+		identifier = NULL;
+		if (RET_WAS_ERROR(r)) {
+			/* close this table's cursor here; databases_cursor is
+			 * closed exactly once after the loop (closing it here
+			 * as well would close it twice) */
+			(void)cursor_close(legacy_table, cursor);
+			(void)table_close(legacy_table);
+			RET_UPDATE(result, r);
+			break;
+		}
+
+		while (cursor_nexttempdata(legacy_table, cursor, &packagename, &chunk, &chunk_len)) {
+			r = chunk_getvalue(chunk, "Version", &packageversion);
+			if (!RET_IS_OK(r)) {
+				RET_UPDATE(result, r);
+				break;
+			}
+			key = package_primarykey(packagename, packageversion);
+			/* chunk_getvalue allocates; don't leak it */
+			free(packageversion);
+			r = table_addrecord(packages, key, chunk, chunk_len, false);
+			free(key);
+			if (RET_WAS_ERROR(r)) {
+				RET_UPDATE(result, r);
+				break;
+			}
+		}
+
+		r = table_close(packages);
+		RET_UPDATE(result, r);
+		r = cursor_close(legacy_table, cursor);
+		RET_UPDATE(result, r);
+		r = table_close(legacy_table);
+		RET_UPDATE(result, r);
+
+		if (RET_WAS_ERROR(result)) {
+			break;
+		}
+		result = RET_OK;
+	}
+	r = cursor_close(legacy_databases, databases_cursor);
+	RET_ENDUPDATE(result, r);
+	r = table_close(legacy_databases);
+	RET_ENDUPDATE(result, r);
+
+	if (RET_IS_OK(result)) {
+		e = deletefile(legacy_filename);
+		if (e != 0) {
+			fprintf(stderr, "Could not delete '%s'!\n"
+"It can now safely be deleted and it all that is left to be done!\n",
+				legacy_filename);
+			free(legacy_filename);
+			return RET_ERRNO(e);
+		}
+	}
+	free(legacy_filename);
+
+	return result;
+}
+
+/* Open the package table of the given identifier together with its
+ * packagenames.db secondary index.  When a primary table exists but no
+ * secondary one does, the database is in the legacy layout: convert it
+ * and retry (recursing once). */
+retvalue database_openpackages(const char *identifier, bool readonly, struct table **table_p) {
+	struct table *table;
+	retvalue r;
+
+	if (rdb_nopackages) {
+		(void)fputs(
+"Internal Error: Accessing packages database while that was not prepared!\n",
+			stderr);
+		return RET_ERROR;
+	}
+
+	r = database_table_secondary("packages.db", identifier,
+			dbt_BTREE, readonly?DB_RDONLY:DB_CREATE,
+			"packagenames.db", dbt_BTREEVERSIONS, &table);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* primary present but secondary missing => legacy layout */
+	if (table->berkeleydb != NULL && table->sec_berkeleydb == NULL) {
+		r = table_close(table);
+		if (RET_WAS_ERROR(r)) {
+			return r;
+		}
+		r = database_translate_legacy_packages();
+		if (RET_WAS_ERROR(r)) {
+			return r;
+		}
+		return database_openpackages(identifier, readonly, table_p);
+	}
+
+	if (table->berkeleydb != NULL && table->sec_berkeleydb != NULL) {
+		/* NOTE(review): DB->associate returns a plain int BerkeleyDB
+		 * error code, which is stored in a retvalue and checked with
+		 * RET_WAS_ERROR — confirm positive errno-style failures are
+		 * not silently ignored here.  Also, on error 'table' is not
+		 * closed before returning — looks like a leak; verify. */
+		r = table->berkeleydb->associate(table->berkeleydb, NULL,
+				table->sec_berkeleydb, get_package_name, 0);
+		if (RET_WAS_ERROR(r)) {
+			return r;
+		}
+	}
+
+	*table_p = table;
+	return RET_OK;
+}
+
+/* Get a list of all identifiers having a package list */
+retvalue database_listpackages(struct strlist *identifiers) {
+	/* each subtable of packages.db corresponds to one identifier */
+	return database_listsubtables("packages.db", identifiers);
+}
+
+/* drop a database: remove the package table of an identifier together
+ * with its secondary name index */
+retvalue database_droppackages(const char *identifier) {
+	retvalue result = database_dropsubtable("packages.db", identifier);
+
+	if (!RET_IS_OK(result))
+		return result;
+	return database_dropsubtable("packagenames.db", identifier);
+}
+
+/* Open the global checksums (rdb_checksums) and contents-cache
+ * (rdb_contents) tables.  Refuses to proceed when the database is still
+ * in a pre-3.0.0 filelist format or still has a legacy files.db. */
+retvalue database_openfiles(void) {
+	retvalue r;
+	struct strlist identifiers;
+	bool oldfiles;
+
+	assert (rdb_checksums == NULL);
+	assert (rdb_contents == NULL);
+
+	r = database_listsubtables("contents.cache.db", &identifiers);
+	if (RET_IS_OK(r)) {
+		if (strlist_in(&identifiers, "filelists")) {
+			fprintf(stderr,
+"Your %s/contents.cache.db file still contains a table of cached file lists\n"
+"in the old (pre 3.0.0) format. You have to either delete that file (and lose\n"
+"all caches of file lists) or run reprepro with argument translatefilelists\n"
+"to translate the old caches into the new format.\n",
+				global.dbdir);
+			strlist_done(&identifiers);
+			return RET_ERROR;
+		}
+		strlist_done(&identifiers);
+	}
+
+	r = database_table("checksums.db", "pool",
+			dbt_BTREE, DB_CREATE,
+			&rdb_checksums);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		rdb_checksums = NULL;
+		return r;
+	}
+	r = database_hasdatabasefile("files.db", &oldfiles);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(rdb_checksums);
+		rdb_checksums = NULL;
+		return r;
+	}
+	if (oldfiles) {
+		fprintf(stderr,
+"Error: database uses deprecated format.\n"
+"Please run translatelegacychecksums to update to the new format first.\n");
+		/* close the checksums table opened above so this error path
+		 * does not leak the handle (matches the other error paths) */
+		(void)table_close(rdb_checksums);
+		rdb_checksums = NULL;
+		return RET_ERROR;
+	}
+
+	// TODO: only create this file once it is actually needed...
+	r = database_table("contents.cache.db", "compressedfilelists",
+			dbt_BTREE, DB_CREATE, &rdb_contents);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(rdb_checksums);
+		rdb_checksums = NULL;
+		rdb_contents = NULL;
+	}
+	return r;
+}
+
+/* Open the per-codename export cache table, migrating an old
+ * release.cache.db to release.caches.db first when necessary. */
+retvalue database_openreleasecache(const char *codename, struct table **cachedb_p) {
+	retvalue r;
+	char *oldcachefilename;
+
+	/* Since 3.1.0 it's release.caches.db, before release.cache.db.
+	 * The new file also contains the sha1 checksums and is extensible
+	 * for more in the future. Thus if there is only the old variant,
+	 * rename to the new. (So no old version by accident uses it and
+	 * puts the additional sha1 data into the md5sum fields.)
+	 * If both files are there, just delete both, as neither will
+	 * be very current then.
+	 * */
+
+	oldcachefilename = dbfilename("release.cache.db");
+	if (FAILEDTOALLOC(oldcachefilename))
+		return RET_ERROR_OOM;
+	if (isregularfile(oldcachefilename)) {
+		char *newcachefilename;
+
+		newcachefilename = dbfilename("release.caches.db");
+		if (FAILEDTOALLOC(newcachefilename)) {
+			free(oldcachefilename);
+			return RET_ERROR_OOM;
+		}
+		if (isregularfile(newcachefilename)
+		    || rename(oldcachefilename, newcachefilename) != 0) {
+			fprintf(stderr,
+"Deleting old-style export cache file %s!\n"
+"This means that all index files (even unchanged) will be rewritten the\n"
+"next time parts of their distribution are changed. This should only\n"
+"happen once while migration from pre-3.1.0 to later versions.\n",
+				oldcachefilename);
+
+			if (unlink(oldcachefilename) != 0) {
+				int e = errno;
+				fprintf(stderr, "Cannot delete '%s': %s!",
+					oldcachefilename,
+					strerror(e));
+				free(oldcachefilename);
+				free(newcachefilename);
+				return RET_ERRNO(e);
+			}
+			/* per the comment above: when both files existed,
+			 * delete both, as neither can be trusted.  (The old
+			 * code unlinked the already-removed old file again
+			 * instead of the new one.) */
+			(void)unlink(newcachefilename);
+		}
+		free(newcachefilename);
+	}
+	free(oldcachefilename);
+
+	r = database_table("release.caches.db", codename,
+			dbt_HASH, DB_CREATE, cachedb_p);
+	if (RET_IS_OK(r))
+		(*cachedb_p)->verbose = false;
+	return r;
+}
+
+/* Copy every record of oldtable into newtable (keys must not already
+ * exist there).  Records are copied including their terminating NUL. */
+static retvalue table_copy(struct table *oldtable, struct table *newtable) {
+	retvalue r, r2;
+	struct cursor *cursor;
+	const char *filekey, *data;
+	size_t data_len;
+
+	r = table_newglobalcursor(oldtable, true, &cursor);
+	if (!RET_IS_OK(r))
+		return r;
+	while (cursor_nexttempdata(oldtable, cursor, &filekey,
+				&data, &data_len)) {
+		r = table_adduniqsizedrecord(newtable, filekey,
+				data, data_len+1, false, true);
+		if (RET_WAS_ERROR(r)) {
+			/* don't leak the cursor on error */
+			(void)cursor_close(oldtable, cursor);
+			return r;
+		}
+	}
+	/* the cursor was never closed in the original code */
+	r2 = cursor_close(oldtable, cursor);
+	if (RET_WAS_ERROR(r2))
+		return r2;
+	return RET_OK;
+}
+
+/* Translate an old-style (pre 3.0.0) "filelists" table inside
+ * contents.cache.db into the new "compressedfilelists" format.  The old
+ * file is moved aside, translated, and removed on success; on failure it
+ * is moved back so nothing is lost. */
+retvalue database_translate_filelists(void) {
+	char *dbname, *tmpdbname;
+	struct table *oldtable, *newtable;
+	struct strlist identifiers;
+	int ret;
+	retvalue r, r2;
+
+	r = database_listsubtables("contents.cache.db", &identifiers);
+	if (RET_IS_OK(r)) {
+		if (!strlist_in(&identifiers, "filelists")) {
+			fprintf(stderr,
+"Your %s/contents.cache.db file does not contain an old style database!\n",
+				global.dbdir);
+			strlist_done(&identifiers);
+			return RET_NOTHING;
+		}
+		strlist_done(&identifiers);
+	}
+
+	dbname = dbfilename("contents.cache.db");
+	if (FAILEDTOALLOC(dbname))
+		return RET_ERROR_OOM;
+	tmpdbname = dbfilename("old.contents.cache.db");
+	if (FAILEDTOALLOC(tmpdbname)) {
+		free(dbname);
+		return RET_ERROR_OOM;
+	}
+	ret = rename(dbname, tmpdbname);
+	if (ret != 0) {
+		int e = errno;
+		fprintf(stderr, "Could not rename '%s' into '%s': %s(%d)\n",
+			dbname, tmpdbname, strerror(e), e);
+		free(dbname);
+		free(tmpdbname);
+		return RET_ERRNO(e);
+	}
+	newtable = NULL;
+	r = database_table("contents.cache.db", "compressedfilelists",
+			dbt_BTREE, DB_CREATE, &newtable);
+	assert (r != RET_NOTHING);
+	oldtable = NULL;
+	if (RET_IS_OK(r)) {
+		r = database_table("old.contents.cache.db", "filelists",
+				dbt_BTREE, DB_RDONLY, &oldtable);
+		if (r == RET_NOTHING) {
+			fprintf(stderr, "Could not find old-style database!\n");
+			r = RET_ERROR;
+		}
+	}
+	if (RET_IS_OK(r)) {
+		r = filelists_translate(oldtable, newtable);
+		if (r == RET_NOTHING)
+			r = RET_OK;
+	}
+	r2 = table_close(oldtable);
+	RET_ENDUPDATE(r, r2);
+	oldtable = NULL;
+	if (RET_IS_OK(r)) {
+		/* copy the new-style database, */
+		r = database_table("old.contents.cache.db", "compressedfilelists",
+				dbt_BTREE, DB_RDONLY, &oldtable);
+		if (RET_IS_OK(r)) {
+			/* if there is one... */
+			r = table_copy(oldtable, newtable);
+			r2 = table_close(oldtable);
+			RET_ENDUPDATE(r, r2);
+		}
+		if (r == RET_NOTHING) {
+			r = RET_OK;
+		}
+	}
+	r2 = table_close(newtable);
+	RET_ENDUPDATE(r, r2);
+	if (RET_IS_OK(r))
+		(void)unlink(tmpdbname);
+
+	if (RET_WAS_ERROR(r)) {
+		/* restore the original file so no data is lost */
+		ret = rename(tmpdbname, dbname);
+		if (ret != 0) {
+			int e = errno;
+			/* arguments in rename order: tmpdbname back to dbname
+			 * (the original message had them swapped) */
+			fprintf(stderr,
+"Could not rename '%s' back into '%s': %s(%d)\n",
+				tmpdbname, dbname, strerror(e), e);
+			free(tmpdbname);
+			free(dbname);
+			return RET_ERRNO(e);
+		}
+		free(tmpdbname);
+		free(dbname);
+		return r;
+	}
+	free(tmpdbname);
+	free(dbname);
+	return RET_OK;
+}
+
+/* This is already implemented as standalone functions duplicating a bit
+ * of database_create and of files.c,
+ * because database_create is planned to error out if there is still an old
+ * files.db and files.c is supposed to lose all support for it in the next
+ * major version */
+
+/* Merge the legacy files.db md5sums into checksums.db in two passes:
+ * first add (or refresh) a checksums entry for every md5sums record,
+ * then delete every checksums entry with no md5sums counterpart.
+ * Prints a summary of what was kept/added/replaced/removed. */
+static inline retvalue translate(struct table *oldmd5sums, struct table *newchecksums) {
+	long numold = 0, numnew = 0, numreplace = 0, numretro = 0;
+	struct cursor *cursor, *newcursor;
+	const char *filekey, *md5sum, *all;
+	size_t alllen;
+	retvalue r;
+
+	/* first add all md5sums to checksums if not there yet */
+
+	r = table_newglobalcursor(oldmd5sums, true, &cursor);
+	if (RET_WAS_ERROR(r))
+		return r;
+	while (cursor_nexttempdata(oldmd5sums, cursor,
+			&filekey, &md5sum, NULL)) {
+		struct checksums *n = NULL;
+		const char *combined;
+		size_t combinedlen;
+
+		/* is there already a checksums entry for this filekey? */
+		r = table_gettemprecord(newchecksums, filekey,
+				&all, &alllen);
+		if (RET_IS_OK(r))
+			r = checksums_setall(&n, all, alllen);
+		if (RET_IS_OK(r)) {
+			if (checksums_matches(n, cs_md5sum, md5sum)) {
+				/* already there, nothing to do */
+				checksums_free(n);
+				numnew++;
+				continue;
+			}
+			/* new item does not match */
+			if (verbose > 0)
+				printf(
+"Overwriting stale new-checksums entry '%s'!\n",
+					filekey);
+			numreplace++;
+			checksums_free(n);
+			n = NULL;
+		}
+		if (RET_WAS_ERROR(r)) {
+			(void)cursor_close(oldmd5sums, cursor);
+			return r;
+		}
+		/* parse and recreate, to only have sanitized strings
+		 * in the database */
+		r = checksums_parse(&n, md5sum);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			(void)cursor_close(oldmd5sums, cursor);
+			return r;
+		}
+
+		r = checksums_getcombined(n, &combined, &combinedlen);
+		assert (r != RET_NOTHING);
+		if (!RET_IS_OK(r)) {
+			(void)cursor_close(oldmd5sums, cursor);
+			return r;
+		}
+		numold++;
+		r = table_adduniqsizedrecord(newchecksums, filekey,
+				combined, combinedlen + 1, true, false);
+		assert (r != RET_NOTHING);
+		if (!RET_IS_OK(r)) {
+			(void)cursor_close(oldmd5sums, cursor);
+			return r;
+		}
+	}
+	r = cursor_close(oldmd5sums, cursor);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* then delete everything from checksums that is not in md5sums */
+
+	r = table_newglobalcursor(oldmd5sums, true, &cursor);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = table_newglobalcursor(newchecksums, true, &newcursor);
+	if (RET_WAS_ERROR(r)) {
+		cursor_close(oldmd5sums, cursor);
+		return r;
+	}
+	/* walk both (sorted) tables in lockstep; any checksums key that
+	 * sorts before the current md5sums key has no md5sums entry and
+	 * is deleted */
+	while (cursor_nexttempdata(oldmd5sums, cursor,
+			&filekey, &md5sum, NULL)) {
+		bool more;
+		int cmp;
+		const char *newfilekey, *dummy;
+
+		do {
+			more = cursor_nexttempdata(newchecksums, newcursor,
+					&newfilekey, &dummy, NULL);
+			/* should have been added in the last step */
+			assert (more);
+			cmp = strcmp(filekey, newfilekey);
+			/* should have been added in the last step */
+			assert (cmp >= 0);
+			more = cmp > 0;
+			if (more) {
+				numretro++;
+				if (verbose > 0)
+					printf(
+"Deleting stale new-checksums entry '%s'!\n",
+						newfilekey);
+				r = cursor_delete(newchecksums, newcursor,
+						newfilekey, dummy);
+				if (RET_WAS_ERROR(r)) {
+					cursor_close(oldmd5sums, cursor);
+					cursor_close(newchecksums, newcursor);
+					return r;
+				}
+			}
+		} while (more);
+	}
+	r = cursor_close(oldmd5sums, cursor);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = cursor_close(newchecksums, newcursor);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (verbose >= 0) {
+		printf("%ld packages were already in the new checksums.db\n",
+				numnew);
+		printf("%ld packages were added to the new checksums.db\n",
+				numold - numreplace);
+		if (numretro != 0)
+			printf(
+"%ld were only in checksums.db and not in files.db\n"
+"This should only have happened if you added them with a newer version\n"
+"and then deleted them with an older version of reprepro.\n",
+				numretro);
+		if (numreplace != 0)
+			printf(
+"%ld were different checksums.db and not in files.db\n"
+"This should only have happened if you added them with a newer version\n"
+"and then deleted them with an older version of reprepro and\n"
+"then readded them with a old version.\n",
+				numreplace);
+		if (numretro != 0 || numreplace != 0)
+			printf(
+"If you never run a old version after a new version,\n"
+"you might want to check with check and checkpool if something went wrong.\n");
+	}
+	return RET_OK;
+}
+
+/* Standalone entry point translating a legacy files.db (md5sums only)
+ * into checksums.db; locks the database itself as it runs instead of
+ * database_create.  Deletes files.db on success. */
+retvalue database_translate_legacy_checksums(bool verbosedb) {
+	struct table *newchecksums, *oldmd5sums;
+	char *fullfilename;
+	retvalue r;
+	int e;
+
+	if (rdb_initialized || rdb_used) {
+		fputs("Internal Error: database initialized a 2nd time!\n",
+			stderr);
+		return RET_ERROR_INTERNAL;
+	}
+
+	if (!isdir(global.dbdir)) {
+		fprintf(stderr, "Cannot find directory '%s'!\n",
+			global.dbdir);
+		return RET_ERROR;
+	}
+
+	rdb_initialized = true;
+	rdb_used = true;
+
+	r = database_lock(0);
+	assert (r != RET_NOTHING);
+	if (!RET_IS_OK(r)) {
+		database_free();
+		return r;
+	}
+	rdb_readonly = READWRITE;
+	rdb_verbose = verbosedb;
+
+	r = readversionfile(false);
+	if (RET_WAS_ERROR(r)) {
+		releaselock();
+		database_free();
+		return r;
+	}
+
+	r = database_table("files.db", "md5sums", dbt_BTREE, 0, &oldmd5sums);
+	if (r == RET_NOTHING) {
+		fprintf(stderr,
+"There is no old files.db in %s. Nothing to translate!\n",
+			global.dbdir);
+		releaselock();
+		database_free();
+		return RET_NOTHING;
+	} else if (RET_WAS_ERROR(r)) {
+		releaselock();
+		database_free();
+		return r;
+	}
+
+	r = database_table("checksums.db", "pool", dbt_BTREE, DB_CREATE,
+			&newchecksums);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(oldmd5sums);
+		releaselock();
+		database_free();
+		return r;
+	}
+
+	r = translate(oldmd5sums, newchecksums);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(oldmd5sums);
+		(void)table_close(newchecksums);
+		releaselock();
+		database_free();
+		return r;
+	}
+
+	(void)table_close(oldmd5sums);
+	r = table_close(newchecksums);
+	if (RET_WAS_ERROR(r)) {
+		releaselock();
+		database_free();
+		return r;
+	}
+	fullfilename = dbfilename("files.db");
+	if (FAILEDTOALLOC(fullfilename)) {
+		releaselock();
+		database_free();
+		return RET_ERROR_OOM;
+	}
+	e = deletefile(fullfilename);
+	if (e != 0) {
+		fprintf(stderr, "Could not delete '%s'!\n"
+"It can now safely be deleted and it all that is left to be done!\n",
+			fullfilename);
+		/* free the filename and release the lock, like every other
+		 * error path above (both were missing here) */
+		free(fullfilename);
+		releaselock();
+		database_free();
+		return RET_ERRNO(e);
+	}
+	free(fullfilename);
+	r = writeversionfile();
+	releaselock();
+	database_free();
+	return r;
+}
diff --git a/database.h b/database.h
new file mode 100644
index 0000000..1fae614
--- /dev/null
+++ b/database.h
@@ -0,0 +1,59 @@
+#ifndef REPREPRO_DATABASE_H
+#define REPREPRO_DATABASE_H
+
+#ifndef REPREPRO_GLOBALS_H
+#include "globals.h"
+#endif
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+struct distribution;
+struct table;
+struct cursor;
+
+/* database lifecycle */
+retvalue database_create(struct distribution *, bool fast, bool /*nopackages*/, bool /*allowunused*/, bool /*readonly*/, size_t /*waitforlock*/, bool /*verbosedb*/);
+retvalue database_close(void);
+
+/* opening the individual tables */
+retvalue database_openfiles(void);
+retvalue database_openreferences(void);
+retvalue database_listpackages(/*@out@*/struct strlist *);
+retvalue database_droppackages(const char *);
+retvalue database_openpackages(const char *, bool /*readonly*/, /*@out@*/struct table **);
+retvalue database_openreleasecache(const char *, /*@out@*/struct table **);
+retvalue database_opentracking(const char *, bool /*readonly*/, /*@out@*/struct table **);
+/* one-shot migrations of legacy database formats */
+retvalue database_translate_filelists(void);
+retvalue database_translate_legacy_checksums(bool /*verbosedb*/);
+
+retvalue table_close(/*@only@*/struct table *);
+
+retvalue database_haspackages(const char *);
+
+/* single-record accessors */
+bool table_recordexists(struct table *, const char *);
+/* retrieve a record from the database, return RET_NOTHING if there is none: */
+retvalue table_getrecord(struct table *, bool, const char *, /*@out@*/char **, /*@out@*/ /*@null@*/ size_t *);
+retvalue table_gettemprecord(struct table *, const char *, /*@out@*//*@null@*/const char **, /*@out@*//*@null@*/size_t *);
+retvalue table_getpair(struct table *, const char *, const char *, /*@out@*/const char **, /*@out@*/size_t *);
+
+/* record insertion/replacement/removal */
+retvalue table_adduniqsizedrecord(struct table *, const char * /*key*/, const char * /*data*/, size_t /*data_size*/, bool /*allowoverwrote*/, bool /*nooverwrite*/);
+retvalue table_adduniqrecord(struct table *, const char * /*key*/, const char * /*data*/);
+retvalue table_addrecord(struct table *, const char * /*key*/, const char * /*data*/, size_t /*len*/, bool /*ignoredups*/);
+retvalue table_replacerecord(struct table *, const char *key, const char *data);
+retvalue table_deleterecord(struct table *, const char *key, bool ignoremissing);
+retvalue table_checkrecord(struct table *, const char *key, const char *data);
+retvalue table_removerecord(struct table *, const char *key, const char *data);
+
+/* cursor-based iteration */
+retvalue table_newglobalcursor(struct table *, bool /*duplicate*/, /*@out@*/struct cursor **);
+retvalue table_newduplicatecursor(struct table *, const char *, long long, /*@out@*/struct cursor **, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *);
+retvalue table_newduplicatepairedcursor(struct table *, const char *, /*@out@*/struct cursor **, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *);
+retvalue table_newpairedcursor(struct table *, const char *, const char *, /*@out@*/struct cursor **, /*@out@*//*@null@*/const char **, /*@out@*//*@null@*/size_t *);
+bool cursor_nexttempdata(struct table *, struct cursor *, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *);
+bool cursor_nextpair(struct table *, struct cursor *, /*@null@*//*@out@*/const char **, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *);
+retvalue cursor_replace(struct table *, struct cursor *, const char *, size_t);
+retvalue cursor_delete(struct table *, struct cursor *, const char *, /*@null@*/const char *);
+retvalue cursor_close(struct table *, /*@only@*/struct cursor *);
+
+#endif
diff --git a/database_p.h b/database_p.h
new file mode 100644
index 0000000..033f0e6
--- /dev/null
+++ b/database_p.h
@@ -0,0 +1,14 @@
+#ifndef REPREPRO_DATABASE_P_H
+#define REPREPRO_DATABASE_P_H
+
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+/* private part of the database interface: the globally opened tables */
+extern /*@null@*/ struct table *rdb_checksums, *rdb_contents;
+extern /*@null@*/ struct table *rdb_references;
+
+/* enumerate/drop the subtables of a database file */
+retvalue database_listsubtables(const char *, /*@out@*/struct strlist *);
+retvalue database_dropsubtable(const char *, const char *);
+
+#endif
diff --git a/debfile.c b/debfile.c
new file mode 100644
index 0000000..61d73cd
--- /dev/null
+++ b/debfile.c
@@ -0,0 +1,229 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <archive.h>
+#include <archive_entry.h>
+#include "error.h"
+#include "uncompression.h"
+#include "ar.h"
+#include "chunks.h"
+#include "debfile.h"
+
+#ifndef HAVE_LIBARCHIVE
+#error Why did this file got compiled instead of extractcontrol.c?
+#endif
+#if ARCHIVE_VERSION_NUMBER < 3000000
+#define archive_read_free archive_read_finish
+#endif
+
+/* Read the 'control' member (the tar entry currently positioned at in
+ * 'tar') into a freshly allocated, sanitized, NUL-terminated chunk and
+ * return it in *control.  Rejects empty, oversized or trailing-garbage
+ * control files. */
+static retvalue read_control_file(char **control, const char *debfile, struct archive *tar, struct archive_entry *entry) {
+	int64_t size;
+	char *buffer, *n;
+	const char *afterchanges;
+	size_t len, controllen;
+	ssize_t got;
+
+	size = archive_entry_size(entry);
+	if (size <= 0) {
+		fprintf(stderr, "Error: Empty control file within %s!\n",
+			debfile);
+		return RET_ERROR;
+	}
+	/* 10 MiB sanity limit for a control file */
+	if (size > 10*1024*1024) {
+		fprintf(stderr,
+"Error: Ridiculously long control file within %s!\n",
+			debfile);
+		return RET_ERROR;
+	}
+	/* +2: room for one extra read to detect overruns and the NUL */
+	buffer = malloc(size + 2);
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+	len = 0;
+	/* request size+1 bytes so reading more than 'size' is detectable */
+	while ((got = archive_read_data(tar, buffer+len, ((size_t)size+1)-len)) > 0
+			&& !interrupted()) {
+		len += got;
+		if (len > (size_t)size) {
+			fprintf(stderr,
+"Internal Error: libarchive miscalculated length of the control file inside '%s',\n"
+" perhaps the file is corrupt, perhaps libarchive!\n", debfile);
+			free(buffer);
+			return RET_ERROR;
+		}
+	}
+	if (interrupted()) {
+		free(buffer);
+		return RET_ERROR_INTERRUPTED;
+	}
+	if (got < 0) {
+		free(buffer);
+		fprintf(stderr, "Error reading control file from %s\n",
+			debfile);
+		return RET_ERROR;
+	}
+	if (len < (size_t)size)
+		fprintf(stderr,
+"Warning: libarchive miscalculated length of the control file inside '%s'.\n"
+"Maybe the file is corrupt, perhaps libarchive!\n", debfile);
+	buffer[len] = '\0';
+
+	/* normalize in place; afterchanges points past the first chunk */
+	controllen = chunk_extract(buffer, buffer, len, true, &afterchanges);
+
+	if (controllen == 0) {
+		fprintf(stderr,
+"Could only find spaces within control file of '%s'!\n",
+			debfile);
+		free(buffer);
+		return RET_ERROR;
+	}
+	/* anything after the chunk (before the original end) is an error */
+	if ((size_t)(afterchanges - buffer) < len) {
+		if (*afterchanges == '\0')
+			fprintf(stderr,
+"Unexpected \\0 character within control file of '%s'!\n", debfile);
+		else
+			fprintf(stderr,
+"Unexpected data after ending empty line in control file of '%s'!\n", debfile);
+		free(buffer);
+		return RET_ERROR;
+	}
+	assert (buffer[controllen] == '\0');
+	/* shrink the buffer to the sanitized chunk */
+	n = realloc(buffer, controllen+1);
+	if (FAILEDTOALLOC(n)) {
+		free(buffer);
+		return RET_ERROR_OOM;
+	}
+	*control = n;
+	return RET_OK;
+}
+
+/* Scan the control.tar member (opened via the ar callbacks) for an entry
+ * named 'control' or './control' and extract it with read_control_file.
+ * Returns RET_ERROR_MISSING when no control file is found. */
+static retvalue read_control_tar(char **control, const char *debfile, struct ar_archive *ar, struct archive *tar) {
+	struct archive_entry *entry;
+	int a;
+	retvalue r;
+
+	archive_read_support_format_tar(tar);
+	archive_read_support_format_gnutar(tar);
+	a = archive_read_open(tar, ar,
+			ar_archivemember_open,
+			ar_archivemember_read,
+			ar_archivemember_close);
+	if (a != ARCHIVE_OK) {
+		fprintf(stderr,
+"open control.tar.gz within '%s' failed: %d:%d:%s\n",
+			debfile,
+			a, archive_errno(tar),
+			archive_error_string(tar));
+		return RET_ERROR;
+	}
+	while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) {
+		if (strcmp(archive_entry_pathname(entry), "./control") != 0 &&
+		    strcmp(archive_entry_pathname(entry), "control") != 0) {
+			a = archive_read_data_skip(tar);
+			if (a != ARCHIVE_OK) {
+				int e = archive_errno(tar);
+				/* this is the control tarball, not the data
+				 * one: the original message said data.tar.gz */
+				printf(
+"Error skipping %s within control.tar.gz from %s: %d=%s\n",
+					archive_entry_pathname(entry),
+					debfile,
+					e, archive_error_string(tar));
+				return (e!=0)?(RET_ERRNO(e)):RET_ERROR;
+			}
+			if (interrupted())
+				return RET_ERROR_INTERRUPTED;
+		} else {
+			r = read_control_file(control, debfile, tar, entry);
+			if (r != RET_NOTHING)
+				return r;
+		}
+	}
+	if (a != ARCHIVE_EOF) {
+		int e = archive_errno(tar);
+		printf("Error reading control.tar.gz from %s: %d=%s\n",
+			debfile,
+			e, archive_error_string(tar));
+		return (e!=0)?(RET_ERRNO(e)):RET_ERROR;
+	}
+	fprintf(stderr,
+"Could not find a control file within control.tar.gz within '%s'!\n",
+		debfile);
+	return RET_ERROR_MISSING;
+}
+
+/* Extract the control chunk from a .deb: iterate the ar members looking
+ * for control.tar with a supported compression suffix, then read the
+ * control file out of it.  *control receives an allocated chunk on
+ * success. */
+retvalue extractcontrol(char **control, const char *debfile) {
+	struct ar_archive *ar;
+	retvalue r;
+	bool hadcandidate = false;
+
+	r = ar_open(&ar, debfile);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (r != RET_NOTHING);
+	do {
+		char *filename;
+		enum compression c;
+
+		r = ar_nextmember(ar, &filename);
+		if (RET_IS_OK(r)) {
+			/* 11 == strlen("control.tar") */
+			if (strncmp(filename, "control.tar", 11) != 0) {
+				free(filename);
+				continue;
+			}
+			hadcandidate = true;
+			/* match the suffix against the known compressions */
+			for (c = 0 ; c < c_COUNT ; c++) {
+				if (strcmp(filename + 11,
+						uncompression_suffix[c]) == 0)
+					break;
+			}
+			if (c >= c_COUNT) {
+				free(filename);
+				continue;
+			}
+			ar_archivemember_setcompression(ar, c);
+			if (uncompression_supported(c)) {
+				struct archive *tar;
+
+				tar = archive_read_new();
+				r = read_control_tar(control, debfile, ar, tar);
+				// TODO run archive_read_close to get error messages?
+				archive_read_free(tar);
+				if (r != RET_NOTHING) {
+					ar_close(ar);
+					free(filename);
+					return r;
+				}
+
+			}
+			free(filename);
+		}
+	} while (RET_IS_OK(r));
+	ar_close(ar);
+	if (hadcandidate)
+		fprintf(stderr,
+"Could not find a suitable control.tar file within '%s'!\n", debfile);
+	else
+		fprintf(stderr,
+"Could not find a control.tar file within '%s'!\n", debfile);
+	return RET_ERROR_MISSING;
+}
diff --git a/debfile.h b/debfile.h
new file mode 100644
index 0000000..cf47f61
--- /dev/null
+++ b/debfile.h
@@ -0,0 +1,15 @@
+#ifndef REPREPRO_DEBFILE_H
+#define REPREPRO_DEBFILE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+
+/* Extract the control chunk of a .deb file into a newly allocated,
+ * caller-owned string */
+retvalue extractcontrol(/*@out@*/char **, const char *);
+
+/* Read the (compressed) list of files contained in a .deb file;
+ * returns the caller-owned buffer and its size */
+retvalue getfilelist(/*@out@*/char **, /*@out@*/ size_t *, const char *);
+
+#endif
diff --git a/debfilecontents.c b/debfilecontents.c
new file mode 100644
index 0000000..611a93f
--- /dev/null
+++ b/debfilecontents.c
@@ -0,0 +1,221 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <archive.h>
+#include <archive_entry.h>
+#include "error.h"
+#include "uncompression.h"
+#include "ar.h"
+#include "filelist.h"
+#include "debfile.h"
+
+#ifndef HAVE_LIBARCHIVE
+#error Why did this file got compiled?
+#endif
+#if ARCHIVE_VERSION_NUMBER < 3000000
+#define archive_read_free archive_read_finish
+#endif
+
+/* Read the list of file names out of an already located data.tar member.
+ * The member is opened as a tar archive via the ar_archivemember_*
+ * callbacks; every non-directory entry (leading "./" or "/" stripped)
+ * is added to a filelistcompressor whose compressed result is handed
+ * back via list/size.
+ * NOTE(review): errno == -EINVAL appears to be used by the callbacks as
+ * "no meaningful errno available" and selects a message without an
+ * error code -- confirm against ar.c. */
+static retvalue read_data_tar(/*@out@*/char **list, /*@out@*/size_t *size, const char *debfile, struct ar_archive *ar, struct archive *tar) {
+	struct archive_entry *entry;
+	struct filelistcompressor c;
+	retvalue r;
+	int a, e;
+
+	r = filelistcompressor_setup(&c);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	archive_read_support_format_tar(tar);
+	archive_read_support_format_gnutar(tar);
+	a = archive_read_open(tar, ar,
+			ar_archivemember_open,
+			ar_archivemember_read,
+			ar_archivemember_close);
+	if (a != ARCHIVE_OK) {
+		filelistcompressor_cancel(&c);
+		e = archive_errno(tar);
+		if (e == -EINVAL) /* special code to say there is none */
+			fprintf(stderr,
+"open data.tar within '%s' failed: %s\n",
+				debfile, archive_error_string(tar));
+		else
+			fprintf(stderr,
+"open data.tar within '%s' failed: %d:%d:%s\n", debfile, a, e,
+				archive_error_string(tar));
+		return RET_ERROR;
+	}
+	while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) {
+		const char *name = archive_entry_pathname(entry);
+		mode_t mode;
+
+		/* normalize "./path" and "/path" to "path" */
+		if (name[0] == '.')
+			name++;
+		if (name[0] == '/')
+			name++;
+		if (name[0] == '\0')
+			continue;
+		mode = archive_entry_mode(entry);
+		/* directories themselves are not listed in Contents files */
+		if (!S_ISDIR(mode)) {
+			r = filelistcompressor_add(&c, name, strlen(name));
+			if (RET_WAS_ERROR(r)) {
+				filelistcompressor_cancel(&c);
+				return r;
+			}
+		}
+		if (interrupted()) {
+			filelistcompressor_cancel(&c);
+			return RET_ERROR_INTERRUPTED;
+		}
+		/* the entry's content itself is not needed, only its name */
+		a = archive_read_data_skip(tar);
+		if (a != ARCHIVE_OK) {
+			e = archive_errno(tar);
+			if (e == -EINVAL) {
+				r = RET_ERROR;
+				fprintf(stderr,
+"Error skipping %s within data.tar from %s: %s\n",
+					archive_entry_pathname(entry),
+					debfile, archive_error_string(tar));
+			} else {
+				fprintf(stderr,
+"Error %d skipping %s within data.tar from %s: %s\n",
+					e, archive_entry_pathname(entry),
+					debfile, archive_error_string(tar));
+				if (e != 0)
+					r = RET_ERRNO(e);
+				else
+					r = RET_ERROR;
+			}
+			filelistcompressor_cancel(&c);
+			return r;
+		}
+	}
+	if (a != ARCHIVE_EOF) {
+		e = archive_errno(tar);
+		if (e == -EINVAL) {
+			r = RET_ERROR;
+			fprintf(stderr,
+"Error reading data.tar from %s: %s\n", debfile, archive_error_string(tar));
+		} else {
+			fprintf(stderr,
+"Error %d reading data.tar from %s: %s\n",
+				e, debfile, archive_error_string(tar));
+			if (e != 0)
+				r = RET_ERRNO(e);
+			else
+				r = RET_ERROR;
+		}
+		filelistcompressor_cancel(&c);
+		return r;
+	}
+	return filelistcompressor_finish(&c, list, size);
+}
+
+
+/* Extract the compressed file list from the data.tar member of a .deb.
+ * On a read error the whole .deb is scanned a second time and the
+ * data.tar member is read once more with compression forced off
+ * (see note at the retry below). */
+retvalue getfilelist(/*@out@*/char **filelist, size_t *size, const char *debfile) {
+	struct ar_archive *ar;
+	retvalue r;
+	bool hadcandidate;
+	bool uncompressedretry = false;
+retrylabel:
+	hadcandidate = false;
+	r = ar_open(&ar, debfile);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (r != RET_NOTHING);
+	do {
+		char *filename;
+		enum compression c;
+
+		r = ar_nextmember(ar, &filename);
+		if (RET_IS_OK(r)) {
+			/* strlen("data.tar") == 8 */
+			if (strncmp(filename, "data.tar", 8) != 0) {
+				free(filename);
+				continue;
+			}
+			hadcandidate = true;
+			/* the suffix after "data.tar" selects the decompressor */
+			for (c = 0 ; c < c_COUNT ; c++) {
+				if (strcmp(filename + 8,
+						uncompression_suffix[c]) == 0)
+					break;
+			}
+			if (c >= c_COUNT) {
+				free(filename);
+				continue;
+			}
+			/* NOTE(review): on the retry pass the member is read
+			 * as if uncompressed, whatever its suffix says --
+			 * this assumes a misnamed member; confirm intent */
+			if (uncompressedretry) c = c_none;
+			ar_archivemember_setcompression(ar, c);
+			if (uncompression_supported(c)) {
+				struct archive *tar;
+				int a;
+
+				tar = archive_read_new();
+				r = read_data_tar(filelist, size,
+						debfile, ar, tar);
+				/* close explicitly to collect decoder errors
+				 * not reported while reading */
+				a = archive_read_close(tar);
+				if (a != ARCHIVE_OK && !RET_WAS_ERROR(r)) {
+					int e = archive_errno(tar);
+					if (e == -EINVAL)
+						fprintf(stderr,
+"reading data.tar within '%s' failed: %s\n",
+							debfile,
+							archive_error_string(
+								tar));
+					else
+						fprintf(stderr,
+"reading data.tar within '%s' failed: %d:%d:%s\n", debfile, a, e,
+							archive_error_string(
+								tar));
+					r = RET_ERROR;
+				}
+				a = archive_read_free(tar);
+				if (a != ARCHIVE_OK && !RET_WAS_ERROR(r)) {
+					r = RET_ERROR;
+				}
+				if (r != RET_NOTHING) {
+					ar_close(ar);
+					free(filename);
+					/* only one uncompressed retry, and
+					 * only for plain RET_ERROR */
+					if ((r == RET_ERROR) && (!uncompressedretry)) {
+						uncompressedretry = true;
+						fprintf(stderr,"retrying uncompressed\n");
+						goto retrylabel;
+					} else {
+						return r;
+					}
+				}
+
+			}
+			free(filename);
+		}
+	} while (RET_IS_OK(r));
+	ar_close(ar);
+	if (hadcandidate)
+		fprintf(stderr,
+"Could not find a suitable data.tar file within '%s'!\n", debfile);
+	else
+		fprintf(stderr,
+"Could not find a data.tar file within '%s'!\n", debfile);
+	return RET_ERROR_MISSING;
+}
diff --git a/descriptions.c b/descriptions.c
new file mode 100644
index 0000000..d79c862
--- /dev/null
+++ b/descriptions.c
@@ -0,0 +1,202 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "chunks.h"
+#include "files.h"
+#include "debfile.h"
+#include "binaries.h"
+#include "descriptions.h"
+#include "md5.h"
+
+/* get the description from a .(u)deb file */
+static retvalue description_from_package(const char *control, char **description_p) {
+ struct strlist filekeys;
+ char *filename;
+ char *deb_control;
+ retvalue r;
+
+ r = binaries_getfilekeys(control, &filekeys);
+ if (r == RET_NOTHING)
+ r = RET_ERROR;
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (filekeys.count != 1) {
+ fprintf(stderr, "Strange number of files for binary package: %d\n",
+ filekeys.count);
+ strlist_done(&filekeys);
+ return RET_ERROR;
+ }
+ filename = files_calcfullfilename(filekeys.values[0]);
+ strlist_done(&filekeys);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ if (verbose > 7) {
+ fprintf(stderr, "Reading '%s' to extract description...\n",
+ filename);
+ }
+ r = extractcontrol(&deb_control, filename);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ free(filename);
+ return r;
+ }
+ r = chunk_getwholedata(deb_control, "Description", description_p);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Not found any Description within file '%s'!\n",
+ filename);
+ }
+ free(filename);
+ free(deb_control);
+ return r;
+}
+
+/* This only matches the official one if the description is well-formed enough.
+ * If it has less or more leading spaces or anything else our reading has stripped,
+ * it will not match.... */
+static void description_genmd5(const char *description, /*@out@*/ char *d, size_t len) {
+	struct MD5Context context;
+	unsigned char md5buffer[MD5_DIGEST_SIZE];
+	int i;
+
+	assert (len == 2*MD5_DIGEST_SIZE + 1);
+	/* the official checksum is taken over the description followed
+	 * by a single newline */
+	MD5Init(&context);
+	MD5Update(&context, (const unsigned char*)description,
+			strlen(description));
+	MD5Update(&context, (const unsigned char*)"\n", 1);
+	MD5Final(md5buffer, &context);
+	/* render the digest as lower-case hex (snprintf also writes the
+	 * terminating '\0' in the last round) */
+	for (i = 0 ; i < MD5_DIGEST_SIZE ; i++)
+		snprintf(d + 2*i, 3, "%02x", md5buffer[i]);
+}
+
+/* Currently only normalizing towards a full Description is supported,
+ * the cached description is not yet used, long descriptions are not stored elsewhere
+ * and thus also no reference counting is done. */
+
+/* Normalize the control chunk of a newly added package towards carrying
+ * the full (long) Description: if only a short Description plus a
+ * Description-md5 is present, fetch the long one out of the .deb itself,
+ * verify it against the short text and the md5, and rewrite the chunk
+ * (dropping Description-md5). Returns RET_OK with *control_p set to a
+ * new chunk, RET_NOTHING if nothing was (or could be) changed. */
+retvalue description_addpackage(struct target *target, const char *package, const char *control, char **control_p) {
+	char *description, *description_md5, *deb_description, *newcontrol;
+	struct fieldtoadd *todo;
+	size_t dlen;
+	retvalue r;
+
+	/* source packages have no descriptions */
+	if (target->packagetype == pt_dsc)
+		return RET_NOTHING;
+
+	r = chunk_getwholedata(control, "Description", &description);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING) {
+		fprintf(stderr,
+"Strange control data for '%s': no Description at all\n",
+				package);
+		return RET_NOTHING;
+	}
+	if (strchr(description, '\n') != NULL) {
+		/* there already is a long description, nothing to do */
+		free(description);
+		return RET_NOTHING;
+	}
+	dlen = strlen(description);
+
+	r = chunk_getwholedata(control, "Description-md5", &description_md5);
+	if (RET_WAS_ERROR(r)) {
+		free(description);
+		return r;
+	}
+	if (r == RET_NOTHING) {
+		/* only short description and no -md5?
+		 * unusual but can happen, especially with .udeb */
+		free(description);
+		return RET_NOTHING;
+	}
+	r = description_from_package(control, &deb_description);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		fprintf(stderr, "Cannot retrieve long description for package '%s' out of package's files!\n",
+				package);
+		free(description);
+		free(description_md5);
+		/* not finding the .deb file is not fatal */
+		return RET_NOTHING;
+	}
+	/* check if the existing short description matches the found one */
+	if (strncmp(description, deb_description, dlen) != 0) {
+		fprintf(stderr,
+"Short Description of package '%s' does not match\n"
+"the start of the long description found in the .deb\n",
+				package);
+		//if (!force) {
+		free(description);
+		free(description_md5);
+		free(deb_description);
+		/* not fatal, only not processed */
+		return RET_NOTHING;
+		//}
+	}
+	if (strlen(deb_description) == dlen) {
+		/* nothing new, only a short description in the .deb, too: */
+		free(description);
+		free(description_md5);
+		free(deb_description);
+		return RET_NOTHING;
+	}
+	free(description);
+	/* check if Description-md5 matches */
+	if (description_md5 != NULL) {
+		char found[2 * MD5_DIGEST_SIZE + 1];
+
+		description_genmd5(deb_description, found, sizeof(found));
+		if (strcmp(found, description_md5) != 0) {
+			fprintf(stderr,
+"Description-md5 of package '%s' does not match\n"
+"the md5 of the description found in the .deb\n"
+"('%s' != '%s')!\n",
+					package, description_md5, found);
+			//if (!force) {
+			free(description_md5);
+			/* not fatal, only not processed */
+			free(deb_description);
+			return RET_NOTHING;
+			//}
+		}
+		free(description_md5);
+	}
+
+	/* replace the short description with the full one and drop the
+	 * now redundant Description-md5 field */
+	todo = deletefield_new("Description-md5", NULL);
+	if (!FAILEDTOALLOC(todo))
+		todo = addfield_new("Description", deb_description, todo);
+	newcontrol = chunk_replacefields(control, todo, "Description", false);
+	addfield_free(todo);
+	free(deb_description);
+	if (FAILEDTOALLOC(newcontrol))
+		return RET_ERROR_OOM;
+	*control_p = newcontrol;
+	return RET_OK;
+}
diff --git a/descriptions.h b/descriptions.h
new file mode 100644
index 0000000..c3aa060
--- /dev/null
+++ b/descriptions.h
@@ -0,0 +1,11 @@
+#ifndef REPREPRO_DESCRIPTIONS_H
+#define REPREPRO_DESCRIPTIONS_H
+
+/* Do what is needed description/translation wise for a new package added.
+ * control is the control chunk of the new package to be normalized
+ * (depending on the target, towards containing full Description or checksumed),
+ * newcontrol_p gets the new normalized control chunk.
+ * Returns RET_NOTHING when the chunk did not need (or could not get)
+ * any change; newcontrol_p is only set on RET_OK. */
+
+retvalue description_addpackage(struct target*, const char */*package*/, const char */*control*/, /*@out@*/char **/*newcontrol_p*/);
+#endif
diff --git a/diffindex.c b/diffindex.c
new file mode 100644
index 0000000..0bff9aa
--- /dev/null
+++ b/diffindex.c
@@ -0,0 +1,227 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "error.h"
+#include "names.h"
+#include "chunks.h"
+#include "readtextfile.h"
+#include "checksums.h"
+#include "diffindex.h"
+
+
+/* release a diffindex and everything owned by its patch entries */
+void diffindex_free(struct diffindex *diffindex) {
+	int i;
+
+	if (diffindex == NULL)
+		return;
+	checksums_free(diffindex->destination);
+	for (i = 0 ; i < diffindex->patchcount ; i++) {
+		struct diffindex_patch *p = diffindex->patches + i;
+
+		free(p->name);
+		checksums_free(p->frompackages);
+		checksums_free(p->checksums);
+	}
+	free(diffindex);
+}
+
+/* split "<sha1hex> <length> <rest>" into hash substrings (pointers into
+ * the given line, not copies) and the start of the trailing text */
+static void parse_sha1line(const char *p, /*@out@*/struct hashes *hashes, /*@out@*/const char **rest) {
+	const char *start;
+
+	setzero(struct hashes, hashes);
+
+	/* hex digits form the sha1 sum */
+	start = p;
+	while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')
+			|| (*p >= 'A' && *p <= 'F'))
+		p++;
+	hashes->hashes[cs_sha1sum].start = start;
+	hashes->hashes[cs_sha1sum].len = p - start;
+	while (*p == ' ' || *p == '\t')
+		p++;
+	/* decimal digits form the length */
+	start = p;
+	while (*p >= '0' && *p <= '9')
+		p++;
+	hashes->hashes[cs_length].start = start;
+	hashes->hashes[cs_length].len = p - start;
+	while (*p == ' ' || *p == '\t')
+		p++;
+	*rest = p;
+}
+
+/* parse the SHA1-Current value ("<sha1> <size>") into n->destination */
+static inline retvalue add_current(const char *diffindexfile, struct diffindex *n, const char *current) {
+	struct hashes hashes;
+	const char *rest;
+	retvalue r;
+
+	parse_sha1line(current, &hashes, &rest);
+	/* both fields must be present and nothing may follow them */
+	if (hashes.hashes[cs_sha1sum].len != 0
+			&& hashes.hashes[cs_length].len != 0
+			&& *rest == '\0')
+		r = checksums_initialize(&n->destination, hashes.hashes);
+	else
+		r = RET_ERROR;
+	ASSERT_NOT_NOTHING(r);
+	if (RET_WAS_ERROR(r))
+		fprintf(stderr, "Error parsing SHA1-Current in '%s'!\n",
+				diffindexfile);
+	return r;
+}
+
+/* parse the SHA1-Patches lines ("<sha1> <size> <name>") into n->patches;
+ * patches must hold exactly one line per allocated patch slot */
+static inline retvalue add_patches(const char *diffindexfile, struct diffindex *n, const struct strlist *patches) {
+	int i;
+
+	assert (patches->count == n->patchcount);
+
+	for (i = 0 ; i < n->patchcount; i++) {
+		struct hashes hashes;
+		const char *patchname;
+		retvalue r;
+
+		parse_sha1line(patches->values[i], &hashes, &patchname);
+		if (hashes.hashes[cs_sha1sum].len == 0
+				|| hashes.hashes[cs_length].len == 0
+				|| *patchname == '\0') {
+			r = RET_ERROR;
+		} else
+			r = checksums_initialize(&n->patches[i].checksums,
+					hashes.hashes);
+		ASSERT_NOT_NOTHING(r);
+		if (RET_WAS_ERROR(r)) {
+			fprintf(stderr,
+"Error parsing SHA1-Patches line %d in '%s':!\n'%s'\n",
+					i, diffindexfile, patches->values[i]);
+			return r;
+		}
+		n->patches[i].name = strdup(patchname);
+		/* check the fresh strdup, not the long-established n */
+		if (FAILEDTOALLOC(n->patches[i].name))
+			return RET_ERROR_OOM;
+	}
+	return RET_OK;
+}
+
+/* parse the SHA1-History lines ("<sha1> <size> <patchname>") and attach
+ * each entry to the already-parsed patch of the same name as the
+ * starting-point checksum (frompackages). Names not listed in
+ * SHA1-Patches are warned about and skipped; duplicates keep only the
+ * last entry. */
+static inline retvalue add_history(const char *diffindexfile, struct diffindex *n, const struct strlist *history) {
+	int i, j;
+
+	for (i = 0 ; i < history->count ; i++) {
+		struct hashes hashes;
+		const char *patchname;
+		struct checksums *checksums;
+		retvalue r;
+
+		parse_sha1line(history->values[i], &hashes, &patchname);
+		if (hashes.hashes[cs_sha1sum].len == 0
+				|| hashes.hashes[cs_length].len == 0
+				|| *patchname == '\0') {
+			r = RET_ERROR;
+		} else
+			r = checksums_initialize(&checksums,
+					hashes.hashes);
+		ASSERT_NOT_NOTHING(r);
+		if (RET_WAS_ERROR(r)) {
+			fprintf(stderr,
+"Error parsing SHA1-History line %d in '%s':!\n'%s'\n",
+					i, diffindexfile, history->values[i]);
+			return r;
+		}
+		/* find the patch this history entry belongs to */
+		j = 0;
+		while (j < n->patchcount && strcmp(n->patches[j].name,
+					patchname) != 0)
+			j++;
+		if (j >= n->patchcount) {
+			fprintf(stderr,
+"'%s' lists '%s' in history but not in patches!\n",
+					diffindexfile, patchname);
+			checksums_free(checksums);
+			continue;
+		}
+		if (n->patches[j].frompackages != NULL) {
+			fprintf(stderr,
+"Warning: '%s' lists multiple histories for '%s'!\nOnly using last one!\n",
+					diffindexfile, patchname);
+			checksums_free(n->patches[j].frompackages);
+		}
+		/* ownership of checksums moves into the patch entry */
+		n->patches[j].frompackages = checksums;
+	}
+	return RET_OK;
+}
+
+/* parse a Packages.diff/Index file into a freshly allocated diffindex;
+ * on success *out_p owns the result (release with diffindex_free) */
+retvalue diffindex_read(const char *diffindexfile, struct diffindex **out_p) {
+	retvalue r;
+	char *chunk, *current;
+	struct strlist history, patches;
+	struct diffindex *n;
+
+	r = readtextfile(diffindexfile, diffindexfile, &chunk, NULL);
+	ASSERT_NOT_NOTHING(r);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = chunk_getextralinelist(chunk, "SHA1-History", &history);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "'%s' misses SHA1-History field\n",
+				diffindexfile);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(chunk);
+		return r;
+	}
+	r = chunk_getextralinelist(chunk, "SHA1-Patches", &patches);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "'%s' misses SHA1-Patches field\n",
+				diffindexfile);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(chunk);
+		strlist_done(&history);
+		return r;
+	}
+	r = chunk_getvalue(chunk, "SHA1-Current", &current);
+	free(chunk);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "'%s' misses SHA1-Current field\n",
+				diffindexfile);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		strlist_done(&history);
+		strlist_done(&patches);
+		return r;
+	}
+	/* one allocation: header plus flexible array of patch entries */
+	n = calloc(1, sizeof(struct diffindex) +
+			patches.count * sizeof(struct diffindex_patch));
+	if (FAILEDTOALLOC(n)) {
+		strlist_done(&history);
+		strlist_done(&patches);
+		free(current);
+		/* r is RET_OK here, so an explicit error must be returned */
+		return RET_ERROR_OOM;
+	}
+	n->patchcount = patches.count;
+	r = add_current(diffindexfile, n, current);
+	if (RET_IS_OK(r))
+		r = add_patches(diffindexfile, n, &patches);
+	if (RET_IS_OK(r))
+		r = add_history(diffindexfile, n, &history);
+	ASSERT_NOT_NOTHING(r);
+	strlist_done(&history);
+	strlist_done(&patches);
+	free(current);
+	if (RET_IS_OK(r))
+		*out_p = n;
+	else
+		diffindex_free(n);
+	return r;
+}
diff --git a/diffindex.h b/diffindex.h
new file mode 100644
index 0000000..db78f6f
--- /dev/null
+++ b/diffindex.h
@@ -0,0 +1,19 @@
+#ifndef REPREPRO_DIFFINDEX_H
+#define REPREPRO_DIFFINDEX_H
+
+/* parsed representation of a Packages.diff/Index file */
+struct diffindex {
+	/* checksum/size the patches lead to (from SHA1-Current) */
+	struct checksums *destination;
+	int patchcount;
+	struct diffindex_patch {
+		/* checksum/size of the file this patch applies to
+		 * (from SHA1-History), NULL if not listed there */
+		struct checksums *frompackages;
+		char *name;
+		/* checksum/size of the patch itself (from SHA1-Patches) */
+		struct checksums *checksums;
+		/* safe-guard against cycles */
+		bool done;
+	} patches[];
+};
+
+void diffindex_free(/*@only@*/struct diffindex *);
+retvalue diffindex_read(const char *, /*@out@*/struct diffindex **);
+
+#endif
diff --git a/dirs.c b/dirs.c
new file mode 100644
index 0000000..c5d18e9
--- /dev/null
+++ b/dirs.c
@@ -0,0 +1,230 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include "error.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "names.h"
+
+/* create directory dirname. */
+retvalue dirs_create(const char *dirname) {
+ int ret, e;
+
+ ret = mkdir(dirname, 0775);
+ if (ret == 0) {
+ if (verbose > 1)
+ printf("Created directory \"%s\"\n", dirname);
+ return RET_OK;
+ } else if (ret < 0 && (e = errno) != EEXIST) {
+ fprintf(stderr, "Error %d creating directory \"%s\": %s\n",
+ e, dirname, strerror(e));
+ return RET_ERROR;
+ }
+ return RET_NOTHING;
+}
+
+/* create recursively all parent directories before the last '/' */
+retvalue dirs_make_parent(const char *filename) {
+ const char *p;
+ char *h;
+ int i;
+ retvalue r;
+
+ for (p = filename+1, i = 1 ; *p != '\0' ; p++, i++) {
+ if (*p == '/') {
+ h = strndup(filename, i);
+ if (FAILEDTOALLOC(h))
+ return RET_ERROR_OOM;
+ r = dirs_create(h);
+ if (RET_WAS_ERROR(r)) {
+ free(h);
+ return r;
+ }
+ free(h);
+ }
+ }
+ return RET_OK;
+}
+
+/* create dirname and any '/'-separated part of it */
+retvalue dirs_make_recursive(const char *directory) {
+ retvalue r, result;
+
+ if (interrupted()) {
+ return RET_ERROR_INTERRUPTED;
+ }
+ r = dirs_make_parent(directory);
+ result = dirs_create(directory);
+ RET_UPDATE(result, r);
+ return result;
+}
+
+/* create directory and return the number of created directoried */
+retvalue dir_create_needed(const char *directory, int *createddepth) {
+ retvalue r;
+ int ret;
+ size_t len = strlen(directory);
+ int check, depth = 0;
+ char *this;
+ int e;
+
+ if (interrupted()) {
+ return RET_ERROR_INTERRUPTED;
+ }
+ while (len > 0 && directory[len-1] == '/')
+ len--;
+ while (len > 0) {
+ this = strndup(directory, len);
+ if (FAILEDTOALLOC(this))
+ return RET_ERROR_OOM;
+ ret = mkdir(this, 0777);
+ e = errno;
+ if (ret == 0) {
+ if (verbose > 1)
+ printf("Created directory \"%s\"\n", this);
+ } else if (e == EEXIST) {
+ free(this);
+ break;
+ /* normally ENOENT should be the only problem,
+ * but check the others to be nice to annoying filesystems */
+ } else if (e != ENOENT && e != EACCES && e != EPERM) {
+ fprintf(stderr,
+"Cannot create directory \"%s\": %s(%d)\n",
+ this, strerror(e), e);
+ free(this);
+ return RET_ERRNO(e);
+ }
+ free(this);
+ depth++;
+ while (len > 0 && directory[len-1] != '/')
+ len--;
+ while (len > 0 && directory[len-1] == '/')
+ len--;
+ }
+ check = depth;
+ while (directory[len] == '/')
+ len++;
+ while (directory[len] != '\0') {
+ while (directory[len] != '\0' && directory[len] != '/')
+ len++;
+ this = strndup(directory, len);
+ if (FAILEDTOALLOC(this))
+ return RET_ERROR_OOM;
+ r = dirs_create(this);
+ free(this);
+ if (RET_WAS_ERROR(r))
+ return r;
+ // TODO: if we get RET_NOTHING here, reduce depth?
+ check--;
+ while (directory[len] == '/')
+ len++;
+ }
+ assert(check == 0);
+ *createddepth = depth;
+ return RET_OK;
+}
+
+/* Remove up to 'created' trailing components of directory again (as
+ * counted by dir_create_needed), stopping at the first directory that
+ * is not empty or cannot be removed. Best-effort: failures other than
+ * ENOTEMPTY are reported but never fatal. */
+void dir_remove_new(const char *directory, int created) {
+	size_t len = strlen(directory);
+	char *this;
+	int ret;
+
+	/* ignore trailing slashes */
+	while (len > 0 && directory[len-1] == '/')
+		len--;
+	while (created > 0 && len > 0) {
+		this = strndup(directory, len);
+		if (FAILEDTOALLOC(this))
+			return;
+		ret = rmdir(this);
+		if (ret == 0) {
+			if (verbose > 1)
+				printf(
+"Removed empty directory \"%s\"\n",
+					this);
+		} else {
+			int e = errno;
+			if (e != ENOTEMPTY) {
+				fprintf(stderr,
+"Error removing directory \"%s\": %s(%d)\n",
+					this, strerror(e), e);
+			}
+			/* stop at the first directory still in use */
+			free(this);
+			return;
+		}
+		free(this);
+		created--;
+		/* strip the last path component */
+		while (len > 0 && directory[len-1] != '/')
+			len--;
+		while (len > 0 && directory[len-1] == '/')
+			len--;
+	}
+	return;
+}
+
+/* Behave like dirname(3): put the directory part of filename into a
+ * newly allocated *directory ("." if there is none, "/" for the root) */
+retvalue dirs_getdirectory(const char *filename, char **directory) {
+	size_t len;
+
+	assert (filename != NULL && *filename != '\0');
+
+	len = strlen(filename);
+	/* ignore trailing slashes (but keep a single leading one) */
+	while (len > 1 && filename[len-1] == '/')
+		len--;
+	/* drop the last path component */
+	while (len > 0 && filename[len-1] != '/')
+		len--;
+	if (len == 0)
+		*directory = strdup(".");
+	else if (len == 1)
+		*directory = strdup("/");
+	else
+		*directory = strndup(filename, len-1);
+	if (FAILEDTOALLOC(*directory))
+		return RET_ERROR_OOM;
+	return RET_OK;
+}
+/* return the part of filename after the last '/' (a pointer into the
+ * given string, nothing is allocated).
+ * Not really suited for directory names: "/bla/blub/" yields "". */
+const char *dirs_basename(const char *filename) {
+	const char *slash = strrchr(filename, '/');
+
+	return (slash == NULL) ? filename : slash + 1;
+}
+
+/* true iff fullfilename exists (stat succeeds) and is a directory */
+bool isdir(const char *fullfilename) {
+	struct stat s;
+
+	assert (fullfilename != NULL);
+	if (stat(fullfilename, &s) != 0)
+		return false;
+	return S_ISDIR(s.st_mode);
+}
diff --git a/dirs.h b/dirs.h
new file mode 100644
index 0000000..112f376
--- /dev/null
+++ b/dirs.h
@@ -0,0 +1,29 @@
+#ifndef REPREPRO_DIRS_H
+#define REPREPRO_DIRS_H
+
+#ifndef REPREPRO_ERROR_H
+#warning "What is happening here?"
+#include "error.h"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#warning "What is happening here?"
+#include "strlist.h"
+#endif
+
+/* create a directory, return RET_NOTHING if already existing */
+retvalue dirs_create(const char *);
+/* create recursively all parent directories before the last '/' */
+retvalue dirs_make_parent(const char *);
+/* create dirname and any '/'-separated part of it */
+retvalue dirs_make_recursive(const char *);
+/* create directory and parents as needed, and save count to remove them later
+ * (the count is what dir_remove_new expects as second argument) */
+retvalue dir_create_needed(const char *, int *);
+void dir_remove_new(const char *, int);
+
+/* Behave like dirname(3) */
+retvalue dirs_getdirectory(const char *, /*@out@*/char **);
+
+/* part after the last '/': returns a pointer into the argument */
+const char *dirs_basename(const char *);
+
+bool isdir(const char *);
+#endif
diff --git a/distribution.c b/distribution.c
new file mode 100644
index 0000000..8b24365
--- /dev/null
+++ b/distribution.c
@@ -0,0 +1,1233 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2010,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <time.h>
+#include "error.h"
+#include "mprintf.h"
+#include "atoms.h"
+#include "sources.h"
+#include "dirs.h"
+#include "names.h"
+#include "release.h"
+#include "tracking.h"
+#include "override.h"
+#include "log.h"
+#include "ignore.h"
+#include "uploaderslist.h"
+#include "configparser.h"
+#include "byhandhook.h"
+#include "package.h"
+#include "distribution.h"
+
/* Free one distribution definition and everything it owns.
 * Accepts NULL (returns RET_OK then); otherwise returns the
 * accumulated result of freeing the contained targets. */
static retvalue distribution_free(struct distribution *distribution) {
	retvalue result, r;
	bool needsretrack = false;

	if (distribution != NULL) {
		/* archive only points to another distribution in the
		 * list (set by the Archive: field parser), it is not
		 * owned here, so just clear the pointer: */
		distribution->archive = NULL;
		free(distribution->suite);
		free(distribution->fakecomponentprefix);
		free(distribution->version);
		free(distribution->origin);
		free(distribution->notautomatic);
		free(distribution->butautomaticupgrades);
		free(distribution->label);
		free(distribution->description);
		free(distribution->signed_by);
		free(distribution->deb_override);
		free(distribution->udeb_override);
		free(distribution->dsc_override);
		free(distribution->uploaders);
		atomlist_done(&distribution->udebcomponents);
		atomlist_done(&distribution->architectures);
		atomlist_done(&distribution->components);
		strlist_done(&distribution->signwith);
		strlist_done(&distribution->updates);
		strlist_done(&distribution->pulls);
		strlist_done(&distribution->alsoaccept);
		exportmode_done(&distribution->dsc);
		exportmode_done(&distribution->deb);
		exportmode_done(&distribution->udeb);
		exportmode_done(&distribution->ddeb);
		atomlist_done(&distribution->contents_architectures);
		atomlist_done(&distribution->contents_components);
		atomlist_done(&distribution->contents_dcomponents);
		atomlist_done(&distribution->contents_ucomponents);
		override_free(distribution->overrides.deb);
		override_free(distribution->overrides.udeb);
		override_free(distribution->overrides.dsc);
		logger_free(distribution->logger);
		if (distribution->uploaderslist != NULL) {
			uploaders_unlock(distribution->uploaderslist);
		}
		byhandhooks_free(distribution->byhandhooks);
		result = RET_OK;

		/* free targets, remembering whether any still had stale
		 * tracking data: */
		while (distribution->targets != NULL) {
			struct target *next = distribution->targets->next;

			if (distribution->targets->staletracking)
				needsretrack = true;

			r = target_free(distribution->targets);
			RET_UPDATE(result, r);
			distribution->targets = next;
		}
		if (distribution->tracking != dt_NONE && needsretrack) {
			fprintf(stderr,
"WARNING: Tracking data of '%s' might have become out of date.\n"
"Consider running retrack to avoid getting funny effects.\n",
					distribution->codename);
		}
		/* codename is freed last as the warning above uses it */
		free(distribution->codename);
		free(distribution);
		return result;
	} else
		return RET_OK;
}
+
+/* allow premature free'ing of overrides to save some memory */
+void distribution_unloadoverrides(struct distribution *distribution) {
+ override_free(distribution->overrides.deb);
+ override_free(distribution->overrides.udeb);
+ override_free(distribution->overrides.dsc);
+ distribution->overrides.deb = NULL;
+ distribution->overrides.udeb = NULL;
+ distribution->overrides.dsc = NULL;
+}
+
+/* create all contained targets... */
+static retvalue createtargets(struct distribution *distribution) {
+ retvalue r;
+ int i, j;
+ struct target *t;
+ struct target *last = NULL;
+ bool has_source = false;
+
+ for (i = 0 ; i < distribution->components.count ; i++) {
+ component_t c = distribution->components.atoms[i];
+ for (j = 0 ; j < distribution->architectures.count ; j++) {
+ architecture_t a = distribution->architectures.atoms[j];
+
+ if (a == architecture_source) {
+ has_source = true;
+ continue;
+ }
+ if (a == architecture_all) {
+ fprintf(stderr,
+"Error: Distribution %s contains an architecture called 'all'.\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+ if (strcmp(atoms_architectures[a], "any") == 0) {
+ fprintf(stderr,
+"Error: Distribution %s contains an architecture called 'any'.\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ r = target_initialize_binary(
+ distribution,
+ c, a,
+ &distribution->deb,
+ distribution->readonly,
+ distribution->exportoptions[deo_noexport],
+ distribution->fakecomponentprefix,
+ &t);
+ if (RET_IS_OK(r)) {
+ if (last != NULL) {
+ last->next = t;
+ } else {
+ distribution->targets = t;
+ }
+ last = t;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (atomlist_in(&distribution->udebcomponents, c)) {
+ r = target_initialize_ubinary(
+ distribution,
+ c, a,
+ &distribution->udeb,
+ distribution->readonly,
+ distribution->exportoptions
+ [deo_noexport],
+ distribution->fakecomponentprefix,
+ &t);
+ if (RET_IS_OK(r)) {
+ if (last != NULL) {
+ last->next = t;
+ } else {
+ distribution->targets = t;
+ }
+ last = t;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ }
+ if (atomlist_in(&distribution->ddebcomponents, c)) {
+ r = target_initialize_dbinary(
+ distribution,
+ c, a,
+ &distribution->ddeb,
+ distribution->readonly,
+ distribution->exportoptions[deo_noexport],
+ distribution->fakecomponentprefix,
+ &t);
+ if (RET_IS_OK(r)) {
+ if (last != NULL) {
+ last->next = t;
+ } else {
+ distribution->targets = t;
+ }
+ last = t;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ /* check if this distribution contains source
+ * (yes, yes, source is not really an architecture, but
+ * the .changes files started with this...) */
+ if (has_source) {
+ r = target_initialize_source(distribution,
+ c, &distribution->dsc,
+ distribution->readonly,
+ distribution->exportoptions
+ [deo_noexport],
+ distribution->fakecomponentprefix, &t);
+ if (last != NULL) {
+ last->next = t;
+ } else {
+ distribution->targets = t;
+ }
+ last = t;
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ return RET_OK;
+}
+
/* State threaded through the config parser callbacks: the linked
 * list of all distributions read so far. */
struct read_distribution_data {
	struct distribution *distributions;
};
+
/* Called by the config parser at the start of each distribution
 * stanza: allocate a zeroed struct distribution and fill in the
 * defaults that are not simply zero. */
CFstartparse(distribution) {
	CFstartparseVAR(distribution, result_p);
	struct distribution *n;
	retvalue r;

	n = zNEW(struct distribution);
	if (FAILEDTOALLOC(n))
		return RET_ERROR_OOM;
	/* set some default value: */
	n->limit = 1;
	/* note: unlike deb/ddeb/dsc below, udeb gets NULL instead of
	 * "Release" as release argument here */
	r = exportmode_init(&n->udeb, true, NULL, "Packages");
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}
	r = exportmode_init(&n->ddeb, true, "Release", "Packages");
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}
	r = exportmode_init(&n->deb, true, "Release", "Packages");
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}
	r = exportmode_init(&n->dsc, false, "Release", "Sources");
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}
	*result_p = n;
	return RET_OK;
}
+
+static bool notpropersuperset(const struct atomlist *allowed, const char *allowedname, const struct atomlist *check, const char *checkname, const char **atoms, const struct distribution *d) {
+ atom_t missing;
+
+ if (!atomlist_subset(allowed, check, &missing)) {
+ fprintf(stderr,
+"In distribution description of '%s' (line %u to %u in %s):\n"
+"%s contains '%s' not found in %s!\n",
+ d->codename,
+ d->firstline, d->lastline, d->filename,
+ checkname, atoms[missing], allowedname);
+ return true;
+ }
+ return false;
+}
+
+static inline retvalue checkcomponentsequalduetofake(const struct distribution *d) {
+ size_t l;
+ int i, j;
+
+ if (d->fakecomponentprefix == NULL)
+ return RET_OK;
+
+ l = strlen(d->fakecomponentprefix);
+
+ for (i = 0 ; i < d->components.count ; i++) {
+ const char *c1 = atoms_components[d->components.atoms[i]];
+
+ if (strncmp(c1, d->fakecomponentprefix, l) != 0)
+ continue;
+ if (d->fakecomponentprefix[l] != '/')
+ continue;
+
+ for (j = 0 ; i < d->components.count ; j++) {
+ const char *c2;
+
+ if (j == i)
+ continue;
+
+ c2 = atoms_components[d->components.atoms[j]];
+
+ if (strcmp(c1 + l + 1, c2) == 0) {
+ fprintf(stderr,
+"ERROR: distribution '%s' has components '%s' and '%s',\n"
+"which would be output to the same place due to FakeComponentPrefix '%s'.\n",
+ d->codename, c1, c2,
+ d->fakecomponentprefix);
+ return RET_ERROR;
+ }
+ }
+ }
+ return RET_OK;
+}
+
/* Called by the config parser when a distribution stanza ends:
 * run the cross-field consistency checks, create the targets and
 * append the new distribution to the caller's linked list. */
CFfinishparse(distribution) {
	CFfinishparseVARS(distribution, n, last_p, mydata);
	struct distribution *d;
	retvalue r;

	if (!complete) {
		distribution_free(n);
		return RET_NOTHING;
	}
	/* remember where this stanza lives, for later error messages: */
	n->filename = config_filename(iter);
	n->firstline = config_firstline(iter);
	n->lastline = config_line(iter) - 1;

	/* Do some consistency checks */
	for (d = mydata->distributions; d != NULL; d = d->next) {
		if (strcmp(d->codename, n->codename) == 0) {
			fprintf(stderr,
"Multiple distributions with the common codename: '%s'!\n"
"First was in %s line %u to %u,\n"
"now another in lines %u to %u of %s.\n",
				n->codename, d->filename,
				d->firstline, d->lastline,
				n->firstline, n->lastline,
				n->filename);
			distribution_free(n);
			return RET_ERROR;
		}
	}

	/* every Contents*/UDeb*/DDeb* sub-list must only name things
	 * that are also in the corresponding main list: */
	if (notpropersuperset(&n->architectures, "Architectures",
			&n->contents_architectures, "ContentsArchitectures",
			atoms_architectures, n) ||
		notpropersuperset(&n->components, "Components",
			&n->contents_components, "ContentsComponents",
			atoms_components, n) ||
		notpropersuperset(&n->ddebcomponents, "DDebComponents",
			&n->contents_dcomponents, "ContentsDComponents",
			atoms_components, n) ||
		notpropersuperset(&n->udebcomponents, "UDebComponents",
			&n->contents_ucomponents, "ContentsUComponents",
			atoms_components, n) ||
		// TODO: instead of checking here make sure it can have more
		// in the rest of the code...:
		notpropersuperset(&n->components, "Components",
			&n->udebcomponents, "UDebComponents",
			atoms_components, n) ||
		notpropersuperset(&n->components, "Components",
			&n->ddebcomponents, "DDebComponents",
			atoms_components, n)) {
		(void)distribution_free(n);
		return RET_ERROR;
	}
	/* overwrite creation of contents files based on given lists: */
	if (n->contents_components_set) {
		if (n->contents_components.count > 0) {
			n->contents.flags.enabled = true;
			n->contents.flags.nodebs = false;
		} else {
			n->contents.flags.nodebs = true;
		}
	}
	if (n->contents_ucomponents_set) {
		if (n->contents_ucomponents.count > 0) {
			n->contents.flags.enabled = true;
			n->contents.flags.udebs = true;
		} else {
			n->contents.flags.udebs = false;
		}
	}
	if (n->contents_dcomponents_set) {
		if (n->contents_dcomponents.count > 0) {
			n->contents.flags.enabled = true;
			n->contents.flags.ddebs = true;
		} else {
			n->contents.flags.ddebs = false;
		}
	}
	if (n->contents_architectures_set) {
		if (n->contents_architectures.count > 0)
			n->contents.flags.enabled = true;
		else
			n->contents.flags.enabled = false;
	}

	r = checkcomponentsequalduetofake(n);
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}

	/* prepare substructures */

	r = createtargets(n);
	if (RET_WAS_ERROR(r)) {
		(void)distribution_free(n);
		return r;
	}
	n->status = RET_NOTHING;
	n->lookedat = false;
	n->selected = false;

	/* put in linked list */
	if (*last_p == NULL)
		mydata->distributions = n;
	else
		(*last_p)->next = n;
	*last_p = n;
	return RET_OK;
}
+
/* Parser callbacks for the simple config fields; each CF*SETPROC
 * macro expands to a setter function that is referenced from the
 * distributionconfigfields table in this file. */
CFallSETPROC(distribution, suite)
CFallSETPROC(distribution, version)
CFallSETPROC(distribution, origin)
CFallSETPROC(distribution, notautomatic)
CFallSETPROC(distribution, butautomaticupgrades)
CFtruthSETPROC2(distribution, readonly, readonly)
CFallSETPROC(distribution, label)
CFallSETPROC(distribution, description)
CFallSETPROC(distribution, signed_by)
CFsignwithSETPROC(distribution, signwith)
CFnumberSETPROC(distribution, -1, LLONG_MAX, limit)
CFfileSETPROC(distribution, deb_override)
CFfileSETPROC(distribution, udeb_override)
CFfileSETPROC(distribution, dsc_override)
CFfileSETPROC(distribution, uploaders)
CFuniqstrlistSETPROC(distribution, alsoaccept)
CFstrlistSETPROC(distribution, updates)
CFstrlistSETPROC(distribution, pulls)
CFinternatomsSETPROC(distribution, components, checkforcomponent, at_component)
CFinternatomsSETPROC(distribution, architectures, checkforarchitecture, at_architecture)
/* the sub-lists below may only contain entries of their parent list
 * (enforced via notpropersuperset in CFfinishparse): */
CFatomsublistSETPROC(distribution, contents_architectures, at_architecture, architectures, "Architectures")
CFatomsublistSETPROC(distribution, contents_components, at_component, components, "Components")
CFatomsublistSETPROC(distribution, ddebcomponents, at_component, components, "Components")
CFatomsublistSETPROC(distribution, udebcomponents, at_component, components, "Components")
CFatomsublistSETPROC(distribution, contents_ucomponents, at_component, udebcomponents, "UDebComponents")
CFexportmodeSETPROC(distribution, ddeb)
CFexportmodeSETPROC(distribution, udeb)
CFexportmodeSETPROC(distribution, deb)
CFexportmodeSETPROC(distribution, dsc)
CFcheckvalueSETPROC(distribution, codename, checkforcodename)
CFcheckvalueSETPROC(distribution, fakecomponentprefix, checkfordirectoryandidentifier)
CFtimespanSETPROC(distribution, validfor)
+
+CFuSETPROC(distribution, archive) {
+ CFSETPROCVARS(distribution, data, mydata);
+ char *codename;
+ retvalue r;
+
+ r = config_getall(iter, &codename);
+ if (!RET_IS_OK(r))
+ return r;
+
+ for (struct distribution *d = mydata->distributions; d != NULL; d = d->next) {
+ if (strcmp(d->codename, codename) == 0) {
+ data->archive = d;
+ free(codename);
+ return RET_OK;
+ }
+ }
+
+ fprintf(stderr,
+"Error parsing config file %s, line %u:\n"
+"No distribution has '%s' as codename.\n"
+"Note: The archive distribution '%s' must be specified before '%s'.\n",
+ config_filename(iter), config_line(iter), codename, codename, data->codename);
+ free(codename);
+ return RET_ERROR_MISSING;
+}
+
/* Parse the Contents: field; delegates to contentsoptions_parse. */
CFUSETPROC(distribution, Contents) {
	CFSETPROCVAR(distribution, d);
	return contentsoptions_parse(d, iter);
}
/* Parse the Log: field; sets up d->logger via logger_init. */
CFUSETPROC(distribution, logger) {
	CFSETPROCVAR(distribution, d);
	return logger_init(iter, &d->logger);
}
/* Parse the Tracking: field; delegates to tracking_parse. */
CFUSETPROC(distribution, Tracking) {
	CFSETPROCVAR(distribution, d);
	return tracking_parse(d, iter);
}
+
/* Parse the ByHandHooks: field into d->byhandhooks. */
CFUSETPROC(distribution, byhandhooks) {
	CFSETPROCVAR(distribution, d);

	return byhandhooks_parse(iter, &d->byhandhooks);
}
+
/* Flag names accepted in the ExportOptions: field, terminated by a
 * {NULL, 0} sentinel entry. */
static const struct constant exportnames[deo_COUNT+1] = {
	{"noexport", deo_noexport},
	{"keepunknown", deo_keepunknown},
	{NULL, 0}
};
+
/* Parse the ExportOptions: field into the d->exportoptions flag
 * array; unknown names are only tolerated with --ignore=unknownfield. */
CFUSETPROC(distribution, exportoptions) {
	CFSETPROCVAR(distribution, d);
	return config_getflags(iter, name, exportnames, d->exportoptions,
			IGNORABLE(unknownfield),
			"(allowed values: noexport, keepunknown)");
}
+
/* All fields recognized in conf/distributions, mapping each field
 * name to its setter above.
 * NOTE(review): CFr presumably marks mandatory fields (Architectures,
 * Codename, Components) — defined in configparser.h, confirm there. */
static const struct configfield distributionconfigfields[] = {
	CF("AlsoAcceptFor", distribution, alsoaccept),
	CFr("Architectures", distribution, architectures),
	CF("Archive", distribution, archive),
	CF("ByHandHooks", distribution, byhandhooks),
	CFr("Codename", distribution, codename),
	CFr("Components", distribution, components),
	CF("ContentsArchitectures", distribution, contents_architectures),
	CF("ContentsComponents", distribution, contents_components),
	CF("Contents", distribution, Contents),
	CF("ContentsUComponents", distribution, contents_ucomponents),
	CF("DDebComponents", distribution, ddebcomponents),
	CF("DDebIndices", distribution, ddeb),
	CF("DebIndices", distribution, deb),
	CF("DebOverride", distribution, deb_override),
	CF("Description", distribution, description),
	CF("Signed-By", distribution, signed_by),
	CF("DscIndices", distribution, dsc),
	CF("DscOverride", distribution, dsc_override),
	CF("FakeComponentPrefix", distribution, fakecomponentprefix),
	CF("Label", distribution, label),
	CF("Limit", distribution, limit),
	CF("Log", distribution, logger),
	CF("NotAutomatic", distribution, notautomatic),
	CF("ButAutomaticUpgrades", distribution, butautomaticupgrades),
	CF("Origin", distribution, origin),
	CF("Pull", distribution, pulls),
	CF("ReadOnly", distribution, readonly),
	CF("ExportOptions", distribution, exportoptions),
	CF("SignWith", distribution, signwith),
	CF("Suite", distribution, suite),
	CF("Tracking", distribution, Tracking),
	CF("UDebComponents", distribution, udebcomponents),
	CF("UDebIndices", distribution, udeb),
	CF("UDebOverride", distribution, udeb_override),
	CF("Update", distribution, updates),
	CF("Uploaders", distribution, uploaders),
	CF("ValidFor", distribution, validfor),
	CF("Version", distribution, version)
};
+
/* read specification of all distributions from conf/distributions;
 * on success hands ownership of the linked list to *distributions.
 * Returns RET_ERROR_MISSING when the file defines none at all. */
retvalue distribution_readall(struct distribution **distributions) {
	struct read_distribution_data mydata;
	retvalue result;

	mydata.distributions = NULL;

	// TODO: readd some way to tell about -b or --confdir here?
	/*
	result = regularfileexists(fn);
	if (RET_WAS_ERROR(result)) {
		fprintf(stderr, "Could not find '%s'!\n"
"(Have you forgotten to specify a basedir by -b?\n"
"To only set the conf/ dir use --confdir)\n", fn);
		free(mydata.filter.found);
		free(fn);
		return RET_ERROR_MISSING;
	}
	*/

	/* the start/finish callbacks above build mydata.distributions: */
	result = configfile_parse("distributions",
			IGNORABLE(unknownfield),
			startparsedistribution, finishparsedistribution,
			"distribution definition",
			distributionconfigfields,
			ARRAYCOUNT(distributionconfigfields),
			&mydata);
	if (result == RET_ERROR_UNKNOWNFIELD)
		fprintf(stderr,
"Use --ignore=unknownfield to ignore unknown fields\n");
	if (RET_WAS_ERROR(result)) {
		/* free whatever was parsed before the error: */
		distribution_freelist(mydata.distributions);
		return result;
	}
	if (mydata.distributions == NULL) {
		fprintf(stderr,
"No distribution definitions found in %s/distributions!\n",
				global.confdir);
		/* freeing NULL here is a no-op, kept for symmetry */
		distribution_freelist(mydata.distributions);
		return RET_ERROR_MISSING;
	}
	*distributions = mydata.distributions;
	return RET_OK;
}
+
/* call <action> for each package of the targets matching the given
 * component/architecture/packagetype limits (NULL list = no limit).
 * If target_action is given it is called first per target; returning
 * RET_NOTHING skips that target.
 * NOTE(review): when target_action errors, this returns 'result'
 * (possibly still RET_NOTHING) rather than r — confirm whether that
 * error is meant to be propagated. */
retvalue package_foreach(struct distribution *distribution, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, action_each_package action, action_each_target target_action, void *data) {
	retvalue result, r;
	struct target *t;
	struct package_cursor iterator;

	result = RET_NOTHING;
	for (t = distribution->targets ; t != NULL ; t = t->next) {
		if (!target_matches(t, components, architectures, packagetypes))
			continue;
		if (target_action != NULL) {
			r = target_action(t, data);
			if (RET_WAS_ERROR(r))
				return result;
			if (r == RET_NOTHING)
				continue;
		}
		r = package_openiterator(t, READONLY, true, &iterator);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			return result;
		/* iterate all packages of this target; stop on first error */
		while (package_next(&iterator)) {
			r = action(&iterator.current, data);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				break;
		}
		r = package_closeiterator(&iterator);
		RET_ENDUPDATE(result, r);
		if (RET_WAS_ERROR(result))
			return result;
	}
	return result;
}
+
+retvalue package_foreach_c(struct distribution *distribution, const struct atomlist *components, architecture_t architecture, packagetype_t packagetype, action_each_package action, void *data) {
+ retvalue result, r;
+ struct target *t;
+ struct package_cursor iterator;
+
+ result = RET_NOTHING;
+ for (t = distribution->targets ; t != NULL ; t = t->next) {
+ if (components != NULL &&
+ !atomlist_in(components, t->component))
+ continue;
+ if (limitation_missed(architecture, t->architecture))
+ continue;
+ if (limitation_missed(packagetype, t->packagetype))
+ continue;
+ r = package_openiterator(t, READONLY, true, &iterator);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ return result;
+ while (package_next(&iterator)) {
+ r = action(&iterator.current, data);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ }
+ r = package_closeiterator(&iterator);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(result))
+ return result;
+ }
+ return result;
+}
+
+struct target *distribution_gettarget(const struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype) {
+ struct target *t = distribution->targets;
+
+ assert (atom_defined(component));
+ assert (atom_defined(architecture));
+ assert (atom_defined(packagetype));
+
+ // TODO: think about making read only access and only alowing readwrite when lookedat is set
+
+ while (t != NULL &&
+ (t->component != component ||
+ t->architecture != architecture ||
+ t->packagetype != packagetype)) {
+ t = t->next;
+ }
+ return t;
+}
+
+struct target *distribution_getpart(const struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype) {
+ struct target *t = distribution->targets;
+
+ assert (atom_defined(component));
+ assert (atom_defined(architecture));
+ assert (atom_defined(packagetype));
+
+ while (t != NULL &&
+ (t->component != component ||
+ t->architecture != architecture ||
+ t->packagetype != packagetype)) {
+ t = t->next;
+ }
+ if (t == NULL) {
+ fprintf(stderr,
+"Internal error in distribution_getpart: Bogus request for c='%s' a='%s' t='%s' in '%s'!\n",
+ atoms_components[component],
+ atoms_architectures[architecture],
+ atoms_packagetypes[packagetype],
+ distribution->codename);
+ abort();
+ }
+ return t;
+}
+
/* mark all distributions matching one of the first argc argv as
 * selected (and lookedat when requested); each argument may be a
 * codename or an unambiguous suite name. With argc <= 0 everything
 * (except read-only ones when not allowed) is selected. */
retvalue distribution_match(struct distribution *alldistributions, int argc, const char *argv[], bool lookedat, bool allowreadonly) {
	struct distribution *d;
	/* per-argument bookkeeping (VLAs, argc > 0 below):
	 * found[i]: argv[i] matched a codename
	 * has_suite[i]: the (last) distribution whose suite matched
	 * unusable_as_suite[i]: more than one suite matched */
	bool found[argc], unusable_as_suite[argc];
	struct distribution *has_suite[argc];
	int i;

	assert (alldistributions != NULL);

	if (argc <= 0) {
		for (d = alldistributions ; d != NULL ; d = d->next) {
			if (!allowreadonly && d->readonly)
				continue;
			d->selected = true;
			d->lookedat = lookedat;
		}
		return RET_OK;
	}
	/* note: zeroing the pointer array assumes all-bits-zero is NULL */
	memset(found, 0, sizeof(found));
	memset(unusable_as_suite, 0, sizeof(unusable_as_suite));
	memset(has_suite, 0, sizeof(has_suite));

	for (d = alldistributions ; d != NULL ; d = d->next) {
		for (i = 0 ; i < argc ; i++) {
			if (strcmp(argv[i], d->codename) == 0) {
				/* codenames are unique (checked at parse
				 * time), so this can match only once: */
				assert (!found[i]);
				found[i] = true;
				d->selected = true;
				if (lookedat)
					d->lookedat = lookedat;
				if (!allowreadonly && d->readonly) {
					fprintf(stderr,
"Error: %s is readonly, so operation not allowed!\n",
						d->codename);
					return RET_ERROR;
				}
			} else if (d->suite != NULL &&
					strcmp(argv[i], d->suite) == 0) {
				if (has_suite[i] != NULL)
					unusable_as_suite[i] = true;
				has_suite[i] = d;
			}
		}
	}
	/* second pass: fall back to suite names for arguments that did
	 * not match any codename: */
	for (i = 0 ; i < argc ; i++) {
		if (!found[i]) {
			if (has_suite[i] != NULL && !unusable_as_suite[i]) {
				if (!allowreadonly && has_suite[i]->readonly) {
					fprintf(stderr,
"Error: %s is readonly, so operation not allowed!\n",
						has_suite[i]->codename);
					return RET_ERROR;
				}
				has_suite[i]->selected = true;
				if (lookedat)
					has_suite[i]->lookedat = lookedat;
				continue;
			}
			fprintf(stderr,
"No distribution definition of '%s' found in '%s/distributions'!\n",
					argv[i], global.confdir);
			if (unusable_as_suite[i])
				fprintf(stderr,
"(It is not the codename of any distribution and there are multiple\n"
"distributions with this as suite name.)\n");
			return RET_ERROR_MISSING;
		}
	}
	return RET_OK;
}
+
/* Look up a single distribution by codename, falling back to an
 * unambiguous suite name; marks it selected (and lookedat when
 * requested) and stores it in *distribution. */
retvalue distribution_get(struct distribution *alldistributions, const char *name, bool lookedat, struct distribution **distribution) {
	struct distribution *d, *d2;

	/* first try exact codename match: */
	d = alldistributions;
	while (d != NULL && strcmp(name, d->codename) != 0)
		d = d->next;
	if (d == NULL) {
		/* then try suite names; a second match means the name
		 * is ambiguous and cannot be used: */
		for (d2 = alldistributions; d2 != NULL ; d2 = d2->next) {
			if (d2->suite == NULL)
				continue;
			if (strcmp(name, d2->suite) != 0)
				continue;
			if (d != NULL) {
				fprintf(stderr,
"No distribution has '%s' as codename, but multiple as suite name,\n"
"thus it cannot be used to determine a distribution.\n", name);
				return RET_ERROR_MISSING;
			}
			d = d2;
		}
	}
	if (d == NULL) {
		fprintf(stderr,
"Cannot find definition of distribution '%s'!\n",
				name);
		return RET_ERROR_MISSING;
	}
	d->selected = true;
	if (lookedat)
		d->lookedat = true;
	*distribution = d;
	return RET_OK;
}
+
+retvalue distribution_snapshot(struct distribution *distribution, const char *name) {
+ struct target *target;
+ retvalue result, r;
+ struct release *release;
+ char *id;
+
+ assert (distribution != NULL);
+
+ r = release_initsnapshot(distribution->codename, name, &release);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ result = RET_NOTHING;
+ for (target=distribution->targets; target != NULL ;
+ target = target->next) {
+ r = release_mkdir(release, target->relativedirectory);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ r = target_export(target, false, true, release);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ if (target->exportmode->release != NULL) {
+ r = release_directorydescription(release, distribution,
+ target, target->exportmode->release,
+ false);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ }
+ }
+ if (!RET_WAS_ERROR(result)) {
+ result = release_prepare(release, distribution, false);
+ assert (result != RET_NOTHING);
+ }
+ if (RET_WAS_ERROR(result)) {
+ release_free(release);
+ return result;
+ }
+ result = release_finish(release, distribution);
+ if (RET_WAS_ERROR(result))
+ return r;
+ id = mprintf("s=%s=%s", distribution->codename, name);
+ if (FAILEDTOALLOC(id))
+ return RET_ERROR_OOM;
+ r = package_foreach(distribution,
+ atom_unknown, atom_unknown, atom_unknown,
+ package_referenceforsnapshot, NULL, id);
+ free(id);
+ RET_UPDATE(result, r);
+ return result;
+}
+
+static retvalue export(struct distribution *distribution, bool onlyneeded) {
+ struct target *target;
+ retvalue result, r;
+ struct release *release;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: export(distribution={codename: %s}, onlyneeded=%s)\n",
+ distribution->codename, onlyneeded ? "true" : "false");
+ assert (distribution != NULL);
+
+ if (distribution->exportoptions[deo_noexport])
+ return RET_NOTHING;
+
+ if (distribution->readonly) {
+ fprintf(stderr,
+"Error: trying to re-export read-only distribution %s\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ r = release_init(&release, distribution->codename, distribution->suite,
+ distribution->fakecomponentprefix);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ result = RET_NOTHING;
+ for (target=distribution->targets; target != NULL ;
+ target = target->next) {
+ r = release_mkdir(release, target->relativedirectory);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ r = target_export(target, onlyneeded, false, release);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ if (target->exportmode->release != NULL) {
+ r = release_directorydescription(release, distribution,
+ target, target->exportmode->release,
+ onlyneeded);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ }
+ }
+ if (!RET_WAS_ERROR(result) && distribution->contents.flags.enabled) {
+ r = contents_generate(distribution, release, onlyneeded);
+ }
+ if (!RET_WAS_ERROR(result)) {
+ result = release_prepare(release, distribution, onlyneeded);
+ if (result == RET_NOTHING) {
+ release_free(release);
+ return result;
+ }
+ }
+ if (RET_WAS_ERROR(result)) {
+ bool workleft = false;
+ release_free(release);
+ fprintf(stderr, "ERROR: Could not finish exporting '%s'!\n",
+ distribution->codename);
+ for (target=distribution->targets; target != NULL ;
+ target = target->next) {
+ workleft |= target->saved_wasmodified;
+ }
+ if (workleft) {
+ (void)fputs(
+"This means that from outside your repository will still look like before (and\n"
+"should still work if this old state worked), but the changes intended with this\n"
+"call will not be visible until you call export directly (via reprepro export)\n"
+"Changes will also get visible when something else changes the same file and\n"
+"thus creates a new export of that file, but even changes to other parts of the\n"
+"same distribution will not!\n",
+ stderr);
+ }
+ } else {
+ r = release_finish(release, distribution);
+ RET_UPDATE(result, r);
+ }
+ if (RET_IS_OK(result))
+ distribution->status = RET_NOTHING;
+ return result;
+}
+
/* Unconditionally re-export all indices of a distribution
 * (onlyneeded = false). */
retvalue distribution_fullexport(struct distribution *distribution) {
	return export(distribution, false);
}
+
+retvalue distribution_freelist(struct distribution *distributions) {
+ retvalue result, r;
+
+ result = RET_NOTHING;
+ while (distributions != NULL) {
+ struct distribution *d = distributions->next;
+ r = distribution_free(distributions);
+ RET_UPDATE(result, r);
+ distributions = d;
+ }
+ return result;
+}
+
/* Export (according to the --export mode 'when') every selected,
 * looked-at distribution of the list; EXPORT_SILENT_NEVER also
 * clears the per-target modification flags. */
retvalue distribution_exportlist(enum exportwhen when, struct distribution *distributions) {
	retvalue result, r;
	bool todo = false;
	struct distribution *d;

	if (verbose >= 15)
		fprintf(stderr, "trace: distribution_exportlist() called.\n");
	if (when == EXPORT_SILENT_NEVER) {
		/* forget all recorded modifications without exporting: */
		for (d = distributions ; d != NULL ; d = d->next) {
			struct target *t;

			for (t = d->targets ; t != NULL ; t = t->next)
				t->wasmodified = false;
		}
		return RET_NOTHING;
	}
	if (when == EXPORT_NEVER) {
		if (verbose > 10)
			fprintf(stderr,
"Not exporting anything as --export=never specified\n");
		return RET_NOTHING;
	}

	/* first pass only decides whether to print the banner: */
	for (d=distributions; d != NULL; d = d->next) {
		if (d->omitted || !d->selected || d->exportoptions[deo_noexport])
			continue;
		if (d->lookedat && (RET_IS_OK(d->status) ||
			(d->status == RET_NOTHING && when != EXPORT_CHANGED) ||
			when == EXPORT_FORCE)) {
			todo = true;
		}
	}

	if (verbose >= 0 && todo)
		printf("Exporting indices...\n");

	result = RET_NOTHING;
	for (d=distributions; d != NULL; d = d->next) {
		if (verbose >= 20)
			fprintf(stderr, "  looking at distribution {codename: %s, exportoptions[deo_noexport]: %s, omitted: %s, selected: %s, status: %d}.\n",
			        d->codename,
			        d->exportoptions[deo_noexport] ? "true" : "false",
			        d->omitted ? "true" : "false",
			        d->selected ? "true" : "false",
			        d->status);
		if (d->exportoptions[deo_noexport])
			continue;
		if (d->omitted || !d->selected)
			continue;
		if (!d->lookedat) {
			if (verbose >= 30)
				printf(
"  Not exporting %s because not looked at.\n", d->codename);
		} else if ((RET_WAS_ERROR(d->status)||interrupted()) &&
				when != EXPORT_FORCE) {
			if (verbose >= 10)
				fprintf(stderr,
"  Not exporting %s because there have been errors and no --export=force.\n",
						d->codename);
		} else if (d->status==RET_NOTHING && when==EXPORT_CHANGED) {
			struct target *t;

			if (verbose >= 10)
				printf(
"  Not exporting %s because of no recorded changes and --export=changed.\n",
						d->codename);

			/* some paranoid check */

			for (t = d->targets ; t != NULL ; t = t->next) {
				if (t->wasmodified) {
					fprintf(stderr,
"A paranoid check found distribution %s would not have been exported,\n"
"despite having parts that are marked changed by deeper code.\n"
"Please report this and how you got this message as bugreport. Thanks.\n"
"Doing a export despite --export=changed....\n",
						d->codename);
					r = export(d, true);
					RET_UPDATE(result, r);
					break;
				}
			}
		} else {
			/* same condition as in the banner pass above: */
			assert (RET_IS_OK(d->status) ||
					(d->status == RET_NOTHING &&
					 when != EXPORT_CHANGED) ||
					when == EXPORT_FORCE);
			r = export(d, true);
			RET_UPDATE(result, r);
		}
	}
	return result;
}
+
+
/* get a pointer to the apropiate part of the linked list:
 * look the name up as codename first, then (requiring uniqueness)
 * in AlsoAcceptFor lists, then (again requiring uniqueness) as
 * suite name; prints an error and returns NULL on failure. */
struct distribution *distribution_find(struct distribution *distributions, const char *name) {
	struct distribution *d = distributions, *r;

	/* codenames are unique, first exact match wins: */
	while (d != NULL && strcmp(d->codename, name) != 0)
		d = d->next;
	if (d != NULL)
		return d;
	/* no codename matched; try AlsoAcceptFor entries, scanning on
	 * after the first hit to detect ambiguity: */
	d = distributions;
	while (d != NULL && !strlist_in(&d->alsoaccept, name))
		d = d->next;
	r = d;
	if (r != NULL) {
		d = d->next;
		while (d != NULL && ! strlist_in(&d->alsoaccept, name))
			d = d->next;
		if (d == NULL)
			return r;
		fprintf(stderr,
"No distribution has codename '%s' and multiple have it in AlsoAcceptFor!\n",
				name);
		return NULL;
	}
	/* finally try suite names, with the same ambiguity check: */
	d = distributions;
	while (d != NULL && (d->suite == NULL || strcmp(d->suite, name) != 0))
		d = d->next;
	r = d;
	if (r == NULL) {
		fprintf(stderr, "No distribution named '%s' found!\n", name);
		return NULL;
	}
	d = d->next;
	while (d != NULL && (d->suite == NULL || strcmp(d->suite, name) != 0))
		d = d->next;
	if (d == NULL)
		return r;
	fprintf(stderr,
"No distribution has codename '%s' and multiple have it as suite-name!\n",
			name);
	return NULL;
}
+
+/* Load the deb, udeb and dsc override files configured for this
+ * distribution (each only if not already loaded).
+ * Returns RET_OK if at least one override list is present afterwards,
+ * RET_NOTHING if none is configured/loaded, or the first error from
+ * override_read (aborting the remaining loads). */
+retvalue distribution_loadalloverrides(struct distribution *distribution) {
+ retvalue r;
+
+ if (distribution->overrides.deb == NULL) {
+ r = override_read(distribution->deb_override,
+ &distribution->overrides.deb, false);
+ if (RET_WAS_ERROR(r)) {
+ /* make sure no stale pointer survives a failed read */
+ distribution->overrides.deb = NULL;
+ return r;
+ }
+ }
+ if (distribution->overrides.udeb == NULL) {
+ r = override_read(distribution->udeb_override,
+ &distribution->overrides.udeb, false);
+ if (RET_WAS_ERROR(r)) {
+ distribution->overrides.udeb = NULL;
+ return r;
+ }
+ }
+ if (distribution->overrides.dsc == NULL) {
+ /* note: the 'true' marks this as the source override file */
+ r = override_read(distribution->dsc_override,
+ &distribution->overrides.dsc, true);
+ if (RET_WAS_ERROR(r)) {
+ distribution->overrides.dsc = NULL;
+ return r;
+ }
+ }
+ if (distribution->overrides.deb != NULL ||
+ distribution->overrides.udeb != NULL ||
+ distribution->overrides.dsc != NULL)
+ return RET_OK;
+ else
+ return RET_NOTHING;
+}
+
+/* Load the uploaders list into distribution->uploaderslist if an
+ * Uploaders: file is configured.  Returns RET_OK when a list is (now)
+ * available, RET_NOTHING when no uploaders file is configured, or an
+ * error from uploaders_get. */
+retvalue distribution_loaduploaders(struct distribution *distribution) {
+ if (distribution->uploaders != NULL) {
+ if (distribution->uploaderslist != NULL)
+ return RET_OK; /* already loaded */
+ return uploaders_get(&distribution->uploaderslist,
+ distribution->uploaders);
+ } else {
+ distribution->uploaderslist = NULL;
+ return RET_NOTHING;
+ }
+}
+
+/* Release the uploaders list obtained by distribution_loaduploaders
+ * (safe to call when nothing is loaded). */
+void distribution_unloaduploaders(struct distribution *distribution) {
+ if (distribution->uploaderslist != NULL) {
+ uploaders_unlock(distribution->uploaderslist);
+ distribution->uploaderslist = NULL;
+ }
+}
+
+/* Make sure <distribution> may be written to: refuse read-only
+ * distributions, prepare the logger (if one is configured) and mark
+ * the distribution as looked at (so it is considered for export).
+ * Returns RET_ERROR for read-only distributions, a logger_prepare
+ * error, or RET_OK. */
+retvalue distribution_prepareforwriting(struct distribution *distribution) {
+ retvalue r;
+
+ if (distribution->readonly) {
+ fprintf(stderr,
+"Error: distribution %s is read-only.\n"
+"Current operation not possible because it needs write access.\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ if (distribution->logger != NULL) {
+ r = logger_prepare(distribution->logger);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ distribution->lookedat = true;
+ return RET_OK;
+}
+
+/* delete every package decider returns RET_OK for */
+/* Iterate over all targets of <distribution> matching the given
+ * component/architecture/packagetype filters and remove each package
+ * for which <decider> returns RET_OK.  Removals go through
+ * package_remove_by_cursor with the distribution's logger and
+ * <trackingdata> (NOTE(review): presumably may be NULL -- confirm
+ * with callers).  Hard errors abort the current target's loop and
+ * the whole iteration. */
+retvalue package_remove_each(struct distribution *distribution, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, action_each_package decider, struct trackingdata *trackingdata, void *data) {
+ retvalue result, r;
+ struct target *t;
+ struct package_cursor iterator;
+
+ if (distribution->readonly) {
+ fprintf(stderr,
+"Error: trying to delete packages in read-only distribution %s.\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ result = RET_NOTHING;
+ for (t = distribution->targets ; t != NULL ; t = t->next) {
+ if (!target_matches(t, components, architectures, packagetypes))
+ continue;
+ r = package_openiterator(t, READWRITE, true, &iterator);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ return result;
+ while (package_next(&iterator)) {
+ r = decider(&iterator.current, data);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ if (RET_IS_OK(r)) {
+ r = package_remove_by_cursor(&iterator,
+ distribution->logger, trackingdata);
+ RET_UPDATE(result, r);
+ /* record the modification in the distribution's
+  * status so a later export picks it up */
+ RET_UPDATE(distribution->status, r);
+ }
+ }
+ r = package_closeiterator(&iterator);
+ RET_ENDUPDATE(result, r);
+ if (RET_WAS_ERROR(result))
+ return result;
+ }
+ return result;
+}
diff --git a/distribution.h b/distribution.h
new file mode 100644
index 0000000..1078084
--- /dev/null
+++ b/distribution.h
@@ -0,0 +1,162 @@
+#ifndef REPREPRO_DISTRIBUTION_H
+#define REPREPRO_DISTRIBUTION_H
+
+struct distribution;
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_TARGET_H
+#include "target.h"
+#endif
+#ifndef REPREPRO_EXPORTS_H
+#include "exports.h"
+#endif
+#ifndef REPREPRO_CONTENTS_H
+#include "contents.h"
+#endif
+struct overridefile;
+struct uploaders;
+
+/* per-distribution export options, used to index the
+ * exportoptions[] array in struct distribution below */
+enum exportoptions {
+ /* never export (generate index files for) this distribution */
+ deo_noexport = 0,
+ /* NOTE(review): presumably keep unknown files on export -- confirm */
+ deo_keepunknown,
+ /* number of options; sizes the bool exportoptions[] array */
+ deo_COUNT,
+};
+
+/* one distribution (one entry of conf/distributions), kept in a
+ * singly linked list */
+struct distribution {
+ struct distribution *next;
+ /* the primary name to access this distribution: */
+ char *codename;
+ /* for more helpful error messages: */
+ const char *filename; /* only valid while parsing! */
+ unsigned int firstline, lastline;
+ /* additional information for the Release-file to be
+ * generated, may be NULL. only suite is sometimes used
+ * (and only for sanity checks) */
+ /*@null@*/char *suite, *version;
+ /*@null@*/char *origin, *label, *description,
+ *notautomatic, *butautomaticupgrades, *signed_by;
+ /* What architectures and components are there */
+ struct atomlist architectures, components;
+ /* which update rules to use */
+ struct strlist updates;
+ /* which rules to use to pull packages from other distributions */
+ struct strlist pulls;
+ /* the key to sign with, may have no entries to mean unsigned: */
+ struct strlist signwith;
+ long long limit;
+ /* the codename of the archive distribution (when the limit is exceeded) */
+ /*@null@*/struct distribution *archive;
+ /* the override file to use by default */
+ /*@null@*/char *deb_override, *udeb_override, *dsc_override;
+ /* fake component prefix (and codename antisuffix) for Release files: */
+ /*@null@*/char *fakecomponentprefix;
+ /* only loaded when you've done it yourself
+ * (see distribution_loadalloverrides): */
+ struct {
+ /*@null@*/struct overridefile *dsc, *deb, *udeb;
+ } overrides;
+ /* the list of components containing a debian-installer dir,
+ * normally only "main" */
+ struct atomlist udebcomponents;
+ /* the list of components containing a debug directory */
+ struct atomlist ddebcomponents;
+ /* what kind of index files to generate */
+ struct exportmode dsc, deb, udeb, ddeb;
+ bool exportoptions[deo_COUNT];
+ /* (NONE must be 0 so it is the default) */
+ enum trackingtype { dt_NONE=0, dt_KEEP, dt_ALL, dt_MINIMAL } tracking;
+ struct trackingoptions { bool includechanges;
+ bool includebyhand;
+ bool includebuildinfos;
+ bool includelogs;
+ bool needsources;
+ bool keepsources;
+ bool embargoalls;
+ } trackingoptions;
+ trackingdb trackingdb;
+ /* what content files to generate */
+ struct contentsoptions contents;
+ struct atomlist contents_architectures,
+ contents_components,
+ contents_dcomponents,
+ contents_ucomponents;
+ bool contents_architectures_set,
+ contents_components_set,
+ contents_dcomponents_set,
+ contents_ucomponents_set,
+ /* not used, just here to keep things simpler: */
+ ddebcomponents_set,
+ udebcomponents_set;
+ /* A list of all targets contained in the distribution */
+ struct target *targets;
+ /* a filename to look for who is allowed to upload packages */
+ /*@null@*/char *uploaders;
+ /* only loaded after _loaduploaders */
+ /*@null@*/struct uploaders *uploaderslist;
+ /* how and where to log */
+ /*@null@*/struct logger *logger;
+ /* scripts to feed byhand/raw-* files in */
+ /*@null@*/struct byhandhook *byhandhooks;
+ /* a list of names beside Codename and Suite to accept .changes
+ * files via include */
+ struct strlist alsoaccept;
+ /* if != 0, number of seconds to add for Valid-Until */
+ unsigned long validfor;
+ /* RET_NOTHING: do not export with EXPORT_CHANGED, EXPORT_NEVER
+ * RET_OK: export unless EXPORT_NEVER
+ * RET_ERROR_*: only export with EXPORT_FORCE */
+ retvalue status;
+ /* false: not looked at, do not export at all */
+ bool lookedat;
+ /* false: not requested, do not handle at all */
+ bool selected;
+ /* forbid all writing operations and exports if true */
+ bool readonly;
+ /* tracking information might be obsolete */
+ bool needretrack;
+ /* omitted because of --onlysmalldeletes */
+ bool omitted;
+};
+
+retvalue distribution_get(struct distribution * /*all*/, const char *, bool /*lookedat*/, /*@out@*/struct distribution **);
+
+/* set lookedat, start logger, ... */
+retvalue distribution_prepareforwriting(struct distribution *);
+
+/*@dependent@*/struct target *distribution_getpart(const struct distribution *distribution, component_t, architecture_t, packagetype_t);
+
+/* like distribution_getpart, but returns NULL if there is no such target */
+/*@null@*//*@dependent@*/struct target *distribution_gettarget(const struct distribution *distribution, component_t, architecture_t, packagetype_t);
+
+retvalue distribution_fullexport(struct distribution *);
+
+
+retvalue distribution_snapshot(struct distribution *, const char */*name*/);
+
+/* read the configuration from all distributions */
+retvalue distribution_readall(/*@out@*/struct distribution **distributions);
+
+/* mark all dists from <conf> fitting in the filter given in <argc, argv> */
+retvalue distribution_match(struct distribution * /*alldistributions*/, int /*argc*/, const char * /*argv*/ [], bool /*lookedat*/, bool /*readonly*/);
+
+/* get a pointer to the appropriate part of the linked list */
+struct distribution *distribution_find(struct distribution *, const char *);
+
+retvalue distribution_freelist(/*@only@*/struct distribution *distributions);
+enum exportwhen {EXPORT_NEVER, EXPORT_SILENT_NEVER, EXPORT_CHANGED, EXPORT_NORMAL, EXPORT_FORCE };
+retvalue distribution_exportlist(enum exportwhen when, /*@only@*/struct distribution *);
+
+retvalue distribution_loadalloverrides(struct distribution *);
+void distribution_unloadoverrides(struct distribution *distribution);
+
+retvalue distribution_loaduploaders(struct distribution *);
+void distribution_unloaduploaders(struct distribution *distribution);
+#endif
diff --git a/docs/FAQ b/docs/FAQ
new file mode 100644
index 0000000..f5d42a4
--- /dev/null
+++ b/docs/FAQ
@@ -0,0 +1,219 @@
+This is a list of "frequently" asked questions.
+
+1.1) What can I do when reprepro complains about a missing .orig.tar.gz?
+1.2) Why does it refuse a file when one in another suite has the same name?
+1.4) The key to sign my Release files needs a passphrase, what to do?
+1.5) How do I change how files are downloaded.
+1.6) How to omit packages missing files when updating.
+2.1) Does reprepro support to generate Release.gpg files?
+2.2) Does reprepro support tildes ('~') in versions?
+2.3) Does reprepro support generation of Contents-<arch>.gz files?
+3.1) Can I have two versions of a package in the same distribution?
+3.2) Can reprepro pass through a server-supplied Release.gpg?
+9) Feature ... is missing, can you add it?
+
+
+1.1) What can I do when reprepro complains about a missing .orig.tar.gz?
+------------------------------------------------------------------------
+When 'include'ing a .changes file reprepro by default only adds files
+referenced in the .changes file into the pool/-hierarchy and does not
+search for files referenced in a .dsc file and thus fails if this .orig.tar.gz
+is not already in the pool.
+You are facing the choice:
+- copy the .orig.tar.gz by hand into the appropriate place within pool/
+ and try again. reprepro will find it there when you try it the next time
+ and add it to its database.
+- use --ignore=missingfile to tell reprepro to search for such files
+ in the directory the .changes file resides in.
+- modify the .changes file by hand to reference the .orig.tar.gz
+- use changestool (comes with reprepro since version 1.3.0) to
+ list the file. ("changestool <.changesfile> includeallsources")
+- use dpkg-buildpackage -sa the next time you build a package so that
+ it calls dpkg-genchanges with -sa which then always lists .orig.tar.gz
+ and not only if it ends in -0 or -1.
+
+1.2) Why does it refuse a file when one in another suite has the same name?
+----------------------------------------------------------------------------
+Reprepro uses Debian's way to organize the pool hierarchy, which means
+that the directory and name a file is saved under is only determined by
+its sourcename, its name and its version and especially not by the
+distribution it belongs to. (This is the intent of having a pool directory,
+so that if two distributions have the same version, the disk space is only
+used once). This means that if different versions of a package having the
+same version string are put in the same reprepro repository (even if put
+into different distributions within that), reprepro will refuse to do so.
+(Unless you have a md5sum collision, in which case it will put the one and
+just replace the second with the first).
+
+The only way to work around this is to put the different distributions into
+different repositories. But in general it is really worth the effort to
+get the versioning right instead: Having multiple packages with the same
+version make it hard to track down problems, because it is easy to mix
+them up. Also up/downgrading a host from one distribution to the other
+will not change the package but just keep the old (as they are the
+same version, so they have to be the same, apt and dpkg will think).
+
+How to deal with this without separating repositories depends on how
+you reached this situation:
+
+- in the past Debian's stable and stable-security buildds sometimes both
+ built a package and for some architectures the one version entered
+ security.debian.org and the other ftp.debian.org with the next
+ point release. (This should be fixed now. And it is always considered
+ a bug, so if you hit this, please report it). If you mirror such
+ a situation, just update one of the distributions and manually
+ include the package into the other distribution. As the versions
+ are the same, reprepro will keep with this and not try to download
+ the other version, err other same version, err ...
+- backports (i.e. packages rebuild for older distributions)
+ Common practice is to append the version with reducing ~,
+ i.e. 1.0-1 becomes 1.0-1~bpo.7, or 3.0 becomes 3.0~sarge.
+ (This makes sure that if a host is updated the backport is
+ replaced by the real package).
+ If backporting to multiple distributions you get bonus points
+ for making sure newer distributions have higher version numbers.
+ (To make sure which version is considered newer by dpkg use
+ dpkg's --compare-versions action).
+- a package built for multiple distributions
+ is equivalent to the backports case
+- locally modified packages that should be replace by newer official
+ versions: append something like "a0myname". If it should be
+ replaced by security updates of the official package, make sure (using
+ dpkg's --compare-versions) that a security update would have
+ a higher version.
+- locally modified packages that should not be replaced by newer
+ official versions: prefix the version with "99:" and perhaps appending
+ it with something like "-myname". (appending only makes it easier to
+ distinguish, as some tools do not show epochs).
+
+1.4) The key to sign my Release files needs a passphrase, what to do?
+---------------------------------------------------------------------
+Please take a look at gpg-agent.
+You can also use the --ask-passphrase option, but please note this is quite insecure.
+
+1.5) How do I change how files are downloaded.
+----------------------------------------------
+reprepro just calls apt's methods for file retrieval.
+You can give them options in conf/updates like
+Config: Acquire::Http::Proxy=http://proxy.yours.org:8080
+or replace them with other programs speaking the same
+interface.
+
+1.6) How to omit packages missing files when updating.
+------------------------------------------------------
+reprepro does not like broken upstream repositories and just spits out
+errors and does not process the rest. (Implementing simply a ignore for
+that is not that easy, as I would have to special-case holding an old version
+in that case when unavailable packages should be deleted, and make some
+costly information-pushing between layers (after all, each file can belong
+to multiple packages and packages can have more than one file, so keeping
+track which package should get a mark that files were missing needs an
+n-to-n relation that should never be used except in the case where such an
+error happens)).
+What you can do when an upstream repository you update from misses a file:
+- try once with a different mirror not missing those files. You can either
+ change the mirror to use once and change it back afterwards. Or if both
+ mirrors have the same inner directory structure (they usually have) and
+ are accessible via the same method (like both http or both ftp) you can
+ also use the Fallback: option in conf/updates to tell reprepro to get
+ missing files from the other Mirror. This can even be used for things not
+ being a mirror of the same thing, but only having some files at the same
+ place. For example to work around etch r1 listing many older kernel
+ packages but no longer having the needed files, a line
+ Fallback: http://snapshot.debian.net/archive/2007/04/02/debian/
+ can help. (But make sure to look at the run and remove this line
+ once reprepro downloaded the missing files. With this line active and
+ the primary mirror you list in Method: unreachable, reprepro will also
+ download index files from snapshot and make your repository a copy of
+ unstable from 2007-04-02 instead of an updated etch version.)
+- get the file elsewhere (with the correct md5sum), place it in the
+ appropriate place in the pool/ hierarchy and do the update. Reprepro will
+ see the file is already there, add it to its database and just continue
+ with the update.
+- tell reprepro to exclude this package
+* There are multiple ways to do so. Easiest is adding something like
+ FilterFormula: package (!= xen-shell)
+ or
+ FilterFormula: package (!= xen-shell) | version (!=1.0-2) | !files
+ to your rule in conf/updates. ( the "| ! files" tells it to only
+ omit the source package xen-shell, as source packages have a files
+ field. Make sure the package in question does not require you to
+ make the source available or you are not making your repository
+ accessible to others).
+* Another way is adding something like
+ FilterList: install excludefile
+ and adding a file conf/excludefile with content
+ xen-shell deinstall
+ (the install will tell it to install what is not listed in the file,
+ the deinstall on xen-shell will tell it to not install that package)
+* Finally you can also supply a ListHook: with a script copying
+ its first argument to the second argument, removing all occurrences
+ of the package you do not want (take a look intro the dctrl-tool
+ package for tools helping you with this).
+- the worst solution is to just propagate the problem further, by just
+ telling reprepro the file is there with the correct md5sum while it
+ is not. (Via the _addmd5sums command of reprepro). Unless you
+ run checkpool reprepro will not notice what you have done and will
+ not even try to download that file once it becomes available. So
+ don't do this.
+
+2.1) Does reprepro support to generate Release.gpg files?
+---------------------------------------------------------
+Yes, add a SignWith in the suite's definition in conf/distributions.
+(and take a look what the man page says about SignWith)
+
+2.2) Does reprepro support tildes ('~') in versions?
+----------------------------------------------------
+Yes, but in .changes files only since version 0.5.
+(You can work around this in older versions by using includedeb and
+ includedsc on the .deb and .dsc files within the .changes file, though)
+
+2.3) Does reprepro support generation of Contents-<arch>.gz files?
+------------------------------------------------------------------
+Yes, since version 1.1.0 (well, actually since 0.8.2 but a bug
+caused the generated files to not be up to date unless manually
+exporting the distributions in question).
+Look for "Contents" in the man page.
+
+3.1) Can I have two versions of a package in the same distribution?
+-------------------------------------------------------------------
+Sorry, this is not possible right now, as reprepro heavily optimizes
+at only having one version of a package in a suite-type-component-architecture
+quadruple.
+You can have different versions in different architectures and/or components
+within the same suite. (Even different versions of an 'architecture all' package
+in different architectures of the same suite). But within the same
+architecture and the same component of a distribution it is not possible.
+
+3.2) Can reprepro pass through a server-supplied Release.gpg?
+-------------------------------------------------------------------
+No.
+The reason for this is that the Release file will be different,
+so a Release.gpg would not match.
+The reason that the Release file looks differently is that reprepro
+mirrors packages. While it can create a distribution with the same
+packages as a distribution it mirrors. It will decide on its own where
+to put the files, so their Filename: or Directory: might differ. It may
+create a different set of compressions for the generated index files.
+It does not mirror Packages.diff directories (but only comes with helpers
+to create diffs between different states of the mirror). It does not mirror
+Contents files but creates them; and so on. So to be able to mirror
+distribution signatures almost all the functionality of reprepro would need
+to be duplicated (once supporting literal mirroring, once support local
+packages, partial mirroring, merging mirroring, pool condensing), thus I
+decided that this is better a task for another program. (Note that if
+you already have a local literal mirror, you can also use that as upstream
+for partial/merged/extended mirrored distributions of that. If you use
+the file:/// in Method: (as opposed to copy:///), reprepro will make
+hardlinks for files in pool/ if possible).
+
+9) Feature ... is missing, can you add it?
+------------------------------------------
+First, please take another look at the man page. My documentation is not
+very good, so it is easy to overlook some feature even when it is described
+already. If it is not there, just write me a mail (or better write a wishlist
+report to the Debian BTS, then it cannot get lost). Some things I add quite
+fast, other stuff takes a bit. Things incompatible with the current underlying
+infrastructures or past design decisions may never come, but if I have it on the
+TODO list of things to add, it help the code to develop in a direction that
+things like that might be possible in the future.
diff --git a/docs/Makefile.am b/docs/Makefile.am
new file mode 100644
index 0000000..197b7fe
--- /dev/null
+++ b/docs/Makefile.am
@@ -0,0 +1,4 @@
+
+# documentation, example scripts and hooks shipped in the tarball
+EXTRA_DIST = short-howto reprepro.1 changestool.1 rredtool.1 recovery bzip.example xz.example pdiff.example di.example/README di.example/DI-filter.sh di.example/distributions di.example/updates reprepro.bash_completion reprepro.zsh_completion FAQ changelogs.example manual.html copybyhand.example outstore.py sftp.py outsftphook.py
+# manual pages installed by "make install"
+man_MANS = reprepro.1 changestool.1 rredtool.1
+# generated files removed by "make maintainer-clean"
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/docs/bzip.example b/docs/bzip.example
new file mode 100755
index 0000000..ca5db42
--- /dev/null
+++ b/docs/bzip.example
@@ -0,0 +1,35 @@
+#!/bin/sh
+# since reprepro 0.8 this is no longer needed, as it can
+# create .bz2 files on its own (when compiled with libbz2-dev
+# present). It's still here for reference on how such a filter works.
+
+# Copy this script to your conf/ dir as bzip2.sh, make it executable
+# and add to some definition in conf/distributions
+# DscIndices: Sources Release . .gz bzip2.sh
+# DebIndices: Packages Release . .gz bzip2.sh
+# UDebIndices: Packages . .gz bzip2.sh
+# and you have .bz2'd Packages and Sources.
+# (alternatively, if you are very brave, put the full path to this file in there)
+
+# arguments passed by reprepro to export hooks:
+DIROFDIST="$1"
+NEWNAME="$2"
+OLDNAME="$3"
+# this can be old($3 exists), new($2 exists) or change (both):
+STATUS="$4"
+BASENAME="`basename "$OLDNAME"`"
+
+# with reprepro <= 0.7 this could also be Packages.gz or Sources.gz,
+# but now only the uncompressed name is given. (even if not generated)
+if [ "xPackages" = "x$BASENAME" ] || [ "xSources" = "x$BASENAME" ] ; then
+ if [ "x$STATUS" = "xold" ] ; then
+ if [ -f "$DIROFDIST/$OLDNAME.bz2" ] ; then
+ # already compressed: just report it on fd 3
+ # (names written to fd 3 are picked up by reprepro)
+ echo "$OLDNAME.bz2" >&3
+ else
+ # keep fd 3 closed for bzip2 so it cannot garble the protocol
+ bzip2 -c -- "$DIROFDIST/$OLDNAME" >"$DIROFDIST/$OLDNAME.bz2.new" 3>/dev/null
+ echo "$OLDNAME.bz2.new" >&3
+ fi
+ else
+ bzip2 -c -- "$DIROFDIST/$NEWNAME" >"$DIROFDIST/$OLDNAME.bz2.new" 3>/dev/null
+ echo "$OLDNAME.bz2.new" >&3
+ fi
+fi
diff --git a/docs/changelogs.example b/docs/changelogs.example
new file mode 100755
index 0000000..22ec3f9
--- /dev/null
+++ b/docs/changelogs.example
@@ -0,0 +1,246 @@
+#!/bin/sh
+# This is an example script that can be hooked into reprepro
+# to either generate a hierarchy like packages.debian.org/changelogs/
+# or to generate changelog files in the "third party sites"
+# location apt-get changelogs looks if it is not found in
+# Apt::Changelogs::Server.
+#
+# All you have to do is to:
+# - copy it into you conf/ directory,
+# - if you want "third party site" style changelogs, edit the
+# CHANGELOGDIR variable below,
+# and
+# - add the following to any distribution in conf/distributions
+# you want to have changelogs and copyright files extracted:
+#Log:
+# --type=dsc changelogs.example
+# (note the space at the beginning of the second line).
+# This will cause this script to extract changelogs for all
+# newly added source packages. (To generate them for already
+# existing packages, call "reprepro rerunnotifiers").
+
+# DEPENDENCIES: dpkg >= 1.13.9
+
+if test "x${REPREPRO_OUT_DIR:+set}" = xset ; then
+ # Note: due to cd, REPREPRO_*_DIR will no longer
+ # be usable. And only things relative to outdir will work...
+ cd "${REPREPRO_OUT_DIR}" || exit 1
+else
+ # this will also trigger if reprepro < 3.5.1 is used,
+ # in that case replace this with a manual cd to the
+ # correct directory...
+ # (fixed: was 'cat', which would have treated the message
+ # below as a filename instead of printing it)
+ echo "changelog.example needs to be run by reprepro!" >&2
+ exit 1
+fi
+
+# CHANGELOGDIR set means generate full hierarchy
+# (clients need to set Apt::Changelogs::Server to use that)
+CHANGELOGDIR=changelogs
+
+# CHANGELOGDIR empty means generate changelog (and only changelog) files
+# in the new "third party site" place apt-get changelog is using as fallback:
+#CHANGELOGDIR=
+
+# Set to avoid using some predefined TMPDIR or even /tmp as
+# tempdir:
+
+# TMPDIR=/var/cache/whateveryoucreated
+
+if test -z "$CHANGELOGDIR" ; then
+# Extract debian/changelog from the source package whose .dsc is given
+# as $1 and store it next to the .dsc as <name>.changelog (unless it
+# was extracted before), then point the per-distribution symlink
+# current.$CODENAME at it and remember it in JUSTADDED.
+addsource() {
+ DSCFILE="$1"
+ CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")"
+ CHANGELOGFILE="${DSCFILE%.dsc}.changelog"
+ BASEDIR="$(dirname "$CHANGELOGFILE")"
+ if ! [ -f "$CHANGELOGFILE" ] ; then
+ # unpack into a throw-away directory and copy only the changelog
+ EXTRACTDIR="$(mktemp -d)"
+ (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null)
+ install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$CHANGELOGFILE"
+ chmod -R u+rwX -- "$EXTRACTDIR"
+ rm -r -- "$EXTRACTDIR"
+ fi
+ if [ -L "$BASEDIR"/current."$CODENAME" ] ; then
+ # should not be there, just to be sure
+ rm -f -- "$BASEDIR"/current."$CODENAME"
+ fi
+ # mark this as needed by this distribution
+ ln -s -- "$(basename "$CHANGELOGFILE")" "$BASEDIR/current.$CODENAME"
+ JUSTADDED="$CHANGELOGFILE"
+}
+# Drop this distribution's reference to the changelog belonging to the
+# .dsc given as $1; delete the changelog file itself only if no other
+# distribution's current.* symlink still points at it.
+delsource() {
+ DSCFILE="$1"
+ CHANGELOGFILE="${DSCFILE%.dsc}.changelog"
+ BASEDIR="$(dirname "$CHANGELOGFILE")"
+ BASENAME="$(basename "$CHANGELOGFILE")"
+ if [ "x$JUSTADDED" = "x$CHANGELOGFILE" ] ; then
+ # this very changelog was just added in the same run: keep it
+ exit 0
+ fi
+# echo "delete, basedir=$BASEDIR changelog=$CHANGELOGFILE, dscfile=$DSCFILE, "
+ if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$BASENAME" ] ; then
+ rm -- "$BASEDIR/current.$CODENAME"
+ fi
+ # check whether any other distribution still needs the file
+ NEEDED=0
+ for c in "$BASEDIR"/current.* ; do
+ if [ "x$(readlink -- "$c")" = "x$BASENAME" ] ; then
+ NEEDED=1
+ fi
+ done
+ if [ "$NEEDED" -eq 0 -a -f "$CHANGELOGFILE" ] ; then
+ rm -r -- "$CHANGELOGFILE"
+ # to remove the directory if now empty
+ rmdir --ignore-fail-on-non-empty -- "$BASEDIR"
+ fi
+}
+
+else # "$CHANGELOGDIR" set:
+
+# Extract debian/changelog and debian/copyright of the source package
+# whose .dsc is given as $1 into $CHANGELOGDIR/<pool path>/ (unless
+# that directory already exists), then point the per-distribution
+# symlink current.$CODENAME at it and remember it in JUSTADDED.
+addsource() {
+ DSCFILE="$1"
+ CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")"
+ TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}"
+ SUBDIR="$(basename $TARGETDIR)"
+ BASEDIR="$(dirname $TARGETDIR)"
+ if ! [ -d "$TARGETDIR" ] ; then
+ #echo "extract $CANONDSCFILE information to $TARGETDIR"
+ mkdir -p -- "$TARGETDIR"
+ # unpack into a throw-away directory and copy the two files
+ EXTRACTDIR="$(mktemp -d)"
+ (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null)
+ install --mode=644 -- "$EXTRACTDIR"/*/debian/copyright "$TARGETDIR/copyright"
+ install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$TARGETDIR/changelog"
+ chmod -R u+rwX -- "$EXTRACTDIR"
+ rm -r -- "$EXTRACTDIR"
+ fi
+ if [ -L "$BASEDIR"/current."$CODENAME" ] ; then
+ # should not be there, just to be sure
+ rm -f -- "$BASEDIR"/current."$CODENAME"
+ fi
+ # mark this as needed by this distribution
+ ln -s -- "$SUBDIR" "$BASEDIR/current.$CODENAME"
+ JUSTADDED="$TARGETDIR"
+}
+# Drop this distribution's reference to the changelog/copyright dir of
+# the .dsc given as $1; delete the directory only if no other
+# distribution's current.* symlink still points at it.
+delsource() {
+ DSCFILE="$1"
+ TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}"
+ SUBDIR="$(basename $TARGETDIR)"
+ BASEDIR="$(dirname $TARGETDIR)"
+ if [ "x$JUSTADDED" = "x$TARGETDIR" ] ; then
+ # this very directory was just added in the same run: keep it
+ exit 0
+ fi
+# echo "delete, basedir=$BASEDIR targetdir=$TARGETDIR, dscfile=$DSCFILE, "
+ if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$SUBDIR" ] ; then
+ rm -- "$BASEDIR/current.$CODENAME"
+ fi
+ # check whether any other distribution still needs the directory
+ NEEDED=0
+ for c in "$BASEDIR"/current.* ; do
+ if [ "x$(readlink -- "$c")" = "x$SUBDIR" ] ; then
+ NEEDED=1
+ fi
+ done
+ if [ "$NEEDED" -eq 0 -a -d "$TARGETDIR" ] ; then
+ rm -r -- "$TARGETDIR"
+ # to remove the directory if now empty
+ rmdir --ignore-fail-on-non-empty -- "$BASEDIR"
+ fi
+}
+fi # CHANGELOGDIR
+
+# ---- main: parse the arguments reprepro passes to a Log: notifier ----
+# positional arguments: action codename packagetype component
+# architecture name, then version(s) and "--"-separated file lists
+ACTION="$1"
+CODENAME="$2"
+PACKAGETYPE="$3"
+if [ "x$PACKAGETYPE" != "xdsc" ] ; then
+# the --type=dsc should cause this to never happen, but better safe than sorry.
+ exit 1
+fi
+COMPONENT="$4"
+ARCHITECTURE="$5"
+if [ "x$ARCHITECTURE" != "xsource" ] ; then
+ exit 1
+fi
+NAME="$6"
+shift 6
+JUSTADDED=""
+if [ "x$ACTION" = "xadd" -o "x$ACTION" = "xinfo" ] ; then
+ # package added (or info requested): extract its changelog(s)
+ VERSION="$1"
+ shift
+ if [ "x$1" != "x--" ] ; then
+ exit 2
+ fi
+ shift
+ while [ "$#" -gt 0 ] ; do
+ case "$1" in
+ *.dsc)
+ addsource "$1"
+ ;;
+ --)
+ exit 2
+ ;;
+ esac
+ shift
+ done
+elif [ "x$ACTION" = "xremove" ] ; then
+ # package removed: drop changelogs no longer referenced
+ OLDVERSION="$1"
+ shift
+ if [ "x$1" != "x--" ] ; then
+ exit 2
+ fi
+ shift
+ while [ "$#" -gt 0 ] ; do
+ case "$1" in
+ *.dsc)
+ delsource "$1"
+ ;;
+ --)
+ exit 2
+ ;;
+ esac
+ shift
+ done
+elif [ "x$ACTION" = "xreplace" ] ; then
+ # package replaced: first the new files, then "--", then the old ones
+ VERSION="$1"
+ shift
+ OLDVERSION="$1"
+ shift
+ if [ "x$1" != "x--" ] ; then
+ exit 2
+ fi
+ shift
+ while [ "$#" -gt 0 -a "x$1" != "x--" ] ; do
+ case "$1" in
+ *.dsc)
+ addsource "$1"
+ ;;
+ esac
+ shift
+ done
+ if [ "x$1" != "x--" ] ; then
+ exit 2
+ fi
+ shift
+ while [ "$#" -gt 0 ] ; do
+ case "$1" in
+ *.dsc)
+ delsource "$1"
+ ;;
+ --)
+ exit 2
+ ;;
+ esac
+ shift
+ done
+fi
+
+exit 0
+# Copyright 2007,2008,2012 Bernhard R. Link <brlink@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
diff --git a/docs/changestool.1 b/docs/changestool.1
new file mode 100644
index 0000000..e7e49f0
--- /dev/null
+++ b/docs/changestool.1
@@ -0,0 +1,172 @@
+.TH CHANGESTOOL 1 "2010-03-19" "reprepro" REPREPRO
+.SH NAME
+changestool \- verify, dump, modify, create or fix Debian .changes files
+.SH SYNOPSIS
+.B changestool \-\-help
+
+.B changestool
+[
+\fIoptions\fP
+]
+\fI.changes-filename\fP
+\fIcommand\fP
+[
+\fIper-command-arguments\fP
+]
+.SH DESCRIPTION
+changestool is a little program to operate on Debian .changes files,
+as they are produced by \fBdpkg\-genchanges\fP(1) and used to feed
+built Debian packages into Debian repository managers like
+.BR reprepro (1)
+or
+.BR dak .
+
+.SH EXAMPLES
+.P
+.B changestool \fIbloat.changes\fP setdistribution \fIlocal\fP
+.br
+will modify the \fBDistribution:\fP header inside \fIbloat.changes\fP
+to say \fIlocal\fP instead of what was there before.
+.P
+.B changestool \fIreprepro_1.2.0\-1.local_sparc.changes\fP includeallsources
+.br
+will modify the given file to also list \fB.orig.tar.gz\fP it does not list
+because you forgot to build it with
+.BR "dpkg\-buildpackage \-sa" .
+.P
+.B changestool \fIblafasel_1.0_abacus.changes\fP updatechecksums
+.br
+will update the md5sums to those of the files referenced by this file.
+(So one can do quick'n'dirty corrections to them before uploading to
+your private package repository)
+.P
+.B changestool \-\-create \fItest.changes\fP add \fIbla_1\-1.dsc bla_1\-1_abacus.deb\fP
+.br
+will add the specified files (format detected by filename,
+use \fBadddeb\fP or \fBadddsc\fP if you know it).
+If the file \fItest.changes\fP does not exist yet, a minimal one will be
+generated. Though that might be too minimal for most direct uses.
+
+.SH "GLOBAL OPTIONS"
+Options can be specified before the command. Each affects a different
+subset of commands and is ignored by other commands.
+.TP
+.B \-h \-\-help
+Displays a short list of options and commands with description.
+.TP
+.B \-o \-\-outputdir \fIdir\fP
+Not yet implemented.
+.TP
+.B \-s \-\-searchpath \fIpath\fP
+A colon-separated list of directories to search for files if they
+are not found in the directory of the .changes file.
+.TP
+.B \-\-create
+Flag for the commands starting with \fBadd\fP to create the \fB.changes\fP
+file if it does not yet exist.
+.TP
+.B \-\-create\-with\-all\-fields
+Flag for the commands starting with \fBadd\fP to create the \fB.changes\fP
+file if it does not yet exist.
+Unlike \fB\-\-create\fP, this creates more fields to make things like dupload
+happier.
+Currently that creates fake \fBUrgency\fP and \fBChanges\fP fields.
+.TP
+.B \-\-unlzma \fIcommand\fP
+External uncompressor used to uncompress lzma files to look
+into .diff.lzma, .tar.lzma or .tar.lzma within .debs.
+.TP
+.B \-\-unxz \fIcommand\fP
+External uncompressor used to uncompress xz files to look
+into .diff.xz, .tar.xz or .tar.xz within .debs.
+.TP
+.B \-\-lunzip \fIcommand\fP
+External uncompressor used to uncompress lzip files to look
+into .diff.lz, .tar.lz or .tar.lz within .debs.
+.TP
+.B \-\-bunzip2 \fIcommand\fP
+External uncompressor used to uncompress bz2 when compiled without
+libbz2.
+.SH COMMANDS
+.TP
+.BR verify
+Check for inconsistencies in the specified \fB.changes\fP file and the
+files referenced by it.
+.TP
+.BR updatechecksums " [ " \fIfilename\fP " ]"
+Update the checksum (md5sum and size) information within the specified
+\fB.changes\fP file and all \fB.dsc\fP files referenced by it.
+Without arguments, all files will be updated.
+To only update specific files, give their filename (without path) as
+arguments.
+.TP
+.BR setdistribution " [ " \fIdistributions\fP " ]"
+Change the \fBDistribution:\fP header to list the remaining arguments.
+.TP
+.BR includeallsources " [ " \fIfilename\fP " ]"
+Add to the \fB.changes\fP file all files that are referenced by the
+\fB.dsc\fP files it mentions but are not yet listed themselves.
+Without arguments, all missing files will be included.
+To only include specific files, give their filename (without path) as
+arguments.
+
+Take a look at the description of \fB\-si\fP, \fB\-sa\fP and \fB\-sd\fP in
+the manpage of \fBdpkg\-genchanges\fP/\fBdpkg\-buildpackage\fP how to avoid
+to have to do this at all.
+
+Note that while \fBreprepro\fP will just ignore files listed in a \fB.changes\fP
+file when it already has the file with the same size and md5sum, \fBdak\fP
+might choke in that case.
+.TP
+.B adddeb \fIfilenames\fP
+Add the \fB.deb\fP and \fB.udeb\fP files specified by their filenames to
+the \fB.changes\fP file.
+Filenames without a slash will be searched
+in the current directory,
+the directory the changes file resides in
+and in the directories specified by the \fB\-\-searchpath\fP.
+.TP
+.B adddsc \fIfilenames\fP
+Add the \fB.dsc\fP files specified by their filenames to
+the \fB.changes\fP file.
+Filenames without a slash will be searched
+in the current directory,
+in the directory the changes file resides in
+and in the directories specified by the \fB\-\-searchpath\fP.
+.TP
+.B addrawfile \fIfilenames\fP
+Add the files specified by their filenames to
+the \fB.changes\fP file.
+Filenames without a slash will be searched
+in the current directory,
+in the directory the changes file resides in
+and in the directories specified by the \fB\-\-searchpath\fP.
+.TP
+.B add \fIfilenames\fP
+Behave like \fBadddsc\fP for filenames ending in \fB.dsc\fP,
+like \fBadddeb\fP for filenames ending in \fB.deb\fP or \fB.udeb\fP,
+and like \fBaddrawfile\fP for all other filenames
+.TP
+.B dumbremove \fIfilenames\fP
+Remove the specified files from the .changes file.
+No other fields (Architectures, Binaries, ...) are updated and
+no related files are removed.
+Just the given files (which must be specified without any \fB/\fP)
+are no longer listed in the .changes file (they are only removed
+from the .changes file, not from disk).
+
+.SH "SEE ALSO"
+.BR reprepro (1),
+.BR dpkg\-genchanges (1),
+.BR dpkg\-buildpackage (1),
+.BR md5sum (1).
+.SH "REPORTING BUGS"
+Report bugs or wishlist requests to the Debian BTS
+(e.g. by using \fBreportbug reprepro\fP)
+or directly to <brlink@debian.org>.
+.br
+.SH COPYRIGHT
+Copyright \(co 2006-2009 Bernhard R. Link
+.br
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/docs/copybyhand.example b/docs/copybyhand.example
new file mode 100755
index 0000000..0b1762d
--- /dev/null
+++ b/docs/copybyhand.example
@@ -0,0 +1,28 @@
+#!/bin/sh
+# Example byhand hook for reprepro's processincoming:
+# add to your conf/distributions something like
+##ByhandHooks:
+## * * * copybyhand.sh
+# and copy this script as copybyhand.sh into your conf/
+# directory (or give the full path); processincoming will
+# then copy every byhand/raw file to dists/codename/extra/*
+
+set -e
+
+if [ $# != 5 ] ; then
+ echo "to be called by reprepro as byhandhook" >&2
+ exit 1
+fi
+if [ -z "$REPREPRO_DIST_DIR" ] ; then
+ echo "to be called by reprepro as byhandhook" >&2
+ exit 1
+fi
+
+codename="$1" # distribution the upload was accepted into
+section="$2" # byhand section field from the .changes file
+priority="$3" # byhand priority field from the .changes file
+basefilename="$4" # file name without any path
+fullfilename="$5" # full path of the file to copy
+
+mkdir -p "$REPREPRO_DIST_DIR/$codename/extra"
+install -T -- "$fullfilename" "$REPREPRO_DIST_DIR/$codename/extra/$basefilename"
diff --git a/docs/di.example/DI-filter.sh b/docs/di.example/DI-filter.sh
new file mode 100644
index 0000000..38696a3
--- /dev/null
+++ b/docs/di.example/DI-filter.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+# ListHook for reprepro:
+# select only the debs needed for a D-I netinstall cd
+
+IN="$1"
+OUT="$2"
+
+DIR=`dirname "$IN"`
+FILE=`basename "$IN"`
+CODENAME=`echo "$FILE" | cut -d"_" -f1`
+COMPONENT=`echo "$FILE" | cut -d"_" -f4`
+ARCH=`echo "$FILE" | cut -d"_" -f5`
+
+echo "### $IN"
+echo "# Source: $IN"
+echo "# Debs: $DIR/$FILE.debs"
+echo "# Out: $OUT"
+echo
+
+# generate list of packages needed
+DEBCDDIR="/usr/share/debian-cd"
+export ARCH CODENAME DEBCDDIR DIR
+make -f "$DEBCDDIR/Makefile" \
+ BDIR='$(DIR)' \
+ INSTALLER_CD='2' \
+ TASK='$(DEBCDDIR)/tasks/debian-installer+kernel' \
+ BASEDIR='$(DEBCDDIR)' \
+ forcenonusoncd1='0' \
+ VERBOSE_MAKE='yes' \
+ "$DIR/list"
+
+# hotfix abi name for sparc kernel
+sed -e 's/-1-/-2-/' "$DIR/list" > "$DIR/$FILE.debs"
+rm -f "$DIR/list"
+
+# filter only needed packages
+grep-dctrl `cat "$DIR/$FILE.debs" | while read P; do echo -n " -o -X -P $P"; done | cut -b 5-` "$IN" >"$OUT"
+
+# cleanup
+rm -f "$DIR/$FILE.debs"
diff --git a/docs/di.example/README b/docs/di.example/README
new file mode 100644
index 0000000..983ff5f
--- /dev/null
+++ b/docs/di.example/README
@@ -0,0 +1,13 @@
+This is an example from Goswin Brederlow <brederlo@informatik.uni-tuebingen.de>
+for the ListHook directive.
+
+He describes the example as:
+> attached a sample config that mirrors only packages from the debian-cd
+> netinstall CD image task. I think this would make a usefull example
+> for making a partial mirror by filtering.
+
+The speciality of this example, which needs the ListHook and is not
+easily possible with FilterList, is the need for different
+packages on different architectures. (Though extending
+FilterList to support this is on my TODO list.)
+
diff --git a/docs/di.example/distributions b/docs/di.example/distributions
new file mode 100644
index 0000000..316a051
--- /dev/null
+++ b/docs/di.example/distributions
@@ -0,0 +1,23 @@
+Origin: Debian-Installer
+Label: Debian-Installer
+Suite: testing
+Codename: sarge
+Version: 3.1
+Architectures: sparc i386
+Components: main
+UDebComponents: main
+Description: Debian Installer partial mirror
+Update: - debian
+#SignWith: yes
+
+Origin: Debian-Installer
+Label: Debian-Installer
+Suite: unstable
+Codename: sid
+Version: 3.2
+Architectures: sparc i386
+Components: main
+UDebComponents: main
+Description: Debian Installer partial mirror
+Update: - debian
+#SignWith: yes
diff --git a/docs/di.example/updates b/docs/di.example/updates
new file mode 100644
index 0000000..af39f5d
--- /dev/null
+++ b/docs/di.example/updates
@@ -0,0 +1,5 @@
+Name: debian
+Architectures: sparc i386
+Method: http://ftp.de.debian.org/debian
+#VerifyRelease: FBC60EA91B67D3C0
+ListHook: /mnt/mirror/DI/DI-filter.sh
diff --git a/docs/mail-changes.example b/docs/mail-changes.example
new file mode 100755
index 0000000..2402aaf
--- /dev/null
+++ b/docs/mail-changes.example
@@ -0,0 +1,69 @@
+#!/bin/sh
+#
+#
+# Copyright 2016 Luca Capello <luca.capello@infomaniak.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+#
+#
+# This is an example script that can be hooked into reprepro
+# to send an email after a .changes file is processed.
+#
+# All you have to do is to:
+# - copy it into your conf/ directory,
+# - add the following to any distribution in conf/distributions
+#   you want to have emails sent for:
+#Log:
+# --changes mail-changes.example
+# (note the space at the beginning of the second line).
+#
+# DEPENDENCIES: mailx
+
+
+set -e
+
+
+if test "x${REPREPRO_OUT_DIR:+set}" = xset ; then
+    # Note: due to cd, REPREPRO_*_DIR will no longer
+    # be usable. And only things relative to outdir will work...
+    cd "${REPREPRO_OUT_DIR}" || exit 1
+else
+    # this will also trigger if reprepro < 3.5.1 is used,
+    # in that case replace this with a manual cd to the
+    # correct directory...
+    echo "mail-changes.example needs to be run by reprepro!" >&2
+    exit 1
+fi
+
+
+MAIL_TO="$USER" # adjust: recipient of the notification mail
+
+ACTION="$1"
+CODENAME="$2"
+PACKAGENAME="$3"
+PACKAGEVERSION="$4"
+CHANGESFILE="$5"
+
+if [ "x$ACTION" = "xaccepted" ]; then
+    MAIL_FROM="$(grep '^Changed-By' "$CHANGESFILE" | \
+        sed -e 's/^Changed-By/From/')"
+    ARCHITECTURE="$(grep '^Architecture' "$CHANGESFILE" | \
+        sed -e 's/Architecture: //')"
+    MAIL_SUBJECT="Accepted $PACKAGENAME $PACKAGEVERSION ($ARCHITECTURE) into $CODENAME"
+    cat "$CHANGESFILE" | \
+        mail -a "$MAIL_FROM" -s "$MAIL_SUBJECT" "$MAIL_TO"
+fi
+
+
+exit 0
diff --git a/docs/manual.html b/docs/manual.html
new file mode 100644
index 0000000..af6a993
--- /dev/null
+++ b/docs/manual.html
@@ -0,0 +1,1497 @@
+<html><head>
+<title>reprepro manual</title>
+<!-- some style elements stolen from or inspired by bugs.debian.org /-->
+<style>
+<!--
+html { color: #000; background: #fefefe; font-family: serif; margin: 1em; border: 0; padding: 0; line-height: 120%; }
+body { margin: 0; border: 0; padding: 0; }
+pre { text-align: left; border: #f0f0f0 1px solid; padding: 1px;}
+pre.shell { text-align: left; border: none; border-left: #f0f0f0 1px solid; padding: 2px;}
+h1, h2, h3 { text-align: left; font-family: sans-serif; background-color: #f0f0ff; color: #3c3c3c; border: #a7a7a7 1px solid; padding: 10px;}
+h1 { font-size: 180%; line-height: 150%; }
+h2 { font-size: 150% }
+h3 { font-size: 100% }
+ul.dir { list-style-type: disc; }
+ul { list-style-type: square; }
+dt.dir, dt.file, dt.symlink { font-weight:bold; font-family: sans-serif; }
+/-->
+</style>
+</head>
+<body>
+<h1>reprepro manual</h1>
+This manual documents reprepro, a tool to generate and administer
+Debian package repositories.
+<br>
+Other useful resources:
+<ul>
+<li> the <a href="http://mirrorer.alioth.debian.org/">homepage</a> of reprepro.</li>
+<li> <a href="file://localhost/usr/share/doc/reprepro/">local directory</a> with documentation and examples, if you have reprepro installed.</li>
+<li> the <a href="http://git.debian.org/?p=mirrorer/reprepro.git;a=blob_plain;f=docs/FAQ;hb=HEAD">Frequently Asked Questions</a></li>
+</ul>
+<h2>Table of contents</h2>
+Sections of this document:
+<ul>
+<li><a href="#introduction">Introduction</a></li>
+<li><a href="#firststeps">First steps</a></li>
+<li><a href="#dirbasics">Repository basics</a></li>
+<li><a href="#config">Config files</a></li>
+<li><a href="#export">Generation of index files</a>
+ <ul>
+ <li><a href="#compression">Compression and file names</a></li>
+ <li><a href="#signing">Signing</a></li>
+ <li><a href="#contents">Contents files</a></li>
+ <li><a href="#exporthook">Additional index files (like .diff)</a></li>
+ </ul></li>
+<li><a href="#localpackages">Local packages</a>
+ <ul>
+ <li><a href="#include">Including via command line</a></li>
+ <li><a href="#incoming">Processing an incoming queue</a></li>
+ </ul></li>
+<li><a href="#mirroring">Mirroring</a></li>
+<li><a href="#propagation">Propagation of packages</a></li>
+<li><a href="#snapshosts">Snapshots</a> (TODO)</li>
+<li><a href="#tracking">Source package tracking</a> (TODO)</li>
+<li><a href="#hooks">Extending reprepro / Hooks and more</a></li>
+<li><a href="#maintenance">Maintenance</a></li>
+<li><a href="#internals">Internals</a></li>
+<li><a href="#recovery">Disaster recovery</a></li>
+<li><a href="#paranoia">Paranoia</a></li>
+<li><a href="#counterindications">What reprepro cannot do</a></li>
+</ul>
+<h2><a name="introduction">Introduction</a></h2>
+<h3>What reprepro does</h3>
+Reprepro is a tool to take care of a repository of Debian packages
+(<tt>.dsc</tt>,<tt>.deb</tt> and <tt>.udeb</tt>).
+It installs them to the proper places, generates indices of packages
+(<tt>Packages</tt> and <tt>Sources</tt> and their compressed variants)
+and of index files (<tt>Release</tt> and optionally <tt>Release.gpg</tt>),
+so tools like <tt>apt</tt> know what is available and where to get it from.
+It will keep track which file belongs to where and remove files no longer
+needed (unless told to not do so).
+It can also make (partial) mirrors of remote repositories,
+including merging multiple sources and
+automatically (if explicitly requested) removing packages no longer available
+in the source.
+And many other things (sometimes I fear it got a few features too much).
+<h3>What reprepro needs</h3>
+It needs some libraries (<tt>zlib</tt>, <tt>libgpgme</tt>, <tt>libdb</tt> (Version 3, 4.3 or 4.4)) and can be compiled with some more for additional features (<tt>libarchive</tt>,
+<tt>libbz2</tt>).
+Otherwise it only needs
+<tt>apt</tt>'s methods (only when downloading stuff),
+<tt>gpg</tt> (only when signing or checking signatures),
+and if compiled without <tt>libarchive</tt> it needs <tt>tar</tt> and <tt>ar</tt> installed.
+<br>
+If you tell reprepro to call scripts for you, you will of course need the interpreters for these scripts:
+The included example to generate pdiff files needs python. The example to extract
+changelogs needs dpkg-source.
+<h3>What this manual aims to do</h3>
+This manual aims to give some overview over the most important features,
+so people can use them and so that I do not implement something a second
+time because I forgot support is already there.
+For a full reference of all possible commands and config options take a
+look at the man page, as this manual might miss some of the more obscure
+options.
+<h2><a name="firststeps">First steps</a></h2>
+<h3>generate a repository with local packages</h3>
+<ul>
+<li>Choose a directory (or create it).</li>
+<li>Create a subdirectory called <tt>conf</tt> in there.</li>
+<li>In the <tt>conf/</tt> subdirectory create a file called <tt>distributions</tt>,
+with content like:
+<pre class="file">
+Codename: mystuff
+Components: main bad
+Architectures: sparc i386 source
+</pre>
+or with content like:
+<pre class="file">
+Codename: andy
+Suite: rusty
+Components: main bad
+Architectures: sparc i386 source
+Origin: myorg
+Version: 20.3
+Description: my first little repository
+</pre>
+(Multiple distributions are separated by empty lines, Origin, Version and Description
+are just copied to the generated Release files, more things controlling reprepro can
+appear which are described later).
+</li>
+<li>If your <tt>conf/distributions</tt> file contained a <tt>Suite:</tt> and you
+are too lazy to generate the symbolic links yourself, call:
+<pre class="shell">
+reprepro -b $YOURBASEDIR createsymlinks
+</pre>
+</li>
+<li>Include some package, like:
+<pre class="shell">
+reprepro -b $YOURBASEDIR include mystuff mypackage.changes
+</pre>
+or:
+<pre class="shell">
+reprepro -b $YOURBASEDIR includedeb mystuff mypackage.deb
+</pre>
+</li>
+<li>Take a look at the generated <tt>pool</tt> and <tt>dists</tt>
+directories. They contain everything needed to apt-get from.
+Tell apt to include it by adding the following to your <tt>sources.list</tt>:
+<pre class="file">
+deb file:///$YOURBASEDIR mystuff main bad
+</pre>
+or make it available via http or ftp and do the same <tt>http://</tt> or <tt>ftp://</tt> source.</li>
+</ul>
+<h3>mirroring packages from other repositories</h3>
+This example shows how to generate a mirror of a single architecture with
+all packages of etch plus security updates:
+<ul>
+<li>Choose a directory (or create it).</li>
+<li>Create a subdirectory called <tt>conf</tt> in there (if not already existent).</li>
+<li>In the <tt>conf/</tt> subdirectory create a file called <tt>distributions</tt>,
+with content like (or add to that file after an empty line):
+<pre class="file">
+Origin: Debian
+Label: Debian
+Suite: stable
+Version: 4.0
+Codename: etch
+Architectures: i386
+Components: main
+Description: Debian 4.0 etch + security updates
+Update: - debian security
+Log: logfile
+</pre>
+Actually only <tt>Codename</tt>, <tt>Components</tt>, <tt>Architectures</tt> and <tt>Update</tt> are needed, the rest is just information for clients.
+The <tt>Update</tt> line tells to delete everything no longer available (<tt>-</tt>),
+then add the <tt>debian</tt> and <tt>security</tt> rules, which still have to be defined:
+</li>
+<li>In the <tt>conf/</tt> subdirectory create a file called <tt>updates</tt>,
+with content like (or add to that file after an empty line:):
+or with content like:
+<pre class="file">
+Name: security
+Method: http://security.debian.org/debian-security
+Fallback: ftp://klecker.debian.org/debian-security
+Suite: */updates
+VerifyRelease: A70DAF536070D3A1|B5D0C804ADB11277
+Architectures: i386
+Components: main
+UDebComponents:
+
+Name: debian
+Method: http://ftp2.de.debian.org/debian
+Config: Acquire::Http::Proxy=http://proxy.myorg.de:8080
+VerifyRelease: A70DAF536070D3A1|B5D0C804ADB11277
+</pre>
+(If there are no Architecture, Components or UDebComponents, it will try all the distribution to update has. Fallback means a URL to try when the first cannot offer some file (Has to be the same method)).
+</li>
+<li>Tell reprepro to update:
+<pre class="shell">
+reprepro -b $YOURBASEDIR update etch
+</pre>
+</li>
+<li>Take a look at the generated <tt>pool</tt> and <tt>dists</tt>
+directories. They contain everything needed to apt-get from.
+Tell apt to include it by adding the following to your <tt>sources.list</tt>:
+<pre class="shell">
+deb file:///$YOURBASEDIR etch main
+</pre>
+or make it available via http or ftp.</li>
+</ul>
+<h2><a name="dirbasics">Repository basics</a></h2>
+An <tt>apt-get</tt>able repository of Debian packages consists of two parts:
+the index files describing what is available and where it is and the actual
+Debian binary (<tt class="suffix">.deb</tt>),
+installer binary (<tt class="suffix">.udeb</tt>),
+and source (<tt class="suffix">.dsc</tt> together with
+<tt class="suffix">.tar.gz</tt> or
+<tt class="suffix">.orig.tar.gz</tt> and
+<tt class="suffix">.diff.gz</tt>) packages.
+<br>
+While you do not know how these look like to use reprepro, it's always a good
+idea to know what you are creating.
+<h3>Index files</h3>
+All index files are in subdirectories of a directory called
+<tt class="dirname">dists</tt>. Apt is very decided what names those should
+have, including the name of <tt class="dirname">dists</tt>.
+Including all optional and extensional files, the hierarchy looks like this:
+
+<dl class="dir">
+<dt class="dir">dists</dt><dd>
+ <dl class="dir">
+ <dt class="dir">CODENAME</dt><dd>
+Each distribution has its own subdirectory here, named by its codename.
+ <dl class="dir">
+ <dt class="file">Release</dt><dd>
+This file describes what distribution this is and the checksums of
+all index files included.
+ </dd>
+ <dt class="file">Release.gpg</dt><dd>
+This is the optional detached gpg signature of the Release file.
+Take a look at the <a href="#signing">section about signing</a> for how to
+activate this.
+ </dd>
+ <dt class="file">Contents-ARCHITECTURE.gz</dt><dd>
+This optional file lists all files and which packages they belong to.
+It's downloaded and used by tools like
+<a href="http://packages.debian.org/apt-file">apt-file</a>
+to allow users to determine which package to install to get a specific file.
+<br>
+To activate generating of these files by reprepro, you need a <a href="#contents">Contents</a>
+header in your distribution declaration.
+ </dd>
+ <dt class="dir">COMPONENT1</dt><dd>
+Each component has it's own subdirectory here. They can be named whatever users
+can be bothered to write into their <tt class="filename">sources.list</tt>, but
+things like <tt>main</tt>, <tt>non-free</tt> and <tt>contrib</tt> are common.
+But funny names like <tt>bad</tt> or <tt>universe</tt> are just as possible.
+ <dl class="dir">
+ <dt class="dir">source</dt><dd>
+If this distribution supports sources, this directory lists which source
+packages are available in this component.
+ <dl class="dir">
+ <dt class="file">Release</dt><dd>
+This file contains a copy of those information about the distribution
+applicable to this directory.
+ </dd>
+ <dt class="file">Sources</dt>
+ <dt class="file">Sources.gz</dt>
+ <dt class="file">Sources.bz2</dt><dd>
+These files contain the actual description of the source Packages. By default
+only the <tt class="suffix">.gz</tt> file created, to create all three add the
+following to the declarations of the distributions:
+<pre class="config">
+DscIndices Sources Release . .gz .bz2
+</pre>
+That header can also be used to name those files differently, but then apt
+will no longer find them...
+ </dd>
+ <dt class="dir">Sources.diff</dt><dd>
+This optional directory contains diffs, so that only parts of the index
+file must be downloaded if it changed. While reprepro cannot generate these
+so-called <tt>pdiff</tt>s itself, it ships both with a program called rredtool
+and with an example python script to generate those.
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+ <dl class="dir">
+ <dt class="dir">binary-ARCHITECTURE</dt><dd>
+Each architecture has its own directory in each component.
+ <dl class="dir">
+ <dt class="file">Release</dt><dd>
+This file contains a copy of those information about the distribution
+applicable to this directory.
+ </dd>
+ <dt class="file">Packages</dt>
+ <dt class="file">Packages.gz</dt>
+ <dt class="file">Packages.bz2</dt><dd>
+These files contain the actual description of the binary Packages. By default
+only the uncompressed and <tt class="suffix">.gz</tt> files are created.
+To create all three, add the following to the declarations of the distributions:
+<pre class="config">
+DebIndices Packages Release . .gz .bz2
+</pre>
+That header can also be used to name those files differently, but then apt
+will no longer find them...
+ </dd>
+ <dt class="dir">Packages.diff</dt><dd>
+This optional directory contains diffs, so that only parts of the index
+file must be downloaded if it changed. While reprepro cannot generate these
+so-called <tt>pdiff</tt>s itself, it ships both with a program called rredtool
+and with an example python script to generate those.
+ </dd>
+ </dl>
+ </dd>
+ <dt class="dir">debian-installer</dt><dd>
+This directory contains information about the <tt class="suffix">.udeb</tt>
+modules for the <a href="http://www.debian.org/devel/debian-installer/">Debian-Installer</a>.
+Those are actually just a very stripped down form of normal <tt class="suffix">.deb</tt>
+packages and this the hierarchy looks very similar:
+
+ <dl class="dir">
+ <dt class="dir">binary-ARCHITECTURE</dt><dd>
+ <dl class="dir">
+ <dt class="file">Packages</dt><dd></dd>
+ <dt class="file">Packages.gz</dt><dd></dd>
+ </dl>
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+ </dd>
+ <dt class="dir">COMPONENT2</dt><dd>
+There is one dir for every component. All look just the same.
+ </dd>
+ </dl>
+ </dd>
+ <dt class="symlink">SUITE -> CODENAME</dt><dd>
+To allow accessing distribution by function instead of by name, there are often
+symbolic links from suite to codenames. That way users can write
+<pre class="config">
+deb http://some.domain.tld/debian SUITE COMPONENT1 COMPONENT2
+</pre>
+instead of
+<pre class="config">
+deb http://some.domain.tld/debian CODENAME COMPONENT1 COMPONENT2
+</pre>
+in their <tt class="filename">/etc/apt/sources.list</tt> and totally get
+surprised by getting something new after a release.
+ </dd>
+ </dl>
+</dd></dl>
+<h3>Package pool</h3>
+While the index files have a required filename, the actual files
+are given just as relative path to the base directory you specify
+in your sources list. That means apt can get them no matter what
+scheme is used to place them. The classical way Debian used till
+woody was to just put them in subdirectories of the
+<tt class="dir">binary-ARCHITECTURE</tt> directories, with the exception
+of the architecture-independent packages, which were put into a
+artificial <tt class="dir">binary-all</tt> directory. This was replaced
+for the official repository with package pools, which reprepro also uses.
+(Actually reprepro stores everything in pool a bit longer than the official
+repositories, that's why it recalculates all filenames without exception).
+<br>
+In a package pool, all package files of all distributions in that repository
+are stored in a common directory hierarchy starting with <tt class="dir">pool/</tt>,
+only separated by the component they belong to and the source package name.
+As everything this has disadvantages and advantages:
+<ul><li>disadvantages
+ <ul><li>different files in different distributions must have different filenames
+ </li><li>it's impossible to determine which distribution a file belongs to by path and filename (think mirroring)
+ </li><li>packages can no longer be grouped together in common subdirectories by having similar functions
+ </li></ul>
+</li><li>advantages
+ <ul><li>the extremely confusing situation of having differently build packages with the same version if different distributions gets impossible by design.
+ </li><li>the source (well, if it exists) is in the same directory as the binaries generated from it
+ </li><li>same files in different distributions need disk-space and bandwidth only once
+ </li><li>each package can be found only knowing component and sourcename
+ </li></ul>
+</li></ul>
+Now let's look at the actual structure of a pool (there is currently no difference
+between the pool structure of official Debian repositories and those generated by
+reprepro):
+
+<dl class="dir">
+<dt class="dir">pool</dt><dd>
+ The directory all this resides in is normally called <tt class="dir">pool</tt>.
+ That's nowhere hard coded in apt but that only looks at the relative
+ directory names in the index files. But there is also no reason to name
+ it differently.
+ <dl class="dir">
+ <dt class="dir">COMPONENT1</dt><dd>
+Each component has it's own subdirectory here.
+They can be named whatever users
+can be bothered to write into their <tt class="filename">sources.list</tt>, but
+things like <tt>main</tt>, <tt>non-free</tt> and <tt>contrib</tt> are common.
+But funny names like <tt>bad</tt> or <tt>universe</tt> are just as possible.
+ <dl class="dir">
+ <dt class="dir">a</dt><dd>
+As there are really many different source packages,
+the directory would be too full when all put here.
+So they are separated in different directories.
+Source packages starting with <tt class="constant">lib</tt> are put into a
+directory named after the first four letters of the source name.
+Everything else is put in a directory having the first letter as name.
+ <dl class="dir">
+ <dt class="dir">asource</dt><dd>
+Then the source package name follows.
+So this directory <tt class="dir">pool/COMPONENT1/a/asource/</tt> would contain
+all files of different versions of the hypothetical package <tt class="constant">asource</tt>.
+ <dl class="dir">
+ <dt class="dir">asource</dt><dd>
+ <dt class="file">a-source_version.dsc</dt>
+ <dt>a-source_version.tar.gz</dt><dd>
+The actual source package consists of its description file (<tt class="suffix">.dsc</tt>)
+and the files references by that.
+ </dd>
+ <dt class="file">binary_version_ARCH1.deb</dt>
+ <dt class="file">binary_version_ARCH2.deb</dt>
+ <dt class="file">binary2_version_all.deb</dt><dd>
+ <dt class="file">di-module_version_ARCH1.udeb</dt><dd>
+Binary packages are stored here to.
+So to know where a binary package is stored you need to know what its source package
+name is.
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+ </dd>
+ <dt class="dir">liba</dt><dd>
+As described before packages starting with <tt class="constant">lib</tt> are not stored
+in <tt class="dir">l</tt> but get a bit more context.
+ </dd>
+ </dl>
+ </dd>
+ <dt class="dir">COMPONENT2</dt><dd>
+There is one dir for every component. All look just the same.
+ </dd>
+ </dl>
+</dd></dl>
+As said before, you don't need to know this hierarchy in normal operation.
+reprepro will put everything to where it belong, keep account what is there
+and needed by what distribution or snapshot, and delete files no longer needed.
+(Unless told otherwise or when you are using the low-level commands).
+<h2><a name="config">Config files</a></h2>
+Configuring a reprepro repository is done by writing some config files
+into a directory.
+This directory is currently the <tt class="dir">conf</tt> subdirectory of the
+base directory of the repository,
+unless you specify <tt class="option">--confdir</tt> or set the
+environment variable <tt class="env">REPREPRO_CONFIG_DIR</tt>.
+
+<dl class="dir">
+<dt class="dir">options</dt><dd>
+If this file exists, reprepro will consider each line an additional
+command line option.
+Arguments must be in the same line after an equal sign.
+
+Options specified on the command line take precedence.
+</dd>
+<dt class="dir">distributions</dt><dd>
+This is the main configuration file and the only one that is needed in all
+cases.
+It lists the distributions this repository contains and their properties.
+<br>
+See <a href="#firststeps">First steps</a> for a short example or the manpage
+for a list of all possible fields.
+</dd>
+<dt class="dir">updates</dt><dd>
+Rules about where to download packages from other repositories.
+See the section <a href="#mirroring">Mirroring / Updating</a>
+for more examples or the man page for a full reference.
+</dd>
+<dt class="dir">pulls</dt><dd>
+Rules about how to move packages in bulk between distributions
+within the same repository.
+See the section <a href="#propagation">Propagation of packages</a>
+for an example or the man page for full reference.
+</dd>
+<dt class="dir">incoming</dt><dd>
+Rules for incoming queues as processed by <tt class="command">processincoming</tt>.
+See <a href="#processincoming-incoming-config">Processing an incoming queue</a> for more information.
+</dd>
+</dl>
+<h2><a name="export">Generation of index files</a></h2>
+<h3>Deciding when to generate</h3>
+As reprepro stores all state in its database,
+you can decide when you want them to be written to the <tt class="dir">dists/</tt>
+directory.
+You can always tell reprepro to generate those files with the <tt>export</tt> command:
+<pre class="command">
+reprepro -b $YOURBASEDIR export $CODENAMES
+</pre>
+This can be especially useful, if you just edited <tt class="file">conf/distributions</tt>
+and want to test what it generates.
+<p>
+While that command regenerates all files, in normal operation reprepro will only
+regenerate files where something just changed or that are missing.
+With the <tt class="option">--export</tt> option you can control when this will happen:
+<dl><dt>never</dt><dd>Don't touch any index files.
+This can be useful for doing multiple operations in a row and not wanting to regenerate
+the indices all the time.
+Note that unless you do an explicit export or change the same parts later without that
+option, the generated index files may be permanently out of date.
+</dd><dt>silent-never</dt><dd>Like never, but be more silent about it.
+</dd><dt>changed</dt><dd>This is the default behaviour since 3.0.1.
+Only export distributions where something changed
+(and no error occurred that makes an inconsistent state likely).
+And in those distributions only (re-)generate files whose content should have been changed
+by the current action or which are missing.
+</dd><dt>lookedat</dt><dd>New name for <tt>normal</tt> since 3.0.1.
+</dd><dt>normal</dt><dd>This was the default behaviour until 3.0.0 (changed in 3.0.1).
+In this mode all distributions are processed that were looked at without error
+(where error means only errors happening while the package was open so have a chance
+to cause strange contents).
+This ensures that even after an operation that had nothing to do the looked at
+distribution has all the files exported needed to access it. (But still only files
+missing or whose content would change with this action are regenerated).
+</dd><dt>force</dt><dd>Also try to write the current state if some error occurred.
+In all other modes reprepro will not write the index files if there was a problem.
+While this keeps the repository usable for users, it means that you will need an
+explicit export to write possible other changes done before that in the same run.
+(reprepro will tell you that at the end of the run with error, but you should not
+miss it).
+</dd></dl>
+<h3>Distribution specific fields</h3>
+There are a lot of <tt class="file">conf/distributions</tt> headers to control
+what index files to generate for some distribution, how to name
+them, how to postprocess them and so on. The most important are:
+<h4>Fields for the Release files</h4>
+The following headers are copied verbatim to the Release file, if they exist:
+<tt class="header">Origin</tt>,
+<tt class="header">Label</tt>,
+<tt class="header">Codename</tt>,
+<tt class="header">Suite</tt>,
+<tt class="header">Architectures</tt> (excluding a possible value "<tt>source</tt>"),
+<tt class="header">Components</tt>,
+<tt class="header">Description</tt>,
+<tt class="header">NotAutomatic</tt>, and
+<tt class="header">ButAutomaticUpgrades</tt>.
+<h4><a name="compression">Choosing compression and file names</a></h4>
+Depending on the type of the index files, different files are generated.
+Not specifying anything is equivalent to:
+<pre class="config">
+ DscIndices: Sources Release .gz
+ DebIndices: Packages Release . .gz
+ UDebIndices: Packages . .gz
+</pre>
+This means to generate <tt>Release</tt>, <tt>Sources.gz</tt> for sources,
+<tt>Release</tt>, <tt>Packages</tt> and <tt>Packages.gz</tt> for binaries
+and <tt>Packages</tt> and <tt>Packages.gz</tt> for installer modules.
+<br>
+The format of these headers is the name of the index file to generate, followed
+by the optional name for a per-directory release description
+(when no name is specified, no file is generated).
+Then a list of compressions:
+A single dot (<tt>.</tt>) means generating an uncompressed index,
+<tt>.gz</tt> means generating a gzipped output,
+while <tt>.bz2</tt> requests a bzip2ed file.
+(<tt>.bz2</tt> is not available when disabled at compile time).
+After the compressions a script can be given that is called to generate/update
+additional forms, see <a href="#exporthook">&quot;Additional index files&quot;</a>.
+<h4><a name="signing">Signing</a></h4>
+If there is a <tt class="config">SignWith</tt> header, reprepro will try
+to generate a <tt class="file">Release.gpg</tt> file using libgpgme.
+If the value of the header is <tt>yes</tt> it will use the first key
+it finds, otherwise it will give the option to libgpgme to determine the
+key. (Which means fingerprints and keyids work fine, and whatever libgpgme
+supports, which might include most that gpg supports to select a key).
+<br>
+The best way to deal with keys needing passphrases is to use
+<a href="http://packages.debian.org/gnupg-agent">gpg-agent</a>.
+The only way to specify which keyring to use is to set the
+<tt class="env">GNUPGHOME</tt> environment variable, which will affect all
+distributions.
+<h4><a name="contents">Contents files</a></h4>
+Reprepro can generate files called
+<tt class="file">dists/CODENAME/Contents-ARCHITECTURE.gz</tt>
+listing all files in all binary packages available for the selected
+architecture in that distribution and which package they belong to.
+<br>
+This file can either be used by humans directly or downloaded
+and searched with tools like
+<a href="http://packages.debian.org/apt-file">apt-file</a>.
+<br>
+To activate generating of these files by reprepro, you need a <tt class="config">Contents</tt> header in that distribution's declaration in <tt class="file">conf/distributions</tt>,
+like:
+<pre class="config">
+Contents:
+</pre>
+Versions before 3.0.0 need a ratio number there, like:
+<pre class="config">
+Contents: 1
+</pre>
+The number is the inverse ratio of not yet looked at and cached files to process in
+every run. The larger the number, the more packages are missing. 1 means to list everything.
+<br>
+The arguments of the Contents field and other fields control
+which Architectures to generate Contents files for and which
+Components to include in those. For example
+<pre class="config">
+Contents: udebs nodebs . .gz .bz2
+ContentsArchitectures: ia64
+ContentsComponents:
+ContentsUComponents: main
+</pre>
+means to not skip any packages, generate Contents for <tt class="suffix">.udeb</tt>
+files, not generating Contents for <tt class="suffix">.deb</tt>s. Also it is only
+generated for the <tt>ia64</tt> architecture and only packages in component
+<tt>main</tt> are included.
+<h4><a name="exporthook">Additional index files (like .diff)</a></h4>
+Index files reprepro cannot generate itself, can be generated by telling
+it to call a script.
+<h5>using rredtool to generate pdiff files</h5>
+Starting with version 4.1.0, the <tt>rredtool</tt> coming with reprepro
+can be used as hook to create and update <tt>Packages.diff/Index</tt> files.
+<br>
+Unlike dak (which created the official Debian repositories) or the pdiff.py
+script (see below) derived from dak, a user will only need to download
+one of those patches, as new changes are merged into the old files.
+<br>
+To use it, make sure you have
+<a href="http://packages.debian.org/diff">diff</a> and
+<a href="http://packages.debian.org/gzip">gzip</a>
+installed.
+Then add something like the following to the headers of the distributions
+that should use this in <tt class="file">conf/distributions</tt>:
+<pre class="config">
+ DscIndices: Sources Release . .gz /usr/bin/rredtool
+ DebIndices: Packages Release . .gz /usr/bin/rredtool
+</pre>
+<h5>the pdiff example hook script (generates pdiff files)</h5>
+This example generates <tt class="file">Packages.diff</tt> and/or
+<tt class="file">Sources.diff</tt> directories containing a set of
+ed-style patches, so that people do not redownload the whole index
+for just some small changes.
+<br>
+To use it, copy <tt class="file">pdiff.example</tt> from the examples directory
+into your <tt class="dir">conf</tt> directory.
+(or any other directory, then you will need to give an absolute path later).
+Unpack, if needed. Rename it to pdiff.py and make it executable.
+Make sure you have
+<a href="http://packages.debian.org/python3-apt">python3-apt</a>,
+<a href="http://packages.debian.org/diff">diff</a> and
+<a href="http://packages.debian.org/gzip">gzip</a>
+installed.
+Then add something like the following to the headers of the distributions
+that should use this in <tt class="file">conf/distributions</tt>:
+<pre class="config">
+ DscIndices: Sources Release . .gz pdiff.py
+ DebIndices: Packages Release . .gz pdiff.py
+</pre>
+More information can be found in the file itself. You should read it.
+<h5>the bzip2 example hook script</h5>
+This is a very simple example.
+Simple and mostly useless,
+as reprepro has built in <tt>.bz2</tt> generation support,
+unless you compiled it yourself with <tt>--without-libbz2</tt> or
+with no <tt>libbz2-dev</tt> installed.
+<br>
+To use it, copy <tt class="file">bzip.example</tt> from the examples directory
+into your <tt class="dir">conf</tt> directory.
+(or any other directory, then you will need to give an absolute path later).
+Unpack, if needed. Rename it to bzip2.sh and make it executable.
+Then add something like the following to the headers of the distributions
+that should use this in <tt class="file">conf/distributions</tt>:
+<pre class="config">
+ DscIndices: Sources Release . .gz bzip2.sh
+ DebIndices: Packages Release . .gz bzip2.sh
+ UDebIndices: Packages . .gz bzip2.sh
+</pre>
+The script will compress the index file using the
+<a href="http://packages.debian.org/bzip2">bzip2</a> program and tell
+reprepro which files to include in the Release file of the distribution.
+<h5>internals</h5>
+TO BE CONTINUED
+<h4>...</h4>
+TO BE CONTINUED
+<h2><a name="localpackages">Local packages</a></h2>
+There are two ways to get packages not yet in any repository into yours.
+<dl><dt>includedsc, includedeb, include</dt><dd>
+These are for including packages at the command line.
+Many options are available to control what actually happens.
+You can easily force components, section and priority and/or choose to
+include only some files or only in specific architectures.
+(Can be quite useful for architecture all packages depending on some
+packages you will only build some time later for some of your architectures).
+Files can be moved instead of copied and most sanity checks overwritten.
+They are also optimized towards being fast and simply try things instead of
+checking a long time if they would succeed.
+</dd><dt>processincoming</dt><dd>
+This command checks for changes files in an incoming directory.
+Being optimized for automatic processing (i.e. trying to check
+everything before actually doing anything), it can be slower
+(as every file is copied at least once to make sure the owner is correct,
+with multiple partitions another copy can follow).
+Component, section and priority can only be changed via the distribution's
+override files. Every inclusion needs a <tt class="suffix">.changes</tt> file.
+<br>
+This method is also relatively new (only available since 2.0.0), thus
+optimisation for automatic processing will happen even more.
+</dd></dl>
+<h3><a name="include">Including via command line</a></h3>
+There are three commands to directly include packages into your repository:
+<tt class="command">includedeb</tt>, <tt class="command">includedsc</tt>
+and <tt class="command">includechanges</tt>.
+Each needs the codename of the distribution you want to put your package into
+as first argument and a file of the appropriate type
+(<tt class="suffix">.deb</tt>, <tt class="suffix">.dsc</tt> or
+ <tt class="suffix">.changes</tt>, respectively) as second argument.
+<br>
+If no component is specified via <tt class="option">--component</tt>
+(or short <tt class="option">-C</tt>), it will be guessed looking at its
+section and the components of that distribution.
+<br>
+If there are no <tt class="option">--section</tt>
+(or short <tt class="option">-S</tt>) option, and it is not specified
+by the (binary or source, depending on the type) override file of the
+distribution, the value from the <tt class="suffix">.changes</tt>-file
+is used (if the command is <tt class="command">includechanges</tt>)
+or it is extracted out of the file (if it is a
+<tt class="suffix">.deb</tt>-file, future versions might also try to
+extract it from a <tt class="suffix">.dsc</tt>'s diff or tarball).
+<br>
+Same with the priority and the <tt class="option">--priority</tt>
+(or short <tt class="option">-P</tt>) option.
+<br>
+With the <tt class="option">--architecture</tt> (or short <tt class="option">-A</tt>)
+option, the scope of the command is limited to that architecture.
+<tt class="command">includedeb</tt> will add an Architecture <tt>all</tt>
+package only to that architecture (and complain about Debian packages for
+other architectures).
+<tt class="command">include</tt> will do the same and ignore packages for
+other architectures (source packages will only be included if the value
+for <tt class="option">--architecture</tt> is <tt>source</tt>).
+<br>
+To limit the scope to a specific type of package, use the
+<tt class="option">--packagetype</tt> or short <tt class="option">-T</tt>
+option. Possible values are <tt>deb</tt>, <tt>udeb</tt> and <tt>dsc</tt>.
+<br>
+When using the <tt class="option">--delete</tt> option, files will
+be moved or deleted after copying them.
+Repeating the <tt class="option">--delete</tt> option will also delete
+unused files.
+<br>
+TO BE CONTINUED.
+<h3><a name="incoming">Processing an incoming queue</a></h3>
+Using the <tt class="command">processincoming</tt> command reprepro
+can automatically process incoming queues.
+While this is still improvable (reprepro still misses ways to send
+mails and especially an easy way to send rejection mails to the
+uploader directly), it makes it easy to have a directory where you
+place your packages and reprepro will automatically include them.
+<br>
+To get this working you need three things:
+<ul>
+<li><a href="#processincoming-incoming-config">
+a file <tt class="file">conf/incoming</tt> describing your incoming directories,
+</a></li>
+<li><a href="#processincoming-dist-config">
+a <tt class="file">conf/distribution</tt> file describing your distributions
+(as always with reprepro)
+and
+</a></li>
+<li><a href="#processincoming-calling">
+a way to get reprepro called to process it.
+</a></li>
+</ul>
+<a name="processincoming-incoming-config">
+<h4>The file <tt class="file">conf/incoming</tt></h4></a>
+describes the different incoming queues.
+As usual the different chunks are separated by empty lines.
+Each chunk can have the following fields:
+<dl><dt>Name</dt><dd>This
+is the name of the incoming queue, that <tt class="command">processincoming</tt>
+wants as argument.</dd>
+<dt>IncomingDir</dt><dd>The actual directory to look for
+<tt class="suffix">.changes</tt> files.</dd>
+<dt>TempDir</dt><dd>To ensure integrity of the processed files and their
+permissions,
+every file is first copied from the incoming directory to this directory.
+Only the user reprepro runs as needs write permissions here.
+It speeds things up if this directory is in the same partition as the pool.</dd>
+<dt>Allow</dt><dd>
+This field lists the distributions this incoming queue might inject packages
+into.
+Each item can be a pair of a name of a distribution to accept and a distribution
+to put it into.
+Each upload has each item in its <tt class="field">Distribution:</tt> field
+compared first to last to each of these items and is put in the first distribution
+accepting it. For example
+<pre class="line">
+Allow: stable>etch stable>etch-proposed-updates mystuff unstable>sid
+</pre>
+will put a <tt class="suffix">.changes</tt> file with
+<tt class="field">Distribution: stable</tt> into etch.
+If that is not possible (e.g. because etch has a
+<tt class="field">UploadersList</tt> option not allowing this) it will
+be put into etch-proposed-updates.
+And a <tt class="suffix">.changes</tt> file with
+<tt class="field">Distribution: unstable</tt> will be put into sid, while
+with <tt class="field">Distribution: mystuff</tt> will end up in mystuff.
+<br>
+If there is a <tt class="field">Default</tt> field, the <tt class="field">Allow</tt>
+field is optional.</dd>
+<dt>Default</dt><dd>
+Every upload not caught by an item of the <tt class="field">Allow</tt>
+field is put into the distribution specified by this.
+<br>
+If there is a <tt class="field">Allow</tt> field, the <tt class="field">Default</tt>
+field is optional.</dd>
+<dt>Multiple</dt><dd>
+This field only makes a difference if a <tt class="suffix">.changes</tt> file
+has multiple distributions listed in its <tt class="field">Distribution:</tt>
+field.
+Without this field each of those distributions is tried according to the
+above rules until the package is added to one (or none accepts it).
+With this field it is tried for each distribution, so a package can be uploaded
+to multiple distributions at the same time.
+</dd>
+<dt>Permit</dt><dd>
+A list of options to allow things otherwise causing errors.
+(see the manpage for possible values).
+<br>This field is optional.</dd>
+<dt>Cleanup</dt><dd>
+Determines when and what files to delete from the incoming queue.
+By default only successfully processed <tt class="suffix">.changes</tt> files
+and the files referenced by those are deleted.
+For a list of possible options take a look into the man page.
+<br>This field is optional.</dd>
+</dl>
+<a name="processincoming-dist-config">
+<h4><tt class="file">conf/distribution</tt> for <tt class="command">processincoming</tt></h4></a>
+There are no special requirements on the <tt class="file">conf/distribution</tt>
+file by processincoming. So even a simple
+<pre class="file">
+Codename: mystuff
+Architectures: i386 source
+Components: main non-free contrib bad
+</pre>
+will work.
+<br>
+The <tt class="field">Uploaders</tt> field can list a file limiting
+uploads to this distribution to specific keys and
+<tt class="field">AlsoAcceptFor</tt> is used to resolve unknown names
+in <tt class="file">conf/incoming</tt>'s <tt class="field">Allow</tt>
+and <tt class="field">Default</tt> fields.
+<a name="processincoming-calling">
+<h4>Getting <tt class="command">processincoming</tt> called.</h4></a>
+While you can just call <tt class="command">reprepro processincoming</tt> manually,
+having an incoming queue needing manual intervention takes all the fun out of
+having an incoming queue, so usually some automatic way is chosen:
+<ul>
+<li>Dupload and dput have ways to call a hook after a package was uploaded.
+This can be an ssh to the host calling reprepro.
+The disadvantage is having to configure this in every
+<tt class="file">.dupload.conf</tt> on every host you want to upload and give
+everyone access to ssh and permissions on the archive who should upload.
+The advantage is you can configure reprepro to have interactive scripts or
+ask for passphrases.
+</li>
+<li>Install a cron-job calling reprepro every 5 minutes. Cron is usually
+available everywhere and getting the output sent by mail to you or a mailing
+list is easy.
+The annoying part is having to wait almost 5 minutes for the processing.
+</li>
+<li>Use something like <a href="http://packages.debian.org/inoticoming"><tt class="external">inoticoming</tt></a>.
+Linux has a syscall called inotify, allowing a program to be run whenever
+something happens to a file.
+One program making use of this is inoticoming. It watches a directory using
+this facility and whenever a <tt class="suffix">.changes</tt> file is completed
+it can call reprepro for you.
+(As this happens directly, make sure you always upload the <tt class="suffix">.changes</tt>
+file last, dupload and dput always ensure this).
+This can be combined with Debian's cron-extension to have a program started at
+boot time with the <tt>@reboot</tt> directive.
+For example with a crontab like:
+<pre class="file">
+MAILTO=myaddress@somewhere.tld
+
+@reboot inoticoming --logfile /my/basedir/logs/i.log /my/basedir/incoming/ --stderr-to-log --stdout-to-log --suffix '.changes' --chdir /my/basedir reprepro -b /my/basedir --waitforlock 100 processincoming local {} \;
+</pre>
+</li>
+</ul>
+<h2><a name="mirroring">Mirroring / Updating</a></h2>
+Reprepro can fetch packages from other repositories.
+For this it uses apt's methods from <tt class="dir">/usr/lib/apt/methods/</tt>
+so everything (http, ftp, ...) that works with apt should also work with reprepro.
+Note that this works on the level of packages, even though you can tell reprepro
+to create a distribution having always the same packages as some remote repository,
+the repository as a whole may not look exactly the same but only have the same set
+of packages in the same versions.
+<br>
+You can also only mirror a specific subset of packages, merge multiple repositories
+into one distribution, or even have distributions mixing remote and local packages.
+<br>
+Each distribution to receive packages from other repositories needs an
+<tt class="field">Update:</tt> field listing the update rules applied to it.
+Those update rules are listed in <tt class="file">conf/updates</tt>.
+There is also the magic <tt>-</tt> update rule, which tells reprepro to delete
+all packages not re-added by later rules.
+<br>
+To make reprepro to update all distributions call <tt>reprepro update</tt>
+without further arguments, or give the distributions to update as additional
+arguments.
+<br>
+Let's start with some examples:
+<h3><a name="update-examples">Updating examples</a></h3>
+Let's assume you have the following <tt class="file">conf/distributions</tt>
+<pre class="file">
+Codename: etch
+Architectures: i386 source
+Components: main contrib
+Update: local - debian security
+
+Codename: mystuff
+Architectures: abacus source
+Components: main bad
+Update: debiantomystuff
+</pre>
+and the following <tt class="file">conf/updates</tt>
+<pre class="file">
+Name: local
+Method: http://ftp.myorg.tld/debian
+
+Name: debian
+Method: http://ftp.de.debian.org/debian
+VerifyRelease: A70DAF536070D3A1
+Config: Acquire::Http::Proxy=http://proxy.yours.org:8080
+
+Name: security
+Suite: */updates
+Method: http://security.eu.debian.org/
+Fallback: http://security.debian.org/
+VerifyRelease: A70DAF536070D3A1
+Config: Acquire::Http::Proxy=http://proxy.yours.org:8080
+
+Name: debiantomystuff
+Suite: sid
+Method: http://ftp.de.debian.org/debian
+Architectures: i386&gt;abacus source
+Components: main non-free&gt;bad contrib&gt;bad
+FilterFormula: Architecture (== all)| !Architecture
+FilterList: deinstall list
+</pre>
+and a file <tt class="file">conf/list</tt> with some
+output as <tt>dpkg --get-selections</tt> is printing.
+<br>
+If you then run
+<tt class="command">reprepro update etch</tt> or
+<tt class="command">reprepro checkupdate etch</tt>,
+reprepro looks at etch's <tt class="field">Update:</tt> line
+and finds four rules. The first is the <tt>local</tt> rule,
+which only has a method, so that means it will download the
+<tt class="file">Release</tt> file from
+<tt>http://ftp.myorg.tld/debian/dists/etch/Release</tt> and
+(unless it already has downloaded them before or that
+repository does not have all of them) downloads the
+<tt>binary-i386/Packages.gz</tt>
+and <tt>source/Sources.gz</tt> files for main and contrib.
+The same is done for the <tt>debian</tt> and <tt>security</tt>
+rules.
+As they have a <tt class="field">VerifyRelease</tt> field,
+Release.gpg is also downloaded and checked to be signed with the
+given key
+(which you should have imported to your <tt class="external">gpg</tt>
+keyring before).
+As security has a <tt class="field">Suite:</tt> field, not the codename,
+but the content of this field (with a possible <tt>*</tt> replaced by the codename),
+is used as distribution to get.
+<br>
+Then, for each part of the distribution, it will parse the files it
+got from left to right.
+For each package it starts with the version currently in the distribution,
+if there is a newer one in <tt>local</tt> it will mark this.
+Then there is the delete rule <tt>-</tt>, which will mark it to be deleted
+(but remembers what was there, so if later the version in the distribution
+or the version in <tt>local</tt> are newest, it will get them from here avoiding
+slow downloads from far away). Then it will look into <tt>debian</tt> and then
+in <tt>security</tt>, if they have a newer version (or the same version, clearing
+the deletion mark).
+<br>
+If you issued <tt class="command">checkupdate</tt> reprepro will print what it would
+do now, otherwise it tries to download all the needed files and when it got all,
+change the packages in the distribution to the new ones, export the index files
+for this distribution and finally delete old files no longer needed.
+<br>
+TO BE CONTINUED.
+<h2><a name="propagation">Propagation of packages</a></h2>
+You can copy packages between distributions using the
+<tt class="command">pull</tt> and <tt class="command">copy</tt> commands.
+<br>
+With the <tt class="command">copy</tt> command you can copy packages
+by name from one distribution to the other within the same repository.
+<br>
+With the <tt class="command">pull</tt> command you can pull all packages
+(or a subset defined by some list, or exceptions by some list, or by some
+formula, or ...) from one distribution to another within the same repository.
+<br>
+Note that both assume the filenames of the corresponding packages in the
+pool will not differ, so you cannot move packages from one component to another.
+<br>
+Let's just look at a little example, more information can be found in the man page.
+<br>
+Assume you upload all new packages to a distribution and you want another
+so you can keep using an old version until you know the newer works, too.
+One way would be to use something like the following
+<tt class="file">conf/distributions</tt>:
+<pre class="file">
+Codename: development
+Suite: unstable
+Components: main extra
+Architectures: i386 source
+
+Codename: bla
+Suite: testing
+Components: main extra
+Architectures: i386 source
+Pull: from_development
+</pre>
+and <tt class="file">conf/pulls</tt>:
+<pre class="file">
+Name: from_development
+From: development
+</pre>
+i.e. you have two distributions, bla and development.
+Now you can just upload stuff to development (or its alias unstable).
+And when you want a single package to go to testing, you can use the copy
+command:
+<pre class="shell">
+reprepro copy bla development name1 name2 name3
+</pre>
+If you do not want to copy all packages of a given name, but only some
+of them, you can use <tt>-A</tt>, <tt>-T</tt> and <tt>-C</tt>:
+<pre class="shell">
+reprepro -T deb -A i386 copy bla development name1
+</pre>
+will copy <tt class="suffix">.deb</tt> packages called name1 from the i386
+parts of the distribution.
+<br>
+TO BE CONTINUED
+<h2><a name="snapshosts">Snapshots</a></h2>
+There is a gensnapshot command.<br>
+TO BE DOCUMENTED
+<h2><a name="tracking">Source package tracking</a></h2>
+TO BE DOCUMENTED
+<h2><a name="hooks">Extending reprepro / Hooks and more</a></h2>
+When reprepro misses some functionality,
+it often can be added by some kind of hook.
+<br>
+Currently you can execute your own scripts at the following occasions:
+<ul>
+<li><a href="#addhook">after adding or removing packages</a></li>
+<li><a href="#byhandhook">to process byhand files</a></li>
+<li><a href="#exporthook">when creating index files (Packages.gz, Sources.gz)</a></li>
+<li><a href="#signhook">when signing releases</a></li>
+<li><a href="#outhook">after changing the visible files of the repository managed</a></li>
+<li><a href="#endhook">when reprepro finished</a></li>
+</ul>
+<h3><a name="addhook">Scripts to be run when adding or removing packages</a></h3>
+Whenever a package is added or removed,
+you can tell reprepro to log that to some file and/or call a script using the
+<tt>Log:</tt> directive in <tt class="file">conf/distributions</tt>.
+<br>
+This script can send out mails and do other logging stuff,
+but despite the name, it is not restricted to logging.
+<br>
+<h4>Automatically extracting changelog and copyright information</h4>
+reprepro ships with an example script to extract <tt class="file">debian/changelog</tt>
+and <tt class="file">debian/copyright</tt>
+files from source packages into a hierarchy loosely resembling the way changelogs
+are made available at
+<a href="http://packages.debian.org/changelogs/">http://packages.debian.org/changelogs/</a>.
+<br>
+All you have to do is to copy (or unpack if compressed) the file
+<tt class="file">changelogs.example</tt> from the examples directory
+in the reprepro source or
+<a href="file:///usr/share/doc/reprepro/examples/">/usr/share/doc/reprepro/examples/</a>
+of your installed reprepro package into your <tt class="directory">conf/</tt> directory
+(or somewhere else, then you will need an absolute path later), perhaps
+change some directories specified in it
+and add something like the following lines
+to all distributions in <tt class="file">conf/distributions</tt> that should use
+this feature:
+<pre class="config">
+Log:
+ --type=dsc changelogs.example
+</pre>
+If you still want to log to some file, just keep the filename there:
+<pre class="config">
+Log: mylogfilename
+ --type=dsc changelogs.example
+</pre>
+Then cause those files to be generated for all existing files via
+<pre class="command">
+reprepro rerunnotifiers
+</pre>
+and all future source packages added or removed will get this list automatically
+updated.
+<h4>Writing your own Log: scripts</h4>
+You can list an arbitrary amount of scripts, to be called at specified times
+(which can overlap or even be the same):
+<pre class="config">
+Log: logfilename
+ --type=dsc script-to-run-on-source-package-changes
+ script-to-run-on-package-changes
+ another-script-to-run-on-package-changes
+ --type=dsc --component=main script-to-run-on-main-source-packages
+ --architecture=i386 --type=udeb script-to-run-on-i386-udebs
+ --changes script-to-run-on-include-or-processincoming
+</pre>
+There are two kind of scripts:
+The first one is called when a package was added or removed.
+Using the <tt class="option">--architecture=</tt>,
+<tt class="option">--component=</tt> and
+<tt class="option">--type=</tt> options you can limit it to specific parts
+of the distribution.
+The second kind is marked with <tt class="option">--changes</tt> and is
+called when a <tt class="suffix">.changes</tt>-file was added with
+<tt class="command">include</tt> or <tt class="command">processincoming</tt>.
+Both are called asynchronously in the background <em>after</em> everything was done,
+but before no longer referenced files are deleted (so the files of the
+replaced or deleted package are still around).
+<h5>Calling conventions for package addition/removal scripts</h5>
+This type of script is called with a variable number of arguments.
+The first argument is the action. This is either
+<tt>add</tt>, <tt>remove</tt> or <tt>replace</tt>.
+The next four arguments are the codename of the affected distribution
+and the packagetype, component and architecture in that distribution
+affected.
+The sixth argument is the package's name.
+After that is the version of the added package (<tt>add</tt> and <tt>replace</tt>)
+and the version of the removed package (<tt>remove</tt> and <tt>replace</tt>).
+Finally the filekeys of the new (<tt>add</tt> and <tt>replace</tt>) and/or
+removed (<tt>remove</tt> and <tt>replace</tt>) package are listed
+starting with the marker &quot;<tt>--</tt>&quot; followed by each filekey
+(the name of the file in the <tt class="dir">pool/</tt>
+relative to <tt class="env">REPREPRO_OUT_DIR</tt>)
+as its own argument.
+<br>
+The environment variable <tt class="env">REPREPRO_CAUSING_COMMAND</tt>
+contains the command of the action causing this change.
+The environment variable
+<tt class="env">REPREPRO_CAUSING_FILE</tt> contains the name of the file
+given at the command line causing this package to be changed,
+if there is one.
+(i.e. with <tt class="command">includedeb</tt>,
+<tt class="command">includedsc</tt> and <tt class="command">include</tt>).
+The environment variables
+<tt class="env">REPREPRO_CAUSING_RULE</tt> and
+<tt class="env">REPREPRO_FROM</tt> are
+the name of the update or pull rule pulling in a package
+and the name of the distribution a package is coming from.
+What this name is depends on the command and for most commands
+it is simply not set at all.
+And of course all the <tt class="env">REPREPRO_*_DIR</tt> variables are set.
+<h5>Calling conventions for <tt class="suffix">.changes</tt> scripts</h5>
+This type of script is called with 5 or 6 arguments.
+The first is always &quot;<tt>accepted</tt>&quot;, to make it easier to
+check it is configured the right way.
+The second argument is the codename of the distribution the
+<tt class="suffix">.changes</tt>-file was added to.
+The third argument is the source name, the fourth the version.
+The fifth name is the <tt class="suffix">.changes</tt> itself
+(in case of <tt class="command">processincoming</tt> the secure copy in the
+temporary dir).
+There is a sixth argument if the <tt class="suffix">.changes</tt>-file was
+added to the <tt class="dir">pool/</tt>:
+The filekey of the added .changes file
+(i.e. the filename relative to <tt class="env">REPREPRO_OUT_DIR</tt>).
+<br>
+The environment variable <tt class="env">REPREPRO_CAUSING_COMMAND</tt>
+contains the command of the action causing this change.
+The environment variable
+<tt class="env">REPREPRO_CAUSING_FILE</tt> contains the name of the file
+given at the command line, if there is one
+(e.g. with <tt class="command">include</tt>).
+And of course all the <tt class="env">REPREPRO_*_DIR</tt> variables are set.
+<h3><a name="byhandhook">Scripts to be run to process byhand files</a></h3>
+<tt class="suffix">.changes</tt> files can (beside the usual packages files
+to be included in the repository) contain additional files to be processed
+specially.
+Those are marked by the special section <tt class="constant">byhand</tt> (in Debian)
+or <tt class="constant">raw-</tt>something (in Ubuntu).
+Besides storing them just in the pool besides the packages using the
+<tt class="constant">includebyhand</tt> value in the <tt class="field">Tracking</tt>
+settings you can also let reprepro process a hook to process them when encountering
+them in the <tt class="action">processincoming</tt> action
+(Typical usages are uploading documentation files this way that are unpacked next
+to the repository, or installer images or stuff like that).
+
+To use them add to the distribution's defining stanza in <tt class="filename">conf/distributions</tt> a field like:
+<pre class="config">
+ByhandHooks:
+ byhand * manifesto.txt handle-byhand.sh
+</pre>
+This will call the hook script <tt class="constant">handle-byhand.sh</tt> for every byhand file with section <tt class="constant">byhand</tt>, any priority and filename <tt class="constant">manifesto.txt</tt>. (The first three fields allow glob characters for matching).
+
+The script will then be called with 5 arguments:
+the codename of the distribution,
+the section,
+the priority,
+the filename as found in the changes file and
+the filename of where the script can find the actual file.
+
+<h3>Scripts to be run when creating index files (Packages.gz, Sources.gz)</h3>
+this hook is described in the section <a href="#exporthook">&quot;Additional index files&quot;</a>.
+
+<h3><a name="signhook">Scripts to be run when signing releases</a></h3>
+Instead of creating <tt class="filename">InRelease</tt> and
+<tt class="filename">Release.gpg</tt> files using libgpgme,
+the <tt class="option">SignWith</tt> option can also contain
+an exclamation mark followed by a space and the name of a hook script to call.
+
+The script gets three arguments:
+The filename to sign,
+the filename of the InRelease file to create and
+the filename of the Release.gpg to create
+(a Release.gpg does not need to be created. reprepro will assume you do not care about that legacy file if it is not created).
+
+Reprepro will wait for the script to finish and only do the renaming
+and deleting of old files after that, so the script might for example
+wait for someone to copy the files from the system, sign them and
+copy them back.
+
+<h3><a name="outhook">Scripts to be run after changing the visible files of the repository managed</a></h3>
+When using the <tt class="option">--outhook</tt> command line option (or the corresponding
+<tt class="constant">outhook</tt> in the <tt class="filename">options</tt> file),
+reprepro will create a <tt class="suffix">.outlog</tt> file in the log directory describing
+any changes done to the out dir and calls the hook script given as argument with this
+file as argument.
+
+The <tt class="suffix">.outlog</tt> file consists of lines each starting with a keyword
+and then some arguments separated by tab characters.
+
+The possible keywords are:
+<ul>
+<li><tt class="constant">POOLNEW</tt>:
+One argument is the filekey of a file newly added to the pool.
+<li><tt class="constant">POOLDELETE</tt>:
+One argument is the filekey of a file removed from the pool.
+<li><tt class="constant">START-DISTRIBUTION</tt>, <tt class="constant">END-DISTRIBUTION</tt>:
+two or three arguments: the codename, the directory,
+and the suite (if set).
+<li><tt class="constant">START-SNAPSHOT</tt>, <tt class="constant">END-SNAPSHOT</tt>:
+three arguments: the codename, the directory, and the name of the snapshot generated.
+<li><tt class="constant">DISTFILE</tt>:
+three arguments: the directory of the distribution (relative to out dir), the name relative to that directory, and the filename generated by reprepro.
+<li><tt class="constant">DISTSYMLINK</tt>:
+three arguments: the directory of the distribution (relative to out dir), the name relative to that directory, and the symlink target (relative to that directory).
+<li><tt class="constant">DISTDELETE</tt>:
+two arguments: the directory of the distribution (relative to out dir), the name relative to that directory of a file no longer there.
+<li><tt class="constant">DISTKEEP</tt> (not yet generated):
+two arguments: the directory of the distribution (relative to out dir), the name relative to that directory.
+</ul>
+
+All <tt class="constant">POOLNEW</tt> come before any distribution changes referencing them
+and all <tt class="constant">POOLDELETE</tt> will be afterwards.
+Each line belonging to a distribution is guaranteed to be between the corresponding
+<tt class="constant">START-DISTRIBUTION</tt> and
+<tt class="constant">END-DISTRIBUTION</tt> or between a
+<tt class="constant">START-SNAPSHOT</tt> and
+<tt class="constant">END-SNAPSHOT</tt>
+with the same directory
+(i.e. there is some redundancy so you can choose to parse the information where it is more convenient for you).
+
+The lines starting with <tt class="constant">DIST</tt> describe new or modified files in the distribution description exported by reprepro. No hint is given if that file was previously non-existent, a proper file or a symlink (i.e. if you copy stuff, do not make any assumptions about that).
+Future versions of reprepro might create <tt class="constant">DISTKEEP</tt> lines to denote files that have not changed (i.e. just ignore those lines to be future-proof).
+
+The directories for the distribution entries are what apt expects them (i.e. always starting with <tt class="constant">dists/</tt>, while the third argument to <tt class="constant">DISTFILE</tt> is the name reprepro generated (i.e. starts with the distdir value, which can be configured to not end with <tt class="constant">dists/</tt>).
+
+<h3><a name="endhook">when reprepro finished</a></h3>
+With the <tt class="option">--endhook</tt> command line option (or the corresponding
+<tt class="constant">endhook</tt> in the <tt class="filename">options</tt> file) you
+can specify a hook to be executed after reprepro finished but before reprepro returns
+to the calling process.
+The hook gets all the command line arguments after the options (i.e. starting with
+the name of the action) and the exit code reprepro would have produced.
+For an example see the man page.
+<h2><a name="maintenance">Maintenance</a></h2>
+This section lists some commands you can use to check and improve the health
+of your repository.
+<br>
+Normally nothing of this should be needed, but taking a look from time to time
+cannot harm.
+<pre class="command">
+reprepro -b $YOURBASEDIR dumpunreferenced
+</pre>
+This lists all files reprepro knows about that are not marked as needed by anything.
+Unless you called reprepro with the <tt class="option">--keepunreferenced</tt>
+option, those should never occur. Though if reprepro is confused or interrupted it
+may sometimes prefer keeping files around instead of deleting them.
+<pre class="command">
+reprepro -b $YOURBASEDIR deleteunreferenced
+</pre>
+This is like the command before, only that such files are directly forgotten and
+deleted.
+<pre class="command">
+reprepro -b $YOURBASEDIR check
+</pre>
+Look if all needed files are in fact marked needed and known.
+<pre class="command">
+reprepro -b $YOURBASEDIR checkpool
+</pre>
+Make sure all known files are still there and still have the same checksum.
+<pre class="command">
+reprepro -b $YOURBASEDIR checkpool fast
+</pre>
+As the command above, but do not compute checksums.
+<pre class="command">
+reprepro -b $YOURBASEDIR tidytracks
+</pre>
+If you use source package tracking, check for files kept because of this
+that should no longer be kept by the current rules.
+<br>
+If you fear your tracking data could have become outdated,
+you can also try the retrack command:
+<pre class="command">
+reprepro -b $YOURBASEDIR retrack
+</pre>
+That refreshes the tracking information about packages used and then
+runs a tidytracks. (Beware: don't do this with reprepro versions before 3.0.0).
+<h2><a name="internals">Internals</a></h2>
+reprepro stores the data it collects in Berkeley DB file (<tt class="suffix">.db</tt>)
+in a directory called <tt class="dir">db/</tt> or whatever you specified via command
+line. With a few exceptions, those files are NO CACHES, but the actual data.
+While some of those data can be regained when you lose those files, they are better
+not deleted.
+<h3>packages.db</h3>
+This file contains the actual package information.
+<br>
+It contains a database for every (codename,component,architecture,packagetype) quadruple
+available.
+<br>
+Each is indexed by package name and essentially contains the information written to
+the Packages and Sources files.
+<br>
+Note that if you change your <tt class="file">conf/distributions</tt> to no longer
+list some codenames, architectures or components,
+that will not remove the associated databases in this file.
+That needs an explicit call to <tt class="command">clearvanished</tt>.
+<h3>references.db</h3>
+This file contains a single database that lists for every file why this file
+is still needed.
+This is either an identifier for a package database, a tracked source package,
+or a snapshot.
+<br>
+Some low level commands to access this are (take a look at the manpage for how to use them):
+<dl class="commands">
+<dt class="command">rereference</dt><dd>recreate references (i.e. forget old and create newly)</dd>
+<dt class="command">dumpreferences</dt><dd>print a list of all references</dd>
+<dt class="command">_removereferences</dt><dd>remove everything referenced by a given identifier</dd>
+<dt class="command">_addreference</dt><dd>manually add a reference</dd>
+<dt class="command">_addreferences</dt><dd>manually add multiple references</dd>
+</dl>
+<h3>files.db / checksums.db</h3>
+These files contains what reprepro knows about your <tt class="dir">pool/</tt> directory,
+i.e. what files it thinks are there with what sizes and checksums.
+The file <tt class="filename">files.db</tt> is used by reprepro before version 3.3
+and kept for backwards compatibility.
+If your repository was only used with newer versions you can safely delete it.
+Otherwise you should run <tt class="command">collectnewchecksums</tt> before deleting
+it.
+The file <tt class="filename">checksums.db</tt> is the new file used since
+version 3.3.
+It can store more checksums types (<tt class="filename">files.db</tt> only contained
+md5sums, <tt class="filename">checksums.db</tt> can store arbitrary checksums and
+reprepro can even cope with it containing checksum types it does not yet know of)
+but for compatibility with pre-3.3 versions is not the canonical source of information
+as long as a <tt class="filename">files.db</tt> file exists).
+<br>
+If you manually put files in the pool or remove them, you should tell reprepro about that.
+(it sometimes looks for files there without being told, but it never forgets files
+except when it would have deleted them anyway).
+Some low level commands (take a look at the man page for how to use them):
+<dl class="commands">
+<dt class="command">collectnewchecksums</dt><dd>Make sure every file is listed in <tt class="filename">checksums.db</tt> and with all checksum types your reprepro supports.</dd>
+<dt class="command">checkpool fast</dt><dd>Make sure all files are still there.</dd>
+<dt class="command">checkpool</dt><dd>Make sure all files are still there and correct.</dd>
+<dt class="command">dumpunreferenced</dt><dd>Show all known files without reference.</dd>
+<dt class="command">deleteunreferenced</dt><dd>Delete all known files without reference.</dd>
+<dt class="command">_listmd5sums</dt><dd>Dump this database (old style)</dd>
+<dt class="command">_listchecksums</dt><dd>Dump this database (new style)</dd>
+<dt class="command">_detect</dt><dd>Add files to the database</dd>
+<dt class="command">_forget</dt><dd>Forget that some file is there</dd>
+<dt class="command">_addmd5sums</dt><dd>Create the database from dumped data</dd>
+<dt class="command">_addchecksums</dt><dd>ditto</dd>
+</dl>
+<h3>release.cache.db</h3>
+In this file reprepro remembers what it already wrote to the <tt class="dir">dists</tt>
+directory,
+so that it can write their checksums (including the checksums of the uncompressed variant,
+even if that was never written to disk)
+in a newly to create <tt class="file">Release</tt>
+file without having to trust those files or having to unpack them.
+<h3>contents.cache.db</h3>
+This file contains all the lists of files of binary package files where reprepro
+already needed them. (which can only happen if you requested Contents files to be
+generated).
+<h3>tracking.db</h3>
+This file contains the information of the <a href="#tracking">source package tracking</a>.
+<h2><a name="recovery">Disaster recovery</a></h2>
+TO BE DOCUMENTED (see the
+<a href="http://git.debian.org/?p=mirrorer/reprepro.git;a=blob_plain;f=docs/recovery;hb=HEAD">recovery</a>
+file until then)
+<h2><a name="paranoia">Paranoia</a></h2>
+As all software, reprepro might have bugs.
+And it uses libraries not written by myself,
+which I'm thus even more sure that they will have bugs.
+Some of those bugs might be security relevant.
+This section contains some tips, to reduce the impact of those.
+<ul>
+<li>Never run reprepro as root.<br>
+All reprepro needs to work are permissions to files,
+there is no excuse for running it as root.
+</li>
+<li>Don't publish your db/ directory.<br>
+The contents of the db directory are not needed by everyone else.
+Having them available to everyone may make it easier for them to
+exploit some hypothetical problem in libdb and makes it easier to
+know in advance how exactly reprepro will act in a given circumstances,
+thus easier to exploit some hypothetical problem.
+</li>
+<li>Don't accept untrusted data without need.<br>
+If an attacker cannot do anything, they cannot do anything harmful, either.
+So if there is no need, don't offer an anonymous incoming queue.
+<tt class="program">dput</tt> supports uploading via scp, so just having
+an only group-writable incoming directory, or even better multiple incoming
+directories can be a better alternative.
+</li>
+</ul>
+External stuff being used and attack vectors opened by it:
+<dl>
+<dt>libgpgme/gpg</dt><dd>
+Almost anything is run through <tt>libgpgme</tt> and thus <tt>gpg</tt>.
+It will be used to check the <tt class="filename">Release.gpg</tt> file,
+or to read <tt class="suffix">.dsc</tt> and <tt class="suffix">.changes</tt>
+files (even when there is no key to look for specified,
+as that is the best way to get the data from the signed block).
+Avoiding this by just accepting stuff without looking for signatures on
+untrusted data is not really an option, so I know nothing to prevent this
+type of problems.
+</dd>
+<dt>libarchive</dt><dd>
+The <tt class="suffix">.tar</tt> files within <tt class="suffix">.deb</tt>
+files are normally (unless that library was
+not available while compiling) read using libarchive.
+This happens when a <tt class="suffix">.deb</tt> file is to be added
+(though only after deciding if it should be added, so if it does not have
+the correct checksum or the .changes did not have the signatures you specified,
+it is not) or when the file list is to be extracted
+(when creating <tt class="filename">Contents</tt> files).
+Note that they are not processed when only mirroring them (of course unless
+<tt class="filename">Contents</tt> files are generated), as then only the
+information from the Packages file is copied.
+</dd>
+<dt>dpkg-deb/tar</dt><dd>
+If reprepro was compiled without libarchive,
+<tt class="program">dpkg-deb</tt> is used instead, which most likely will
+call <tt class="program">tar</tt>. Otherwise just the same like the last
+item.
+</dd>
+<dt>zlib</dt><dd>
+When mirroring packages, the downloaded
+<tt class="filename">Packages.gz</tt> and <tt class="filename">Sources.gz</tt> files
+are read using zlib. Also the generated <tt class="suffix">.gz</tt> files
+are generated using it. There is no option but hoping there is no security
+relevant problem in that library.
+</dd>
+<dt>libbz2</dt><dd>
+Only used to generate <tt class="suffix">.bz2</tt> files.
+If you fear simple blockwise writing using that library has a security problem
+that can be exploited by data enough harmless looking to be written to the
+generated index files, you can always decide to not tell reprepro to generate
+<tt class="suffix">.bz2</tt> files.
+</dd>
+</dl>
+<h2><a name="counterindications">What reprepro cannot do</a></h2>
+There are some things reprepro does not do:
+<dl><dt>Verbatim mirroring</dt><dd>
+Reprepro aims to put all files into a coherent <tt>pool/</tt> hierarchy.
+Thus it cannot guarantee that files will have the same relative path as in the
+original repository (especially if those have no pool).
+It also creates the index files from its own indices.
+While this leads to a tidy repository and possible savings of disk-space, the
+signatures of the repositories you mirror cannot be used to authenticate the mirror,
+but you will have to sign (or tell reprepro to sign for you) the result.
+While this is perfect when you only mirror some parts or specific packages or
+also have local packages that need local signing anyway, reprepro is no suitable tool
+for creating a full mirror that can be authenticated without adding the key of this
+repository.
+</dd>
+<dt>Placing your files on your own</dt><dd>
+Reprepro does all the calculation of filenames to save files as,
+bookkeeping what files are there and what are needed and so on.
+This cannot be switched off or disabled.
+You can place files where reprepro will expect them and reprepro will use
+them if their md5sum matches.
+But reprepro is not suited if you want those files outside of a pool or in
+places reprepro does not consider their canonical ones.
+</dd>
+<dt>Having different files with the same name</dt><dd>
+take a look in the <a href="http://git.debian.org/?p=mirrorer/reprepro.git;a=blob_plain;f=docs/FAQ;hb=HEAD">FAQ</a> (currently question 1.2) why and how to avoid the problem.
+
+</dd>
+</dl>
+</body>
+</html>
diff --git a/docs/outsftphook.py b/docs/outsftphook.py
new file mode 100755
index 0000000..91c1c9c
--- /dev/null
+++ b/docs/outsftphook.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python3
+# Copyright (C) 2013 Bernhard R. Link
+#
+# This example script is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+
# Defaults; those can be set here or in conf/outsftphook.conf:
servername = None   # remote host to upload to -- presumably required, TODO confirm how it is used later in the script
username = None     # remote user name -- presumably None lets ssh pick its default, TODO confirm
targetdir = ""      # remote directory prefix prepended to every uploaded path
+
+import sys, os, subprocess, select, sftp
+
# One constant per upload phase.  Pool files are transferred before dist
# index files (so indices never reference files not yet uploaded);
# deletions come last.  (sftp.Enum is presumably a helper metaclass from
# the accompanying sftp.py that turns the class keyword arguments into
# enum constants -- TODO confirm against sftp.py.)
class Round(sftp.Enum,
    DONE = -2,      # already completed in an earlier, interrupted run
    INDIRECT = -1,  # handled via a CollectedDistDir, not scheduled directly
    POOLFILES = 0,  # upload new pool/ files
    DISTFILES = 1,  # upload dists/ index files (as ".new" first)
    DELETES = 2,    # delete files removed from the pool
):
    pass
+
# Global tally of non-fatal errors reported so far.
errors = 0

def printe(message):
    """Write an error message to stderr and bump the global error count."""
    global errors
    errors += 1
    print(message, file=sys.stderr)
+
+# renaming file, assuming all directories exist...
def renamefile(dst, src, donefunc):
    """Generator task: atomically replace dst by renaming src onto it.

    Issues a REMOVE of the destination (a missing file is fine) together
    with a RENAME carrying the OVERWRITE flag.  On a successful rename,
    donefunc(dst) is called and whatever follow-up tasks it returns are
    yielded back to the scheduler.  dst and src are remote paths relative
    to targetdir; all parent directories are assumed to exist already.
    """
    a = yield [sftp.REMOVE(targetdir + dst), sftp.RENAME(targetdir + src, targetdir + dst, [sftp.SSH_FXF_RENAME.OVERWRITE])]
    while True:
        l = []
        if not isinstance(a, sftp.STATUS):
            # NOTE(review): SftpUnexpectedAnswer is not defined in this
            # file; this likely needs the sftp. prefix -- TODO confirm.
            raise SftpUnexpectedAnswer(a, "expecting STATUS")
        if isinstance(a.forr, sftp.REMOVE):
            # A missing destination is expected; report any other failure
            # but still wait for the rename's answer.
            if a.status != sftp.SSH_FX.OK and a.status != sftp.SSH_FX.NO_SUCH_FILE:
                printe("%s failed: %s" % (a.forr, a))
        elif isinstance(a.forr, sftp.RENAME):
            if a.status != sftp.SSH_FX.OK:
                printe("%s failed: %s" % (a.forr, a))
            else:
                # Success: let the owner record completion and possibly
                # hand back follow-up tasks to start.
                l = donefunc(dst)
        else:
            raise SftpUnexpectedAnswer(a, a.forr)
        a.forr.done()
        a = yield l
+
+# create symlink, assuming all directories exist...
def symlinkfile(dst, src, donefunc):
    """Generator task: create the remote symlink dst pointing to src.

    First REMOVEs any existing dst (a missing file is fine), then issues
    SYMLINK.  On success, donefunc(dst, message="symlink done") is called
    and its returned follow-up tasks are yielded back to the scheduler.
    Paths are remote paths relative to targetdir; parent directories are
    assumed to exist already.
    """
    a = yield [sftp.REMOVE(targetdir + dst), sftp.SYMLINK(targetdir + dst, targetdir + src)]
    while True:
        l = []
        if not isinstance(a, sftp.STATUS):
            # NOTE(review): SftpUnexpectedAnswer is not defined in this
            # file; this likely needs the sftp. prefix -- TODO confirm.
            raise SftpUnexpectedAnswer(a, "expecting STATUS")
        if isinstance(a.forr, sftp.REMOVE):
            # A missing old symlink/file is fine; other failures are
            # reported but the SYMLINK answer is still awaited.
            if a.status != sftp.SSH_FX.OK and a.status != sftp.SSH_FX.NO_SUCH_FILE:
                printe("%s failed: %s" % (a.forr, a))
        elif isinstance(a.forr, sftp.SYMLINK):
            if a.status != sftp.SSH_FX.OK:
                printe("%s failed: %s" % (a.forr, a))
            else:
                l = donefunc(dst, message="symlink done")
        else:
            raise SftpUnexpectedAnswer(a, a.forr)
        a.forr.done()
        a = yield l
+
def deletefile(dst, donefunc):
    """Generator task: remove the remote file dst (relative to targetdir).

    A NO_SUCH_FILE answer counts as success (the file is already gone).
    donefunc(dst, message=...) is called on success and may return
    follow-up tasks.  The generator expects to be resumed exactly once
    after yielding those; a further resume raises.
    """
    a = yield [sftp.REMOVE(targetdir + dst)]
    if not isinstance(a, sftp.STATUS):
        # NOTE(review): SftpUnexpectedAnswer is not defined in this file;
        # this likely needs the sftp. prefix -- TODO confirm.
        raise SftpUnexpectedAnswer(a, "expecting STATUS")
    if a.status == sftp.SSH_FX.OK:
        l = donefunc(dst, message="deleted")
    elif a.status == sftp.SSH_FX.NO_SUCH_FILE:
        l = donefunc(dst, message="already deleted")
    else:
        printe("%s failed: %s" % (a.forr, a))
        l = []
    a.forr.done()
    a = yield l
    # Being resumed again means the scheduler sent an answer this task
    # never asked for.
    raise SftpUnexpectedAnswer(a, a.forr)
+
def writefile(fname, filetocopy, donefunc):
    """Generator task: upload the local file filetocopy to remote fname.

    Coordinates with the scheduler via special yield values: Dirlock
    events make sure the target directory exists (probing and creating it
    on demand in "tryandtell" mode), a Semaphore bounds the number of
    concurrently open remote files, and 'wantwrite'/'canwrite' throttle
    outstanding WRITE requests.  After a successful CLOSE, donefunc(fname)
    is called and its returned follow-up tasks are yielded back.
    """
    filename = targetdir + fname
    fd = open(filetocopy, 'rb')
    dirname = os.path.dirname(filename)
    if dirname:
        # Wait until the directory is known to exist, or learn that we
        # must probe for it ourselves ("tryandtell").
        mode = yield [('waitingfor', sftp.Dirlock, dirname)]
    else:
        mode = "top-level"
    # Acquire a slot from the open-file semaphore.
    a = yield [('lock', sftp.Semaphore, 'openfile')]
    if a != "unlock":
        # NOTE(review): SftpUnexpectedAnswer is not defined in this file;
        # this likely needs the sftp. prefix -- TODO confirm.
        raise SftpUnexpectedAnswer(a, "waiting for unlock event")
    a = yield [sftp.OPEN(filename, "CREAT|WRITE")]
    if mode == "tryandtell" and isinstance(a, sftp.STATUS) and a.status == a.status.NO_SUCH_FILE:
        # The directory did not exist: report it missing, give back the
        # open-file slot, and retry the OPEN once it was created.
        a.forr.done()
        a = yield [('missing', sftp.Dirlock, dirname),
                ('release', sftp.Semaphore, 'openfile')]
        if a != "createnew":
            raise SftpUnexpectedAnswer(a, "waiting for %s" % dirname)
        mode = a
        a = yield [('lock', sftp.Semaphore, 'openfile')]
        if a != "unlock":
            raise SftpUnexpectedAnswer(a, "waiting for unlock event")
        a = yield [sftp.OPEN(filename, "CREAT|WRITE")]
    if not isinstance(a, sftp.HANDLE):
        # Could not create the remote file: report and give up on this
        # file only (the rest of the upload continues).
        a.forr.done()
        printe("Failed to create %s: %s" % (filename, a))
        return
        # raise SftpException("Failed to create %s: %s" % (filename, a))
    h = a.handle
    a.forr.done()
    if mode == "tryandtell":
        # Tell the Dirlock the directory turned out to exist after all.
        f = [('found', sftp.Dirlock, dirname), 'wantwrite']
    else:
        f = ['wantwrite']
    a = yield f
    if a != 'canwrite':
        raise SftpUnexpectedAnswer(a, "waiting for 'canwrite'")
    ofs = 0
    while True:
        # Read in chunks of 16376 bytes -- presumably chosen to fit the
        # sftp packet size including overhead; TODO confirm against sftp.py.
        b = fd.read(16376)
        if len(b) == 0:
            break
        a = yield [sftp.WRITE(h, ofs, b), 'wantwrite']
        ofs += len(b)
        b = None
        # Acknowledge answers until the scheduler allows the next write.
        # NOTE(review): this loop never reassigns 'a', so it would spin
        # forever if resumed with anything but 'canwrite' after the first
        # done() -- presumably the scheduler guarantees the resume value
        # here; TODO confirm against sftp.py.
        while a != 'canwrite':
            a.forr.done()
    fd.close()
    a = yield [sftp.CLOSE(h), ('release', sftp.Semaphore, 'openfile')]
    while True:
        if type(a.forr) == sftp.CLOSE:
            if a.status != sftp.SSH_FX.OK:
                printe("%s failed: %s" % (a.forr, a))
            # NOTE(review): completion is reported even when the close
            # failed -- possibly intentional since the data was written;
            # TODO confirm.
            l = donefunc(fname)
        else:
            if a.status != sftp.SSH_FX.OK:
                printe("%s failed: %s" % (a.forr, a))
            l = []
        a.forr.done()
        a = yield l
+
class CriticalError(Exception):
    """Fatal error: processing of the .outlog files cannot continue."""
    pass

class ParseError(CriticalError):
    """An .outlog file could not be parsed."""
    pass

class ParseErrorWrongCount(ParseError):
    """An .outlog line had the wrong number of tab-separated fields."""
    def __init__(self, field):
        # BUG FIX: 'self' was missing from the parameter list, so raising
        # ParseErrorWrongCount(x) itself raised a TypeError instead of
        # producing this ParseError.
        super().__init__("Wrong number of arguments for %s" % field)
+
class CollectedDistDir:
    """All pending changes to one dists/ directory, merged across logs.

    Index files are first uploaded under a ".new" suffix; only once every
    file of the directory has arrived (counted via onedone) are the final
    renames, deletions and symlinks started, so clients never see a
    half-updated distribution.
    """
    def __init__(self, dir):
        self.done = False        # finalisation already started
        # NOTE(review): 'failed' is never set anywhere in this file --
        # TODO confirm whether it is dead or used by sftp.py.
        self.failed = False
        self.dir = dir           # directory name, e.g. "dists/<codename>"
        self.files = dict()      # full name -> owning LogFile to notify
        self.deletes = dict()    # full name -> owning LogFile to notify
        self.symlinks = dict()   # full name -> (link target, owning LogFile)
        self.transfered = 0      # [sic] number of ".new" uploads completed
    def onedone(self, filename):
        # One ".new" upload finished: strip the suffix, record partial
        # progress in the owning log, maybe kick off finalisation.
        assert(filename.endswith(".new"))
        filename = filename[:-4]
        assert (filename in self.files)
        self.transfered += 1
        self.files[filename].markpartial(filename, "asdotnew")
        return self.finalizeifready()
    def finalizeifready(self):
        # Return the finalisation tasks once all uploads are in; before
        # that, return no tasks.
        assert (not self.done)
        if len(self.files) != self.transfered:
            assert (len(self.files) > self.transfered)
            return []
        # everything copied as .new as needed, let's start finalisation
        self.done = True
        l = []
        for m,e in self.files.items():
            l.append(sftp.TaskFromGenerator(renamefile(m, m + ".new", e.doneone)))
        for m,e in self.deletes.items():
            l.append(sftp.TaskFromGenerator(deletefile(m, e.doneone)))
        for m,(t,e) in self.symlinks.items():
            l.append(sftp.TaskFromGenerator(symlinkfile(m, t, e.doneone)))
        return l
+
class DistDir:
    """Changes one single .outlog file requests for one dists/ directory."""
    def __init__(self, dir, onelog=True):
        # NOTE(review): the 'onelog' parameter is accepted but never used
        # -- TODO confirm it can be dropped.
        self.dir = dir
        self.files = []      # (name, local source file) of index files to upload
        self.deletes = []    # names of files to delete
        self.symlinks = []   # (name, symlink target)
    def queue(self, todo, distdirs, logfile):
        # Merge this directory's work into the shared per-directory
        # collection in 'distdirs', de-duplicating via logfile.enqueue
        # against work already completed in an earlier run or already
        # queued by a newer log.
        if not self.dir in distdirs:
            collection = CollectedDistDir(self.dir)
            distdirs[self.dir] = collection
        else:
            collection = distdirs[self.dir]
        for fn, fr in self.files:
            ffn = self.dir + "/" + fn
            if logfile.alreadydone.get(ffn, "") == "asdotnew":
                # The ".new" upload already happened in an earlier run;
                # only the final rename (INDIRECT round) is still needed.
                if logfile.enqueue(todo, ffn, Round.INDIRECT):
                    collection.files[ffn] = logfile
                    collection.transfered += 1
            else:
                if logfile.enqueue(todo, ffn,
                        Round.DISTFILES, ffn + ".new",
                        fr, collection.onedone):
                    collection.files[ffn] = logfile
        for fn in self.deletes:
            ffn = self.dir + "/" + fn
            if logfile.enqueue(todo, ffn, Round.INDIRECT):
                collection.deletes[ffn] = logfile
        for fn, flt in self.symlinks:
            ffn = self.dir + "/" + fn
            if logfile.enqueue(todo, ffn, Round.INDIRECT):
                collection.symlinks[ffn] = (flt, logfile)
+
class LogFile:
    """One reprepro .outlog file: its parsed contents plus resume state.

    Progress is persisted by appending "DONE\t<state>\t<name>" lines to
    the .outlog file itself, so an interrupted upload can be resumed.
    Once nothing is left to do the file is renamed to its donefile name.
    """
    def parselogline(self, fields):
        # Parse one tab-separated .outlog line into this object's lists.
        if fields[0] == 'POOLNEW':
            if len(fields) != 2:
                raise ParseErrorWrongCount(fields[0])
            self.newpoolfiles.append(fields[1])
        elif fields[0] == 'POOLDELETE':
            if len(fields) != 2:
                raise ParseErrorWrongCount(fields[0])
            self.deletepoolfiles.append(fields[1])
        elif fields[0].startswith('BEGIN-'):
            # Distribution/snapshot markers carry no work of their own.
            # NOTE(review): the accompanying documentation calls these
            # START-DISTRIBUTION / START-SNAPSHOT, not BEGIN-* -- TODO
            # confirm which prefix reprepro actually writes.
            pass
        elif fields[0].startswith('END-'):
            pass
        elif fields[0].startswith('DIST'):
            # DISTFILE / DISTDELETE / DISTSYMLINK / DISTKEEP:
            # per-directory index file work.
            command = fields[0][4:]
            if command not in ['KEEP', 'FILE', 'DELETE', 'SYMLINK']:
                raise ParseError("Unknown command %s" % command)
            if not fields[1] in self.dists:
                d = self.dists[fields[1]] = DistDir(fields[1])
            else:
                d = self.dists[fields[1]]
            if command == 'FILE':
                if len(fields) != 4:
                    raise ParseErrorWrongCount(fields[0])
                d.files.append((fields[2], fields[3]))
            elif command == 'DELETE':
                if len(fields) != 3:
                    raise ParseErrorWrongCount(fields[0])
                d.deletes.append(fields[2])
            elif command == 'SYMLINK':
                if len(fields) != 4:
                    raise ParseErrorWrongCount(fields[0])
                d.symlinks.append((fields[2], fields[3]))
        elif fields[0] == "DONE":
            # Progress marker appended by markpartial() in an earlier run.
            # NOTE(review): no field-count check here; a truncated DONE
            # line raises IndexError instead of a ParseError.
            self.alreadydone[fields[2]] = fields[1]
        else:
            raise ParseError("Unknown command %s" % fields[0])
    def __init__(self, logfile, donefile):
        # logfile: path of the .outlog to process;
        # donefile: path it is renamed to once fully processed.
        self.alreadydone = dict()   # name -> state ("asdotnew", "done", ...)
        self.logfile = logfile
        self.donefile = donefile
        try:
            lf = open(logfile, 'r', encoding='utf-8')
        except Exception as e:
            raise CriticalError("Cannot open %s: %s" % (repr(logfile), e))
        self.newpoolfiles = []      # filekeys to upload
        self.dists = {}             # directory name -> DistDir
        self.deletepoolfiles = []   # filekeys to delete
        self.todocount = 0
        for l in lf:
            if l[-1] != '\n':
                raise ParseError("not a text file")
            self.parselogline(l[:-1].split('\t'))
        lf.close()
    def queue(self, todo, distdirs):
        # Enter all work from this log into the global 'todo' dict and
        # the shared per-directory collections; returns True if anything
        # is left to do, else renames the log to its donefile right away.
        self.todo = set()
        for f in self.deletepoolfiles:
            self.enqueue(todo, f, Round.DELETES, f, None, self.doneone)
        for f in self.newpoolfiles:
            self.enqueue(todo, f, Round.POOLFILES, f, options.outdir + "/" + f, self.doneone)
        for d in self.dists.values():
            d.queue(todo, distdirs, self)
        if not self.todocount:
            # nothing to do left, mark as done:
            os.rename(self.logfile, self.donefile)
            del self.todo
        return self.todocount > 0
    def enqueue(self, dic, elem, *something):
        # Queue elem with task data 'something' unless it was already
        # completed in an earlier run (-> Round.DONE placeholder) or
        # already queued by another log file (-> mark obsoleted here).
        # Returns True when newly queued.
        if elem in self.alreadydone and self.alreadydone[elem] != "asdotnew":
            if not elem in dic:
                dic[elem] = (Round.DONE,)
            return False
        elif not elem in dic:
            self.todo.add(elem)
            self.todocount += 1
            dic[elem] = something
            return True
        else:
            self.markpartial(elem, "obsoleted")
            return False
    def markpartial(self, filename, message="done"):
        # Persist partial progress by appending a DONE line to the log,
        # so a rerun after interruption skips this item.
        if options.verbose:
            print("%s: %s" % (message, repr(filename)))
        f = open(self.logfile, "a", encoding="utf-8")
        print("DONE\t%s\t%s" % (message, filename), file=f)
        f.close()
    def doneone(self, filename, message="done"):
        # Record one finished item; when the last one is done, rename
        # the log to its donefile name.  Returns no follow-up tasks.
        assert (filename in self.todo)
        self.todo.discard(filename)
        assert (self.todocount > 0)
        self.todocount -= 1
        self.markpartial(filename, message=message)
        if self.todocount == 0:
            os.rename(self.logfile, self.donefile)
        return []
+
+
def doround(s, r, todo):
    """Start every queued task belonging to round r, then run the scheduler.

    s is the sftp scheduler, todo maps names to task tuples of the form
    (Round, filename, source, donefunc); entries of other rounds (and the
    1-tuple DONE/INDIRECT placeholders) are left alone.  A source of None
    means deletion, anything else is a local file to upload.
    """
    for entry in todo.values():
        assert isinstance(entry[0], Round)
        if entry[0] != r:
            continue
        _, filename, source, donefunc = entry
        if source is None:
            generator = deletefile(filename, donefunc)
        else:
            generator = writefile(filename, source, donefunc)
        s.start(sftp.TaskFromGenerator(generator))
    s.dispatch()
+
+
class Options:
    # Simple container for command line / config file settings.
    # Tri-state attributes start as None meaning "not set yet", so the
    # conf/options file and outsftphook.conf only fill in values that
    # were not already given on the command line.
    def __init__(self):
        self.verbose = None       # print progress messages
        self.pending = False      # process pending logs instead of arguments
        self.autoretry = None     # reprocess older pending logs automatically
        self.ignorepending = False
        self.forceorder = False   # accept out-of-order arguments
        self.confdir = None
        self.basedir = None
        self.outdir = None
        self.logdir = None
        self.debugsftp = None     # debug level for sftp.py

# single global options object shared by all functions
options = Options()
+
def parseoptions(args):
    """Parse command line options into the global *options* object.

    Consumes leading "--" options from *args* (modifying the list in
    place), then derives confdir/outdir/logdir from --basedir, the
    conf/options file and the REPREPRO_* environment variables where
    needed.  Raises CriticalError on unknown or contradictory options.
    """
    while args and args[0].startswith("--"):
        arg = args.pop(0)
        if arg == "--verbose" or arg == "-v":
            options.verbose = True
        elif arg.startswith("--debug-sftp="):
            options.debugsftp = int(arg[13:])
        elif arg == "--pending":
            options.pending = True
        elif arg == "--autoretry":
            # bug fix: documented in --help and checked below, but was
            # previously never accepted by this parser
            options.autoretry = True
        elif arg == "--ignore-pending":
            options.ignorepending = True
        elif arg == "--force-order":
            options.forceorder = True
        elif arg.startswith("--basedir="):
            # bug fix: was 'arg == "--basedir="' (never matches a value)
            # and 'arg[:10]' (the option text itself, not the value)
            options.basedir = arg[10:]
        elif arg == "--basedir":
            options.basedir = args.pop(0)
        elif arg.startswith("--outdir="):
            options.outdir = arg[9:]
        elif arg == "--outdir":
            options.outdir = args.pop(0)
        elif arg.startswith("--logdir="):
            options.logdir = arg[9:]
        elif arg == "--logdir":
            options.logdir = args.pop(0)
        elif arg == "--help":
            print("""outsftphook.py: an reprepro outhook example using sftp
This hook sends changed files over sftp to a remote host. It is usually put into
conf/options as outhook, but may also be called manually.
Options:
 --verbose tell what you did
 --basedir DIR sets the following to default values
 --outdir DIR directory to find pool/ and dist/ directories in
 --logdir DIR directory to check for unprocessed outlog files
 --pending process pending files instead of arguments
 --autoretry reprocess older pending files, too
 --ignore-pending ignore pending files
 --force-order do not bail out if the given files are not ordered
 --debug-sftp=N debug sftp.py (or your remote sftp server)
""")
            raise SystemExit(0)
        else:
            raise CriticalError("Unexpected command line option %s" % repr(arg))
    if options.pending and options.ignorepending:
        raise CriticalError("Cannot do both --pending and --ignore-pending")
    if options.autoretry and options.forceorder:
        # message fixed: this check is about --autoretry, not --pending
        raise CriticalError("Cannot do both --autoretry and --force-order")
    if options.autoretry and options.ignorepending:
        raise CriticalError("Cannot do both --autoretry and --ignore-pending")
    # we need confdir, logdir and outdir, if they are given, all is done
    if options.logdir is not None and options.outdir is not None and options.confdir is not None:
        return
    # otherwise it gets more complicated: locate the conf directory
    # first, as it may contain an options file naming the others
    preconfdir = options.confdir
    if preconfdir is None:
        preconfdir = os.environ.get("REPREPRO_CONFIG_DIR", None)
    if preconfdir is None:
        if options.basedir is not None:
            preconfdir = options.basedir + "/conf"
        elif "REPREPRO_BASE_DIR" in os.environ:
            preconfdir = os.environ["REPREPRO_BASE_DIR"] + "/conf"
        else:
            raise CriticalError("If not called by reprepro, please either give (--logdir and --outdir) or --basedir!")
    optionsfile = preconfdir + "/options"
    if os.path.exists(optionsfile):
        # reprepro's conf/options file may set the directories, too;
        # command line values take precedence
        f = open(optionsfile, "r")
        for line in f:
            line = line.strip()
            if len(line) == 0 or line[0] == '#' or line[0] == ';':
                continue
            line = line.split()
            if line[0] == "basedir" and options.basedir is None:
                options.basedir = line[1]
            elif line[0] == "confdir" and options.confdir is None:
                options.confdir = line[1]
            elif line[0] == "logdir" and options.logdir is None:
                options.logdir = line[1]
            elif line[0] == "outdir" and options.outdir is None:
                options.outdir = line[1]
        f.close()
    if options.basedir is None:
        options.basedir = os.environ.get("REPREPRO_BASE_DIR", None)
    if options.outdir is None:
        if options.basedir is None:
            raise CriticalError("Need --basedir if not called by reprepro")
        options.outdir = options.basedir
    if options.logdir is None:
        if options.basedir is None:
            raise CriticalError("Need --basedir if not called by reprepro")
        options.logdir = options.basedir + "/logs"
    if options.confdir is None:
        if "REPREPRO_CONFIG_DIR" in os.environ:
            options.confdir = os.environ["REPREPRO_CONFIG_DIR"]
        else:
            if options.basedir is None:
                raise CriticalError("Need --basedir if not called by reprepro")
            options.confdir = options.basedir + "/conf"
+
def main(args):
    # Entry point: determine configuration, read outsftphook.conf,
    # decide which .outlog files to process (arguments and/or pending
    # ones found in logdir), then upload/delete files in three rounds:
    # pool files first, then distribution index files, then deletions.
    global errors, servername, username, targetdir
    if "REPREPRO_OUT_DIR" in os.environ or "REPREPRO_LOG_DIR" in os.environ:
        # assume being called by reprepro if one of those variable
        # is set, so they all should be set:
        options.outdir = os.environ["REPREPRO_OUT_DIR"]
        options.logdir = os.environ["REPREPRO_LOG_DIR"]
        options.confdir = os.environ["REPREPRO_CONFIG_DIR"]
    else:
        parseoptions(args)
    assert (options.outdir and (options.ignorepending or options.logdir) and options.confdir)
    conffilename = options.confdir + "/outsftphook.conf"
    if os.path.exists(conffilename):
        # config file supplies connection data; command line values
        # (verbose, autoretry, debug) take precedence where both exist
        conffile = open(conffilename, "r")
        for line in conffile:
            line = line.strip().split(None, 1)
            if len(line) == 0 or line[0].startswith("#"):
                continue
            if line[0] == "servername":
                servername = line[1]
            elif line[0] == "username":
                username = line[1]
            elif line[0] == "targetdir":
                targetdir = line[1]
            elif line[0] == "debug":
                if options.debugsftp is None:
                    try:
                        options.debugsftp = int(line[1])
                    except Exception:
                        raise CriticalError(("Cannot parse %s: " +
                                "unparseable number %s") %
                                (repr(conffilename), repr(line[1])))
            elif line[0] == "verbose":
                if line[1].lower() in {'yes', 'on', '1', 'true'}:
                    if options.verbose is None:
                        options.verbose = True
                elif line[1].lower() in {'no', 'off', '0', 'false'}:
                    if options.verbose is None:
                        options.verbose = False
                else:
                    raise CriticalError(("Cannot parse %s: " +
                            "unparseable truth value %s") %
                            (repr(conffilename), repr(line[1])))
            elif line[0] == "autoretry":
                if line[1].lower() in {'yes', 'on', '1', 'true'}:
                    if options.autoretry is None:
                        options.autoretry = True
                elif line[1].lower() in {'no', 'off', '0', 'false'}:
                    if options.autoretry is None:
                        options.autoretry = False
                else:
                    raise CriticalError(("Cannot parse %s: " +
                            "unparseable truth value %s") %
                            (repr(conffilename), repr(line[1])))
            else:
                raise CriticalError("Cannot parse %s: unknown option %s" %
                        (repr(conffilename), repr(line[0])))
        conffile.close()
    if options.debugsftp is None:
        options.debugsftp = 0
    if targetdir and not targetdir.endswith("/"):
        targetdir = targetdir + "/"
    if not servername:
        raise CriticalError("No servername configured!")
    if not username:
        raise CriticalError("No username configured!")

    if len(args) <= 0:
        if not options.pending:
            raise CriticalError("No .outlog files given at command line!")
    else:
        if options.pending:
            raise CriticalError("--pending might not be combined with arguments!")
    # collect .outlog files still waiting in logdir (unless ignored)
    if options.ignorepending:
        pendinglogs = set()
    else:
        pendinglogs = set(name for name in os.listdir(options.logdir)
                if name.endswith(".outlog"))
    # validate arguments and check they are given in ascending order,
    # tracking the newest basename seen
    maxbasename = None
    for f in args:
        if len(f) < 8 or f[-7:] != ".outlog":
            raise CriticalError("command line argument '%s' does not look like a .outlog file!" % f)
        bn = os.path.basename(f)
        pendinglogs.discard(bn)
        if maxbasename:
            if maxbasename < bn:
                maxbasename = bn
            elif not options.forceorder:
                raise CriticalError("The arguments are not in order (%s <= %s). Applying in this order might not be safe. (use --force-order to proceed in this order anyway)" % (bn, maxbasename))
        else:
            maxbasename = bn
    if options.pending:
        pendinglogs = sorted(pendinglogs)
    else:
        # only logs older than the newest argument count as unprocessed
        pendinglogs = sorted(filter(lambda bn: bn < maxbasename, pendinglogs))
    if pendinglogs and not options.autoretry:
        raise CriticalError("Unprocessed earlier outlogs found: %s\nYou need to process them first (or use --autoretry or autoretry true in outsftphook.conf to automatically process them)" % repr(pendinglogs))
    if pendinglogs and len(args) > 1:
        raise CriticalError("autoretry does not work with multiple log files given (yet).")
    # prepend the pending logs so they are handled before the arguments
    args = list(map(lambda bn: options.logdir + "/" + bn, pendinglogs)) + args
    outlogfiles = []
    for f in args:
        donefile = f[:-7] + ".done"
        if options.verbose:
            print("Parsing '%s'" % f)
        try:
            outlogfiles.append(LogFile(f, donefile))
        except ParseError as e:
            raise CriticalError("Error parsing %s: %s" %(f, str(e)))
    todo = {}
    distdirs = {}
    workpending = False
    # queue newest first, so older logs see what is already queued
    for o in reversed(outlogfiles):
        workpending |= o.queue(todo, distdirs)
    if not workpending:
        if options.verbose:
            print("Nothing to do")
        raise SystemExit(0)
    s = sftp.Connection(servername=servername, username=username, debug=options.debugsftp)
    # round 1: upload new pool files
    doround(s, Round.POOLFILES, todo)
    if errors:
        raise SystemExit(1)
    # round 2: distribution index files, once their pool files are there
    for d in distdirs.values():
        for t in d.finalizeifready():
            s.start(t)
    doround(s, Round.DISTFILES, todo)
    if errors:
        raise SystemExit(1)
    # round 3: deletions last, so nothing referenced disappears early
    doround(s, Round.DELETES, todo)
    if errors:
        raise SystemExit(1)
+
# Top-level entry: report CriticalError messages cleanly on stderr
# instead of a traceback, and exit with status 1.
try:
    main(sys.argv[1:])
except CriticalError as e:
    print(str(e), file=sys.stderr)
    raise SystemExit(1)
diff --git a/docs/outstore.py b/docs/outstore.py
new file mode 100755
index 0000000..d0cc14e
--- /dev/null
+++ b/docs/outstore.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python3
+# Copyright (C) 2012 Bernhard R. Link
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+
+# This is an example outhook script.
+# Actually it is part of the testsuite and does many things
+# an actual outhook script would never do.
+# But it checks so many aspects of how an outhook script is called
+# that it should make quite clear what an outhook script can expect.
+
+import sys, os, subprocess, select, dbm
+
def poolfile(outdir, name):
    """Return the database value recorded for a pool file: its size."""
    info = os.lstat(outdir + '/' + name)
    return "poolfile {:d} bytes".format(info.st_size)
def distfile(outdir, name):
    """Return the database value recorded for a dists/ file: its size."""
    info = os.lstat(outdir + '/' + name)
    return "distfile {:d} bytes".format(info.st_size)
def distsymlink(distdir, target):
    """Return the database value recorded for a symlink in *distdir*."""
    return "distsymlink -> {}/{}".format(distdir, target)
def collecteddistfile(outdir, name):
    # Reconstruct the expected database value for a file found under
    # outdir: a size entry (via distfile) for a regular file, or a
    # "distsymlink -> dir/target" entry for a symlink, with one leading
    # '../' of the link target cancelled per directory level of *name*.
    if os.path.islink(outdir + '/' + name):
        l = os.readlink(outdir + '/' + name)
        d = os.path.dirname(name)
        # walk up: each '../' removes one path component from d
        while d and l[0:3] == '../':
            d = os.path.dirname(d)
            l = l[3:]
        if d:
            d = d + '/'
        return "distsymlink -> %s%s" % (d,l)
    else:
        return distfile(outdir, name)
+
def processfile(logfile, donefile, db):
    # Parse one reprepro .outlog file and apply the recorded pool/dists
    # changes to the dbm database *db*.  The expected file layout is:
    # POOLNEW lines, then BEGIN-DISTRIBUTION/BEGIN-SNAPSHOT ... END-*
    # sections, then POOLDELETE lines.  Raises CriticalError on any
    # deviation from that format.
    # NOTE(review): 'donefile' and 'mode' are currently unused here, and
    # 'lf' is never closed -- confirm whether that is intentional.
    # print("Parsing '%s'" % logfile)
    lf = open(logfile, 'r', encoding='utf-8')
    newpoolfiles = []
    distributions = []
    deletepoolfiles = []
    mode = 'POOLNEW'
    # This parser is wasteful and unnecessarily complicated, but its
    # purpose is mainly making sure the output of reprepro is
    # well-formed and not so much targeted at doing actual work.
    for l in lf:
        if l[-1] != '\n':
            raise CriticalError("Malformed file '%s' (not a text file)" % logfile)
        l = l[:-1]
        fields = l.split('\t')
        if fields[0] != 'POOLNEW':
            # first non-POOLNEW line ends this section
            break
        if len(fields) != 2:
            raise CriticalError("Malformed file '%s': POOLNEW with more than one argument" % logfile)
        newpoolfiles.append(fields[1])
    else:
        # for/else: file ended while still in the POOLNEW section
        fields = ['EOF']
    while fields[0] == 'BEGIN-DISTRIBUTION' or fields[0] == 'BEGIN-SNAPSHOT':
        beginmarker = fields[0]
        endmarker = 'END-' + beginmarker[6:]
        if len(fields) != 3 and len(fields) != 4:
            raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile,beginmarker))
        distname = fields[1]
        distdir = fields[2]
        distfiles = []
        distsymlinks = []
        distdeletes = []
        for l in lf:
            if l[-1] != '\n':
                raise CriticalError("Malformed file '%s' (not a text file)" % logfile)
            l = l[:-1]
            fields = l.split('\t')
            if fields[0] == endmarker:
                if len(fields) != 3 and len(fields) != 4:
                    raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, endmarker))
                if fields[1] != distname or fields[2] != distdir:
                    raise CriticalError("Malformed file '%s': %s not matching previous %s" % (logfile, endmarker, beginmarker))
                break
            elif fields[0] == 'DISTKEEP':
                continue
            elif not fields[0] in ['DISTFILE', 'DISTSYMLINK', 'DISTDELETE']:
                raise CriticalError("Malformed file '%s': Unexpected '%s'" % (logfile, fields[0]))
            if len(fields) < 3:
                raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0]))
            if fields[1] != distdir:
                raise CriticalError("Malformed file '%s': wrong distdir '%s' in '%s'" %(logfile, fields[1], fields[0]))
            if fields[0] == 'DISTFILE':
                if len(fields) != 4:
                    raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0]))
                distfiles.append((fields[2], fields[3]))
            elif fields[0] == 'DISTDELETE':
                if len(fields) != 3:
                    raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0]))
                distdeletes.append(fields[2])
            elif fields[0] == 'DISTSYMLINK':
                if len(fields) != 4:
                    raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0]))
                distsymlinks.append((fields[2], fields[3]))
        else:
            # for/else: the END marker was never seen
            raise CriticalError("Malformed file '%s': unexpected end of file (%s missing)" % (logfile, endmarker))
        distributions.append((distname, distdir, distfiles, distsymlinks, distdeletes))
        l = next(lf, 'EOF\n')
        if l[-1] != '\n':
            raise CriticalError("Malformed file '%s' (not a text file)" % logfile)
        l = l[:-1]
        fields = l.split('\t')
    while fields[0] == 'POOLDELETE':
        if len(fields) != 2:
            raise CriticalError("Malformed file '%s': wrong number of arguments for POOLDELETE" % logfile)
        deletepoolfiles.append(fields[1])
        l = next(lf, 'EOF\n')
        if l[-1] != '\n':
            raise CriticalError("Malformed file '%s' (not a text file)" % logfile)
        l = l[:-1]
        fields = l.split('\t')
    if fields[0] != 'EOF' or next(lf, None) != None:
        raise CriticalError("Malformed file '%s': Unexpected command '%s'" % (logfile, fields[0]))
    # print("Processing '%s'" % logfile)
    # Checked input to death, now actually do something
    outdir = os.environ['REPREPRO_OUT_DIR']
    for p in newpoolfiles:
        bp = bytes(p, encoding="utf-8")
        if bp in db:
            raise Exception("duplicate pool file %s" % p)
        db[bp] = poolfile(outdir, p)
    for distname, distdir, distfiles, distsymlinks, distdeletes in distributions:
        for name, orig in distfiles:
            db[distdir + '/' + name] = distfile(outdir, orig)
        for name, target in distsymlinks:
            db[distdir + '/' + name] = distsymlink(distdir, target)
        for name in distdeletes:
            del db[distdir + '/' + name]
    for p in deletepoolfiles:
        bp = bytes(p, encoding="utf-8")
        if not bp in db:
            raise Exception("deleting non-existant pool file %s" % p)
        del db[bp]
+
def collectfiles(dir, name):
    """Yield relative paths of all regular files below dir/name.

    Paths are relative to *dir* (so they start with *name*);
    subdirectories are descended into recursively.
    """
    for entry in os.listdir(dir + '/' + name):
        relpath = name + '/' + entry
        if os.path.isdir(dir + '/' + relpath):
            yield from collectfiles(dir, relpath)
        else:
            yield relpath
+
def collectpool(outdir):
    """Return "path: value" lines for every file under outdir/pool.

    Returns an empty list when there is no pool directory at all.
    """
    if not os.path.isdir(outdir + '/pool'):
        return []
    return ["%s: %s" % (filename, poolfile(outdir, filename))
            for filename in collectfiles(outdir, 'pool')]
+
def collectdists(outdir):
    """Return "path: value" lines for every file under outdir/dists.

    Returns an empty list when there is no dists directory at all.
    """
    if not os.path.isdir(outdir + '/dists'):
        return []
    return ["%s: %s" % (filename, collecteddistfile(outdir, filename))
            for filename in collectfiles(outdir, 'dists')]
+
def showdiff(i1, i2):
    """Print the difference between two sorted iterators of strings.

    Lines only present in *i1* are printed prefixed with '+', lines
    only present in *i2* with '-'.  Returns True when both sequences
    were identical, False otherwise.
    """
    clean = True
    l1 = next(i1, None)
    l2 = next(i2, None)
    while l1 or l2:
        if l1 == l2:
            l1 = next(i1, None)
            l2 = next(i2, None)
        elif l1 is not None and (l2 is None or l1 < l2):
            print("+ %s" % l1)
            clean = False
            l1 = next(i1, None)
        elif l2 is not None and (l1 is None or l1 > l2):
            print("- %s" % l2)
            clean = False
            l2 = next(i2, None)
        else:
            # unreachable for totally ordered input; bug fix: the old
            # raise("unexpected") raised a TypeError (strings are not
            # exceptions), hiding the intended message
            raise AssertionError("unexpected")
    return clean
+
def check(db):
    # Compare the state recorded in the dbm database *db* against the
    # files actually present under REPREPRO_OUT_DIR; print any
    # difference via showdiff() and raise CriticalError on mismatch.
    outdir = os.environ['REPREPRO_OUT_DIR']
    actualfiles = collectpool(outdir)
    actualfiles.extend(collectdists(outdir))

    # dbm stores bytes; decode so the entries compare as strings
    expectedfiles = []
    for k in db.keys():
        expectedfiles.append("%s: %s" % (k.decode(encoding='utf-8'), db[k].decode(encoding='utf-8')))
    expectedfiles.sort()
    actualfiles.sort()
    if not showdiff(iter(expectedfiles), iter(actualfiles)):
        raise CriticalError("outdir does not match expected state")
+
class CriticalError(Exception):
    """Fatal error: its message is printed to stderr and the script exits 1."""
    pass
+
def main(args):
    """Process the given .outlog files, or handle --print/--check.

    --print dumps the state database, --check verifies it against the
    out directory.  Otherwise every argument must be a .outlog file;
    an argument is skipped when its .outlogdone marker already exists.
    Raises CriticalError on bad arguments.
    """
    if len(args) <= 0:
        raise CriticalError("No .outlog files given at command line!")

    if len(args) == 1 and args[0] == '--print':
        db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'r')
        # bug fix: 'sort(...)' is not defined; use the sorted() builtin
        for k in sorted(db.keys()):
            print("%s: %s" % (k, db[k]))
        return
    if len(args) == 1 and args[0] == '--check':
        db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'r')
        check(db)
        return

    # validate all arguments before touching the database
    for f in args:
        if len(f) < 8 or f[-7:] != ".outlog":
            raise CriticalError("command line argument '%s' does not look like a .outlog file!" % f)

    db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'c')

    for f in args:
        donefile = f[:-7] + ".outlogdone"
        if os.path.exists(donefile):
            print("Ignoring '%s' as '%s' already exists!" % (f,donefile), file=sys.stderr)
            continue
        processfile(f, donefile, db)
+
# Top-level entry: report CriticalError messages on stderr instead of
# a traceback, and exit with status 1.
try:
    main(sys.argv[1:])
except CriticalError as e:
    print(str(e), file=sys.stderr)
    raise SystemExit(1)
diff --git a/docs/pdiff.example b/docs/pdiff.example
new file mode 100755
index 0000000..bf58d64
--- /dev/null
+++ b/docs/pdiff.example
@@ -0,0 +1,255 @@
+#!/usr/bin/env python3
+
+# Note that rredtool can also generate .diff files and
+# is usually more efficient at it than this script
+
+#############################################################################
+# generates partial package updates list as reprepro hook
+# (to be used by apt-get >= 0.6.44, apt-qupdate or things compatible with that)
+
+# changes Copyright 2005 Bernhard R. Link <brlink@debian.org>
+# as this is used as hook, it does not need any parsing of
+# Configuration or Handling of architectures and components.
+# Also reprepro will present old and new file, so it does not
+# need to store a permanent copy of the last version.
+# This needs either python3-apt installed or you have to change
+# it to use another sha1 calculation method.
+
+# HOW TO USE:
+# - install python3-apt
+# - make sure your paths contain no ' characters.
+# - be aware this is still quite experimental and might not
+# report some errors properly
+# - uncompress this file if it is compressed
+# - copy this file to your conf/ directory as "pdiff"
+# - make it executable
+# - add something like the following to the every distribution
+# in conf/distributions you want to have diffs for:
+#
+# DscIndices: Sources Release . .gz pdiff
+# DebIndices: Packages Release . .gz pdiff
+#
+# The first line is for source indices, the second for binary indices.
+# Make sure uncompressed index files are generated (the single dot in those
+# lines), as this version only diffs the uncompressed files.
+
+# This file is a heavily modified version of apt-qupdate's tiffany,
+# (downloaded from http://ftp-master.debian.org/~ajt/tiffani/tiffany
+# 2005-02-20)which says:
+#--------------------------------------------------------------------
+# idea and basic implementation by Anthony, some changes by Andreas
+# parts are stolen from ziyi
+#
+# Copyright (C) 2004-5 Anthony Towns <aj@azure.humbug.org.au>
+# Copyright (C) 2004-5 Andreas Barth <aba@not.so.argh.org>
+#--------------------------------------------------------------------
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+#############################################################################
+
+import sys, os, time
+import apt_pkg
+
+################################################################################
+
def usage(exit_code=0):
    """Print the usage message and terminate with *exit_code*."""
    print("""Usage: pdiff.example directory newfile oldfile mode 3>releaselog
Write out ed-style diffs to Packages/Source lists
This file is intended to be called by reprepro as hook
given to DebIndices, UDebIndices or DscIndices.

    """)
    sys.exit(exit_code)
+
+
def tryunlink(file):
    """Best-effort removal of *file*: warn instead of raising on failure."""
    try:
        os.unlink(file)
    except OSError:
        # e.g. missing file or permission problem; keep going regardless
        print("warning: removing of %s denied" % (file))
+
class Updates:
    # In-memory representation of a Packages.diff/Index file: the patch
    # history (SHA1-History / SHA1-Patches sections), the checksum and
    # size of the current full file (SHA1-Current), and an optional
    # canonical path.  self.max limits how many patches are kept.
    def __init__(self, readpath = None):
        self.can_path = None
        self.history = {}        # patch name -> [history (sha1,size), patch (sha1,size)]
        self.max = 14            # maximum number of patches to keep
        self.readpath = readpath
        self.filesizesha1 = None # (sha1, size) of the current full file

        if readpath:
            try:
                f = open(readpath + "/Index")
                x = f.readline()

                def read_hashs(ind, f, self, x=x):
                    # Parse the indented " <sha1> <size> <name>" lines of
                    # one section into self.history[name][ind]; returns
                    # the first line that does not belong to the section.
                    while 1:
                        x = f.readline()
                        if not x or x[0] != " ": break
                        l = x.split()
                        if l[2] not in self.history:
                            self.history[l[2]] = [None,None]
                        self.history[l[2]][ind] = (l[0], int(l[1]))
                    return x

                while x:
                    l = x.split()

                    if len(l) == 0:
                        x = f.readline()
                        continue

                    if l[0] == "SHA1-History:":
                        x = read_hashs(0,f,self)
                        continue

                    if l[0] == "SHA1-Patches:":
                        x = read_hashs(1,f,self)
                        continue

                    if l[0] == "Canonical-Name:" or l[0]=="Canonical-Path:":
                        self.can_path = l[1]

                    if l[0] == "SHA1-Current:" and len(l) == 3:
                        self.filesizesha1 = (l[1], int(l[2]))

                    x = f.readline()

            except IOError:
                # no readable Index yet: start with an empty history
                0

    def dump(self, out=sys.stdout):
        # Write the Index representation to *out*, first pruning the
        # history to self.max entries and deleting the corresponding
        # .gz patch files from disk.
        if self.can_path:
            out.write("Canonical-Path: %s\n" % (self.can_path))

        if self.filesizesha1:
            out.write("SHA1-Current: %s %7d\n" % (self.filesizesha1))

        hs = self.history
        l = list(self.history.keys())
        l.sort()

        cnt = len(l)
        if cnt > self.max:
            # drop the oldest patches (names sort chronologically)
            for h in l[:cnt-self.max]:
                tryunlink("%s/%s.gz" % (self.readpath, h))
                del hs[h]
            l = l[cnt-self.max:]

        out.write("SHA1-History:\n")
        for h in l:
            out.write(" %s %7d %s\n" % (hs[h][0][0], hs[h][0][1], h))
        out.write("SHA1-Patches:\n")
        for h in l:
            out.write(" %s %7d %s\n" % (hs[h][1][0], hs[h][1][1], h))
+
def sizesha1(f):
    """Return (sha1-hexdigest, size-in-bytes) of the open file *f*.

    The file is rewound to the start before hashing.
    """
    size = os.fstat(f.fileno()).st_size
    f.seek(0)
    return (apt_pkg.sha1sum(f), size)
+
def getsizesha1(name):
    """Open the file *name* and return its (sha1, size) via sizesha1()."""
    with open(name, "r") as f:
        return sizesha1(f)
+
def main():
    # Hook entry point.  reprepro calls this as:
    #   pdiff <directory> <newfile> <oldfile> <mode>
    # with file descriptor 3 open for reporting additional generated
    # files (here: the .diff/Index file).
    if len(sys.argv) != 5:
        usage(1)

    directory = sys.argv[1]
    newrelfile = sys.argv[2]
    oldrelfile = sys.argv[3]
    mode = sys.argv[4]

    # this is only needed with reprepro <= 0.7
    if oldrelfile.endswith(".gz"):
        sys.exit(0);


    oldfile = "%s/%s" % (directory,oldrelfile)
    newfile= "%s/%s" % (directory,newrelfile)

    outdir = oldfile + ".diff"

    if mode == "old":
        # Nothing to do...
        if os.path.isfile(outdir + "/Index"):
            # NOTE(review): unlike the later writes this one has no
            # trailing "\n" -- confirm whether that is intentional
            os.write(3,(oldrelfile + ".diff/Index").encode("utf-8"))
        sys.exit(0);

    if mode == "new":
        # TODO: delete possible existing Index and patch files?
        sys.exit(0);

    print("making diffs between %s and %s: " % (oldfile, newfile))

    # patch names are timestamps, so they sort chronologically
    o = os.popen("date +%Y-%m-%d-%H%M.%S")
    patchname = o.readline()[:-1]
    o.close()
    difffile = "%s/%s" % (outdir, patchname)

    upd = Updates(outdir)

    oldsizesha1 = getsizesha1(oldfile)

    # should probably early exit if either of these checks fail
    # alternatively (optionally?) could just trim the patch history

    if upd.filesizesha1:
        if upd.filesizesha1 != oldsizesha1:
            print("old file seems to have changed! %s %s => %s %s" % (upd.filesizesha1 + oldsizesha1))
            sys.exit(1);

    newsizesha1 = getsizesha1(newfile)

    if newsizesha1 == oldsizesha1:
        print("file unchanged, not generating diff")
        if os.path.isfile(outdir + "/Index"):
            os.write(3,(oldrelfile + ".diff/Index\n").encode("utf-8"))
    else:
        if not os.path.isdir(outdir): os.mkdir(outdir)
        print("generating diff")
        # retry with a fresh timestamp until the name is unused
        while os.path.isfile(difffile + ".gz"):
            print("This was too fast, diffile already there, waiting a bit...")
            time.sleep(2)
            o = os.popen("date +%Y-%m-%d-%H%M.%S")
            patchname = o.readline()[:-1]
            o.close()
            difffile = "%s/%s" % (outdir, patchname)

        # TODO make this without shell...
        os.system("diff --ed '%s' '%s' > '%s'" %
                (oldfile,newfile, difffile))
        difsizesha1 = getsizesha1(difffile)
        # TODO dito
        os.system("gzip -9 '%s'" %difffile)


        upd.history[patchname] = (oldsizesha1, difsizesha1)
        upd.filesizesha1 = newsizesha1

        f = open(outdir + "/Index.new", "w")
        upd.dump(f)
        f.close()
        # Specifying the index should be enough, it contains checksums for the diffs
        os.write(3,(oldrelfile + ".diff/Index.new\n").encode("utf-8"))
+
+################################################################################
+
# Run only when executed directly (reprepro invokes this file as a hook).
if __name__ == '__main__':
    main()
diff --git a/docs/recovery b/docs/recovery
new file mode 100644
index 0000000..80740c0
--- /dev/null
+++ b/docs/recovery
@@ -0,0 +1,67 @@
+Some tips what to do if (hopefully never), your database gets
+corrupted:
+
+First there are three different databases used, residing in three
+files in your --dbdir (normally db/):
+
+1) references.db
+This file only contains the information which file in the pool/
+is needed by which target (i.e. which type/distribution/
+component/architecture quadruple). This is simply repairable by
+deleting references.db and running "rereference".
+
+The current state of this database can be seen with "dumpreferences".
+All references from some specific target can be removed with
+"_removereferences".
+
+2) files.db and checksums.db
+These files contain the information about which files in the pool/ dir
+are known and what checksums they have. Files not in here will not be
+deleted with "deleteunreferenced". Files being wrong here will not be realized
+(and thus not corrected even if told to be newly included)
+
+If both files exist, files.db is the canonical information and checksums.db
+can be regenerated with a call to collectnewchecksums.
+If only checksums.db is there, only that is used. (This means: if you
+have called collectnewchecksums since you last used a version prior to 3.3
+with this repository, you can just delete files.db. But make sure to
+never ever use a version prior to 3.0 on this repository after that.)
+
+To get this database in text form use "_listchecksums" without argument,
+to add items manually pipe it into "_addchecksums". (Filenames
+are handled as strings, so be careful).
+
+If the database is completely lost or broken, you can regain it by moving
+files.db and checksums.db out of the way and running:
+find $BASEDIR/pool -type f -printf "pool/%P\n" | reprepro -b $BASEDIR _detect
+(or cd $BASEDIR && find pool -type f -print | reprepro -b . _detect)
+
+Also single files can be removed or added by "_forget" and "_detect".
+(Again note filekeys will be handled as strings, so leading "./", double
+ slashes, "/./", symlinks and the like make them differ).
+
+3) packages.db
+This file contains multiple databases, one for each target, containing
+the chunks from the Packages or Sources files, indexed by package name.
+
+This one is the hardest to reconstruct. If you have still an uncorrupted
+"dists/" directory around, (e.g. you just deleted db/ accidentally),
+it can be reconstructed by moving your dists/ directory to some other place,
+moving the packages.db file (if still existent) away, and set every distribution
+in conf/distributions a "Update: localreadd" with localreadd in conf/updates like:
+Name: localreadd
+Suite: *
+Method: copy:/<otherplace>
+
+with otherplace being the place you moved the dists/ directory to.
+
+If the packages database is corrupt, the described way can at least reconstruct
+the Packages still landing in the Packages.gz and Sources.gz files.
+If references.db is still accessible via dumpreferences, it can give hints
+where the other files belong to. Otherwise removing references.db and calling
+"rereference" and then "dumpunreferenced" will give you a list of files not
+yet anywhere.
+
+Last but not least, there are also the "check" and "checkpool" commands, which
+can give some hints about inconsistencies. (Check will also read files missing
+from files.db+checksums.db if they are needed by packages but in the pool).
diff --git a/docs/reprepro.1 b/docs/reprepro.1
new file mode 100644
index 0000000..34f1a3c
--- /dev/null
+++ b/docs/reprepro.1
@@ -0,0 +1,2847 @@
+.TH REPREPRO 1 "2013-05-04" "reprepro" REPREPRO
+.SH NAME
+reprepro \- produce, manage and sync a local repository of Debian packages
+.mso www.tmac
+.SH SYNOPSIS
+.B reprepro \-\-help
+
+.B reprepro
+[
+\fIoptions\fP
+]
+\fIcommand\fP
+[
+\fIper\-command\-arguments\fP
+]
+.SH DESCRIPTION
+reprepro is a tool to manage a repository of Debian packages
+(.deb, .udeb, .dsc, ...).
+It stores files either being injected manually or
+downloaded from some other repository (partially) mirrored
+into a pool/ hierarchy.
+Managed packages and checksums of files are stored in a
+Berkeley DB database file,
+so no database server is needed.
+Checking signatures of mirrored repositories and creating
+signatures of the generated Package indices is supported.
+
+Former working title of this program was mirrorer.
+.SH "GLOBAL OPTIONS"
+Options can be specified before the command. Each affects a different
+subset of commands and is ignored by other commands.
+.TP
+.B \-h \-\-help
+Displays a short list of options and commands with description.
+.TP
+.B \-v, \-V, \-\-verbose
+Be more verbose. Can be applied multiple times. One uppercase
+.B \-V
+counts as five lowercase
+.B \-v.
+.TP
+.B \-\-silent
+Be less verbose. Can be applied multiple times. One
+.B \-v
+and one
+.B \-s
+cancel each other out.
+.TP
+.B \-f, \-\-force
+This option is ignored, as it no longer exists.
+.TP
+.B \-b, \-\-basedir \fIbasedir\fP
+Sets the base\-dir all other default directories are relative to.
+If none is supplied and the
+.B REPREPRO_BASE_DIR
+environment variable is not set either, the current directory
+will be used.
+.TP
+.B \-\-outdir \fIoutdir\fP
+Sets the base\-dir of the repository to manage, i.e. where the
+.B pool/
+subdirectory resides. And in which the
+.B dists/
+directory is placed by default.
+If this starts with '\fB+b/\fP', it is relative to basedir.
+
+The default for this is \fIbasedir\fP.
+.TP
+.B \-\-confdir \fIconfdir\fP
+Sets the directory where the configuration is searched in.
+
+If this starts with '\fB+b/\fP', it is relative to basedir.
+
+If none is given, \fB+b/conf\fP (i.e. \fIbasedir\fP\fB/conf\fP) will be used.
+.TP
+.B \-\-distdir \fIdistdir\fP
+Sets the directory to generate index files relatively to. (i.e. things like
+Packages.gz, Sources.gz and Release.gpg)
+
+If this starts with '\fB+b/\fP', it is relative to basedir,
+if starting with '\fB+o/\fP' relative to outdir.
+
+If none is given, \fB+o/dists\fP (i.e. \fIoutdir\fP\fB/dists\fP) is used.
+
+.B Note:
+apt has
+.B dists
+hard-coded in it, so this is mostly only useful for testing or when your webserver
+presents a different directory structure than your physical layout.
+
+.B Warning:
+Beware when changing this forth and back between two values not ending
+in the same directory.
+Reprepro only looks if files it wants are there. If nothing of the content
+changed and there is a file it will not touch it, assuming it is the one it
+wrote last time, assuming any different \fB\-\-distdir\fP ended in the same
+directory.
+So either clean a directory before setting \fB\-\-distdir\fP to it or
+do an \fBexport\fP with the new one first to have a consistent state.
+.TP
+.B \-\-logdir \fIlogdir\fP
+The directory where files generated by the \fBLog:\fP directive are
+stored if they have no absolute path.
+
+If this starts with '\fB+b/\fP', it is relative to basedir,
+if starting with '\fB+o/\fP' relative to outdir,
+with '\fB+c/\fP' relative to confdir.
+
+If none is given, \fB+b/logs\fP (i.e. \fIbasedir\fP\fB/logs\fP) is used.
+.TP
+.B \-\-dbdir \fIdbdir\fP
+Sets the directory where reprepro keeps its databases.
+
+If this starts with '\fB+b/\fP', it is relative to basedir,
+if starting with '\fB+o/\fP' relative to outdir,
+with '\fB+c/\fP' relative to confdir.
+
+If none is given, \fB+b/db\fP (i.e. \fIbasedir\fP\fB/db\fP) is used.
+
+.B Note:
+This is permanent data, no cache. One has almost to regenerate the whole
+repository when this is lost.
+.TP
+.B \-\-listdir \fIlistdir\fP
+Sets the directory where it downloads indices to when importing
+from other repositories. This is temporary data and can be safely deleted
+when not in an update run.
+
+If this starts with '\fB+b/\fP', it is relative to basedir,
+if starting with '\fB+o/\fP' relative to outdir,
+with '\fB+c/\fP' relative to confdir.
+
+If none is given, \fB+b/lists\fP (i.e. \fIbasedir\fP\fB/lists\fP) is used.
+.TP
+.B \-\-morguedir \fImorguedir\fP
+Files deleted from the pool are stored into \fImorguedir\fP.
+
+If this starts with '\fB+b/\fP', it is relative to basedir,
+if starting with '\fB+o/\fP' relative to outdir,
+with '\fB+c/\fP' relative to confdir.
+
+If none is given, deleted files are just deleted.
+.TP
+.B \-\-methoddir \fImethoddir\fP
+Look in \fImethoddir\fP instead of
+.B /usr/lib/apt/methods
+for methods to call when importing from other repositories.
+.TP
+.B \-C, \-\-component \fIcomponents\fP
+Limit the specified command to these components only.
+This will force added packages into these components,
+limit removing packages to these components,
+only list packages in these components,
+and/or otherwise only look at packages in these components,
+depending on the command in question.
+
+Multiple components are specified by separating them with \fB|\fP,
+as in \fB\-C 'main|contrib'\fP.
+.TP
+.B \-A, \-\-architecture \fIarchitectures\fP
+Limit the specified command to these architectures only.
+(i.e. only list such packages,
+only remove packages from the specified architectures,
+or otherwise only look at/act on these architectures
+depending on the specific command).
+
+Multiple architectures are specified by separating them with \fB|\fP,
+as in \fB\-A 'sparc|i386'\fP.
+
+Note that architecture \fBall\fP packages can be included to each
+architecture but are then handled separately.
+Thus by using \fB\-A\fP in a specific way one can have different versions of
+an architecture \fBall\fP package in different architectures of the
+same distribution.
+.TP
+.B \-T, \-\-type \fRdsc|deb|udeb
+Limit the specified command to these packagetypes only.
+(i.e. only list such packages, only remove such packages, only include
+such packages, ...)
+.TP
+.B \-S, \-\-section \fIsection\fP
+Overrides the section of inclusions. (Also override possible override files)
+.TP
+.B \-P, \-\-priority \fIpriority\fP
+Overrides the priority of inclusions. (Also override possible override files)
+.TP
+.BR \-\-export= ( silent\-never | never | changed | lookedat | force )
+This option specifies whether and how the high level actions
+(e.g. install, update, pull, delete)
+should export the index files of the distributions they work with.
+.TP
+.BR \-\-export=lookedat
+In this mode every distribution the action handled will be exported,
+unless there was an error possibly corrupting it.
+.br
+\fINote\fP that only missing files and files whose intended content changed
+between before and after the action will be written.
+To get a guaranteed current export, use the \fBexport\fP action.
+.br
+For backwards compatibility, \fBlookedat\fP is also available under the
+old name \fBnormal\fP.
+The name \fBnormal\fP is deprecated and will be removed in future versions.
+.TP
+.BR \-\-export=changed
+In this mode every distribution actually changed will be exported,
+unless there was an error possibly corrupting it.
+(i.e. if nothing changed, not even missing files will be created.)
+.br
+\fINote\fP that only missing files and files whose intended content changed
+between before and after the action will be written.
+To get a guaranteed current export, use the \fBexport\fP action.
+.TP
+.BR \-\-export=force
+Always export all distributions looked at, even if there was some
+error possibly bringing it into an inconsistent state.
+.TP
+.BR \-\-export=never
+No index files are exported. You will have to call \fBexport\fP later.
+.br
+\fINote\fP that you most likely additionally need the \fB\-\-keepunreferencedfiles\fP
+option, if you do not want some of the files pointed to by the untouched index
+files to vanish.
+.TP
+.BR \-\-export=silent-never
+Like never, but suppress most output about that.
+.TP
+.B \-\-ignore=\fIwhat\fP
+Ignore errors of type \fIwhat\fP. See the section \fBERROR IGNORING\fP
+for possible values.
+.TP
+.B \-\-nolistsdownload
+When running \fBupdate\fP, \fBcheckupdate\fP or \fBpredelete\fP do not download
+any Release or index files.
+This is hardly useful except when you just run one of those
+command for the same distributions.
+And even then reprepro is usually good in
+not downloading except \fBRelease\fP and \fBRelease.gpg\fP files again.
+.TP
+.B \-\-nothingiserror
+If nothing was done, return with exitcode 1 instead of the usual 0.
+
+Note that "nothing was done" means the primary purpose of the action
+in question.
+Auxiliary actions (opening and closing the database, exporting missing
+files with \-\-export=lookedat, ...) usually do not count.
+Also note that this is not very well tested.
+If you find an action that claims to have done something in some cases
+where you think it should not, please let me know.
+.TP
+.B \-\-keeptemporaries
+Do not delete temporary \fB.new\fP files when exporting a distribution
+fails.
+(reprepro first creates \fB.new\fP files in the \fBdists\fP directory and
+only if everything is generated, all files are put into their final place
+at once.
+If this option is not specified and something fails, all are deleted
+to keep \fBdists\fP clean).
+.TP
+.B \-\-keepunreferencedfiles
+Do not delete files that are no longer used because the package they
+are from is deleted/replaced with a newer version from the last distribution
+it was in.
+.TP
+.B \-\-keepunusednewfiles
+The include, includedsc, includedeb and processincoming by default delete
+any file they added to the pool that is not marked used at the end of the
+operation.
+While this keeps the pool clean and allows changing before trying to add again,
+this needs copying and checksum calculation every time one tries to add a file.
+.TP
+.B \-\-keepdirectories
+Do not try to rmdir parent directories after files or directories
+have been removed from them.
+(Do this if your directories have special permissions you want to keep,
+do not want to be pestered with warnings about errors to remove them,
+or have a buggy rmdir call deleting non-empty directories.)
+.TP
+.B \-\-ask\-passphrase
+Ask for passphrases when signing things and one is needed. This is a quick
+and dirty and unsafe implementation using the obsolete \fBgetpass(3)\fP
+function with the description gpgme is supplying.
+So the prompt will look quite funny and support for passphrases with
+more than 8 characters depend on your libc.
+Use of this option is not recommended. Use gpg-agent with pinentry instead.
+
+(With current versions of gnupg you need to set \fBpinentry-mode loopback\fP
+in your .gnupg/gpg.conf file to use \fB\-\-ask\-passphrase\fP.
+Without that option gnupg uses the much safer and recommended pinentry instead).
+.TP
+.B \-\-noskipold
+When updating do not skip targets where no new index files and no files
+marked as already processed are available.
+
+If you changed a script to preprocess downloaded index files or
+changed a Listfilter, you most likely want to call reprepro with \-\-noskipold.
+.TP
+.B \-\-waitforlock \fIcount
+If there is a lockfile indicating another instance of reprepro is currently
+using the database, retry \fIcount\fP times after waiting for 10 seconds
+each time.
+The default is 0 and means to error out instantly.
+.TP
+.B \-\-spacecheck full\fR|\fPnone
+The default is \fBfull\fR:
+.br
+In the update commands, check for every to be downloaded file which filesystem
+it is on and how much space is left.
+.br
+To disable this behaviour, use \fBnone\fP.
+.TP
+.BI \-\-dbsafetymargin " bytes-count"
+If checking for free space, reserve \fIbytes-count\fP bytes on the filesystem
+containing the \fBdb/\fP directory.
+The default is 104857600 (i.e. 100MB), which is quite large.
+But as there is no way to know in advance how large the databases will
+grow and libdb is extremely touchy in that regard, lower only when you know
+what you do.
+.TP
+.BI \-\-safetymargin " bytes-count"
+If checking for free space, reserve \fIbytes-count\fP bytes on filesystems
+not containing the \fBdb/\fP directory.
+The default is 1048576 (i.e. 1MB).
+.TP
+.B \-\-noguessgpgtty
+Don't set the environment variable
+.BR GPG_TTY ,
+even when it is not set, stdin is terminal and
+.B /proc/self/fd/0
+is a readable symbolic link.
+.TP
+.B \-\-gnupghome
+Set the
+.B GNUPGHOME
+environment variable to the given directory as argument to this option.
+And your gpg will most likely use the content of this variable
+instead of "~/.gnupg".
+Take a look at
+.BR gpg (1)
+to be sure.
+This option in the command line is usually not very useful, as it is possible
+to set the environment variable directly.
+Its main reason for existence is that it can be used in \fIconf\fP\fB/options\fP.
+.TP
+.BI \-\-gunzip " gz-uncompressor"
+While reprepro links against \fBlibz\fP, it will look for the program given
+with this option (or \fBgunzip\fP if not given) and use that when uncompressing
+index files while downloading from remote repositories.
+(So that downloading and uncompression can happen at the same time).
+If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing
+will always be done using the built in uncompression method.
+The program has to accept the compressed file as stdin and write
+the uncompressed file into stdout.
+.TP
+.BI \-\-bunzip2 " bz2-uncompressor"
+When uncompressing downloaded index files or if not linked against \fBlibbz2\fP
+reprepro will use this program to uncompress \fB.bz2\fP files.
+The default value is \fBbunzip2\fP.
+If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing
+will always be done using the built in uncompression method or not be possible
+if not linked against \fBlibbz2\fP.
+The program has to accept the compressed file as stdin and write
+the uncompressed file into stdout.
+.TP
+.BI \-\-unlzma " lzma-uncompressor"
+When uncompressing downloaded index files or if not linked against \fBliblzma\fP
+reprepro will use this program to uncompress \fB.lzma\fP files.
+The default value is \fBunlzma\fP.
+If the program is not found or is \fBNONE\fP (all-uppercase)
+then uncompressing lzma files will always be done using
+the built in uncompression method
+or not be possible if not linked against \fBliblzma\fP.
+The program has to accept the compressed file as stdin and write
+the uncompressed file into stdout.
+.TP
+.BI \-\-unxz " xz-uncompressor"
+When uncompressing downloaded index files or if not linked against \fBliblzma\fP
+reprepro will use this program to uncompress \fB.xz\fP files.
+The default value is \fBunxz\fP.
+If the program is not found or is \fBNONE\fP (all-uppercase)
+then uncompressing xz files will always be done using
+the built in uncompression method
+or not be possible if not linked against \fBliblzma\fP.
+The program has to accept the compressed file as stdin and write
+the uncompressed file into stdout.
+.TP
+.BI \-\-lunzip " lzip-uncompressor"
+When trying to uncompress or read \fBlzip\fP compressed files, this program
+will be used.
+The default value is \fBlunzip\fP.
+If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing
+lz files will not be possible.
+The program has to accept the compressed file as stdin and write
+the uncompressed file into stdout.
+Note that .lz support is \fBDEPRECATED\fP and will be removed in the future.
+.TP
+.BI \-\-list\-max " count"
+Limits the output of \fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP to the first \fIcount\fP
+results.
+The default is 0, which means unlimited.
+.TP
+.BI \-\-list\-skip " count"
+Omits the first \fIcount\fP results from the output of
+\fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP.
+.TP
+.BI \-\-list\-format " format"
+Set the output format of \fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP commands.
+The format is similar to dpkg\-query's \fB\-\-showformat\fP:
+fields are specified as
+.BI ${ fieldname }
+or
+.BI ${ fieldname ; length }\fR.\fP
+Zero length or no length means unlimited.
+Positive numbers mean fill with spaces right, negative fill with spaces left.
+
+.BR \[rs]n ", " \[rs]r ", " \[rs]t ", " \[rs]0
+are new-line, carriage-return, tabulator and zero-byte.
+Backslash (\fB\[rs]\fP) can be used to escape every non-letter-or-digit.
+
+The special field names
+.BR $identifier ", " $architecture ", " $component ", " $type ", " $codename
+denote where the package was found.
+
+The special field names
+.BR $source " and " $sourceversion
+denote the source and source version a package belongs to.
+(i.e.
+.B ${$source}
+will either be the same as
+.B ${source}
+(without a possible version in parentheses at the end)
+or the same as
+.BR ${package} )\fR.\fP
+
+The special field names
+.BR $basename ", " $filekey " and " $fullfilename
+denote the first package file part of this entry
+(i.e. usually the .deb, .udeb or .dsc file)
+as basename, as filekey (filename relative to the outdir)
+and the full filename with outdir prepended
+(i.e. as relative or absolute as your
+outdir (or basedir if you did not set outdir) is).
+
+When \fB\-\-list\-format\fP is not given or \fBNONE\fP, then the default
+is equivalent to
+.br
+.BR "${$identifier} ${package} ${version}\[rs]n" .
+
+Escaping digits or letters not in above list,
+using dollars not escaped outside specified constructs,
+or any field names not listed as special and not consisting entirely out of
+letters, digits and minus signs have undefined behaviour
+and might change meaning without any further notice.
+
+If you give this option on the command line,
+don't forget that $ is also interpreted by your shell.
+So you have to properly escape it.
+For example by putting the whole argument to \-\-list\-format in single quotes.
+.TP
+.B \-\-show\-percent
+When downloading packages, show each completed percent of completed package
+downloads together with the size of completely downloaded packages.
+(Repeating this option increases the frequency of this output).
+.TP
+.B \-\-onlysmalldeletes
+The pull and update commands will skip every distribution in which one
+target loses more than 20% of its packages (and at least 10).
+
+Using this option (or putting it in the options config file) can
+avoid removing large quantities of data but means you might often
+give
+.B \-\-noonlysmalldeletes
+to override it.
+.TP
+.B \-\-restrict \fIsrc\fP\fR[\fP=\fIversion\fP\fR|\fP:\fItype\fP\fR]\fP
+Restrict a \fBpull\fP or \fBupdate\fP to only act on packages belonging
+to source-package \fIsrc\fP.
+Any other package will not be updated (unless it matches a \fB\-\-restrict\-bin\fP).
+Only packages that would otherwise be updated or are at least marked with \fBhold\fP
+in a \fBFilterList\fP or \fBFilterSrcList\fP will be updated.
+
+The action can be restricted to a source version using an equal sign or
+changed to another type (see FilterList) using a colon.
+
+This option can be given multiple times to list multiple packages, but each
+package may only be named once (even when there are different versions or types).
+.TP
+.B \-\-restrict\-binary \fIname\fP\fR[\fP=\fIversion\fP\fR|\fP:\fItype\fP\fR]\fP
+Like \fB\-\-restrict\fP but restrict to binary packages (\fB.deb\fP and \fB.udeb\fP).
+Source packages are not upgraded unless they appear in a \fB\-\-restrict\fP.
+.TP
+.B \-\-restrict\-file \fIfilename\fP
+Like \fB\-\-restrict\fP but read a whole file in the \fBFilterSrcList\fP format.
+.TP
+.B \-\-restrict\-file\-bin \fIfilename\fP
+Like \fB\-\-restrict\-bin\fP but read a whole file in the \fBFilterList\fP format.
+.TP
+.B \-\-endhook \fIhookscript\fP
+
+Run the specified \fIhookscript\fP once reprepro exits.
+It will get the usual \fBREPREPRO_\fP* environment variables set (or unset)
+and additionally a variable \fBREPREPRO_EXIT_CODE\fP that is the exit code
+with which reprepro would have exited (the hook is always called once the
+initial parsing of global options and the command name is done, no matter
+if reprepro did anything or not).
+Reprepro will return to the calling process with the exitcode of this script.
+Reprepro has closed all its databases and removed all its locks,
+so you can run reprepro again in this script
+(unless someone else did so in the same repository before, of course).
+
+The only advantage over running that command always directly after reprepro
+is that you can have some environment variables set and cannot so easily forget
+it if this option is in conf/options.
+
+The script is supposed to be located relative to \fIconfdir\fP, unless its
+name starts with \fB/\fP, \fB./\fP, \fB+b/\fP, \fB+o/\fP, or \fB+c/\fP
+and the name may not start (except in the cases given before) with a \fB+\fP.
+
+An example script looks like: \fB
+ #!/bin/sh
+
+ if [ "$REPREPRO_EXIT_CODE" \-ne 0 ] ; then
+ exit "$REPREPRO_EXIT_CODE"
+ fi
+
+ echo "congratulations, reprepro with arguments: $*"
+ echo "seems to have run successfully. REPREPRO_ part of the environment is:"
+ set | grep ^REPREPRO_
+
+ exit 0
+ \fP
+.TP
+.B \-\-outhook \fIhookscript\fP
+\fIhookscript\fP is called with a \fB.outlog\fP file as argument (located
+in \fIlogdir\fP) containing a description of all changes made to \fIoutdir\fP.
+
+The script is supposed to be located relative to \fIconfdir\fP, unless its
+name starts with \fB/\fP, \fB./\fP, \fB+b/\fP, \fB+o/\fP, or \fB+c/\fP
+and the name may not start (except in the cases given before) with a \fB+\fP.
+
+For a format of the \fB.outlog\fP files generated for this script see the
+\fBmanual.html\fP shipped with reprepro.
+.SH COMMANDS
+.TP
+.BR export " [ " \fIcodenames\fP " ]"
+Generate all index files for the specified distributions.
+
+This regenerates all files unconditionally.
+It is only useful if you want to be sure \fBdists\fP is up to date,
+you called some other actions with \fB\-\-export=never\fP before or
+you want to create an initial empty but fully equipped
+.BI dists/ codename
+directory.
+.TP
+.RB " [ " \-\-delete " ] " createsymlinks " [ " \fIcodenames\fP " ]"
+Creates \fIsuite\fP symbolic links in the \fBdists/\fP-directory pointing
+to the corresponding \fIcodename\fP.
+
+It will not create links, when multiple of the given codenames
+would be linked from the same suite name, or if the link
+already exists (though when \fB\-\-delete\fP is given it
+will delete already existing symlinks)
+.TP
+.B list \fIcodename\fP \fR[\fP \fIpackagename\fP \fR]\fP
+List all packages (source and binary, except when
+.B \-T
+or
+.B \-A
+is given) with the given name in all components (except when
+.B \-C
+is given) and architectures (except when
+.B \-A
+is given) of the specified distribution.
+If no package name is given, list everything.
+The format of the output can be changed with \fB\-\-list\-format\fP.
+To only get parts of the result, use \fB\-\-list\-max\fP and
+\fB\-\-list\-skip\fP.
+.TP
+.B listmatched \fIcodename\fP \fIglob\fP
+as list, but does not list a single package, but all packages
+matching the given shell-like \fIglob\fP.
+(i.e. \fB*\fP, \fB?\fP and \fB[\fP\fIchars\fP\fB]\fP are allowed).
+
+Examples:
+
+.B reprepro \-b . listmatched test2 'linux\-*'
+lists all packages starting with \fBlinux\-\fP.
+
+.TP
+.B listfilter \fIcodename\fP \fIcondition\fP
+as list, but does not list a single package, but all packages
+matching the given condition.
+
+The format of the formulas is those of the dependency lines in
+Debian packages' control files with some extras.
+That means a formula consists of names of fields with a possible
+condition for its content in parentheses.
+These atoms can be combined with
+an exclamation mark '!' (meaning not),
+a pipe symbol '|' (meaning or) and
+a comma ',' (meaning and).
+Additionally parentheses can be used to change binding
+(otherwise '!' binds more than '|' than ',').
+
+The values given in the search expression are directly alphabetically
+compared to the headers in the respective index file.
+That means that each part \fIFieldname\fP\fB (\fP\fIcmp\fP\fB \fP\fIvalue\fP\fB)\fP
+of the formula will be true for exactly those package that have
+in the \fBPackage\fP or \fBSources\fP file a line starting with \fIfieldname\fP
+and a value is alphabetically \fIcmp\fP to \fIvalue\fP.
+
+Additionally since reprepro 3.11.0, '\fB%\fP' can be used as comparison operator,
+denoting matching a name with shell like wildcard
+(with '\fB*\fP', '\fB?\fP' and '\fB[\fP..\fB]\fP').
+
+The special field names starting with '\fB$\fP' have special meaning
+(available since 3.11.1):
+
+.B $Version
+
+The version of the package, comparison is not alphabetically, but as
+Debian version strings.
+
+.B $Source
+
+The source name of the package.
+
+.B $SourceVersion
+
+The source version of the package.
+
+.B $Architecture
+
+The architecture the package is in (listfilter) or to be put into.
+
+.B $Component
+
+The component the package is in (listfilter) or to be put into.
+
+.B $Packagetype
+
+The packagetype of the package.
+
+Examples:
+
+.B reprepro \-b . listfilter test2 'Section (== admin)'
+will list all packages in distribution test2 with a Section field and the value
+of that field being \fBadmin\fP.
+
+.B reprepro \-b . \-T deb listfilter test2 'Source (== \fIblub\fP) | ( !Source , Package (== \fIblub\fP) )'
+will find all .deb Packages with either a Source field blub or
+no Source field and a Package field blub.
+(That means all package generated by a source package \fIblub\fP,
+except those also specifying a version number with its Source).
+
+.B reprepro \-b . \-T deb listfilter test2 '$Source (==\fIblub\fP)'
+is the better way to do this (but only available since 3.11.1).
+
+.B reprepro \-b . listfilter test2 '$PackageType (==deb), $Source (==\fIblub\fP)'
+is another (less efficient) way.
+
+.B reprepro \-b . listfilter test2 'Package (% linux\-*\-2.6*)'
+lists all packages with names starting with \fBlinux\-\fP and later
+having an \fB\-2.6\fP.
+.TP
+.B ls \fIpackage-name\fP
+List the versions of the specified package in all distributions.
+.TP
+.B lsbycomponent \fIpackage-name\fP
+Like ls, but group by component (and print component names).
+.TP
+.B remove \fIcodename\fP \fIpackage-names\fP\fR[\fP=\fIversion\fP\fR]\fP \fI...\fP
+Delete packages in the specified distribution,
+that have package name listed as argument.
+Package versions must be specified by appending '\fB=\fP' and the
+version to the name (without spaces). When no version is specified, the latest
+package version is removed.
+
+Note that like any other operation removing or replacing a package,
+the old package's files are unreferenced and thus may be automatically
+deleted if this was their last reference and no \fB\-\-keepunreferencedfiles\fP
+specified.
+.TP
+.B removematched \fIcodename\fP \fIglob\fP
+Delete all packages \fBlistmatched\fP with the same arguments would list.
+.TP
+.B removefilter \fIcodename\fP \fIcondition\fP
+Delete all packages \fBlistfilter\fP with the same arguments would list.
+.TP
+.B removesrc \fIcodename\fP \fIsource-name\fP \fR[\fP\fIversion\fP\fR]\fP
+Remove all packages in distribution \fIcodename\fP belonging to source
+package \fIsource-name\fP.
+(Limited to those with source version \fIversion\fP if specified).
+
+If package tracking is activated, it will use that information to find the
+packages, otherwise it traverses all package indices for the distribution.
+.TP
+.B removesrcs \fIcodename\fP \fIsource-name\fP\fR[\fP=\fIversion\fP\fR]\fP \fI...\fP
+Like \fBremovesrc\fP, but can be given multiple source names
+and source versions must be specified by appending '\fB=\fP' and the version
+to the name (without spaces).
+.TP
+.BR update " [ " \fIcodenames\fP " ]"
+Sync the specified distributions (all if none given) as
+specified in the config with their upstreams. See the
+description of
+.B conf/updates
+below.
+.TP
+.BR checkupdate " [ " \fIcodenames\fP " ]"
+Same like
+.BR update ,
+but will show what it will change instead of actually changing it.
+.TP
+.BR dumpupdate " [ " \fIcodenames\fP " ]"
+Same like
+.BR checkupdate ,
+but less suitable for humans and more suitable for computers.
+.TP
+.BR predelete " [ " \fIcodenames\fP " ]"
+This will determine which packages an \fBupdate\fP would delete or
+replace and remove those packages.
+This can be useful for reducing space needed while upgrading, but
+there will be some time where packages are vanished from the
+lists so clients will mark them as obsolete.
+Plus if you cannot
+download an updated package in the (hopefully) following update
+run, you will end up with no package at all instead of an old one.
+This will also blow up \fB.diff\fP files if you are using the pdiff
+example or something similar.
+So be careful when using this option or better get some more space so
+that update works.
+.TP
+.B cleanlists
+Delete all files in \fIlistdir\fP (default \fIbasedir\fP\fB/lists\fP) that do not
+belong to any update rule for any distribution.
+I.e. all files are deleted in that directory that no \fBupdate\fP
+command in the current configuration can use.
+(The files are usually left there, so if they are needed again they
+do not need to be downloaded again. Though in many easy cases not
+even those files will be needed.)
+.TP
+.BR pull " [ " \fIcodenames\fP " ]"
+pull in newer packages into the specified distributions (all if none given)
+from other distributions in the same repository.
+See the description of
+.B conf/pulls
+below.
+.TP
+.BR checkpull " [ " \fIcodenames\fP " ]"
+Same like
+.BR pull ,
+but will show what it will change instead of actually changing it.
+.TP
+.BR dumppull " [ " \fIcodenames\fP " ]"
+Same like
+.BR checkpull ,
+but less suitable for humans and more suitable for computers.
+.TP
+.B includedeb \fIcodename\fP \fI.deb-filename\fP
+Include the given binary Debian package (.deb) in the specified
+distribution, applying override information and guessing all
+values not given and guessable.
+.TP
+.B includeudeb \fIcodename\fP \fI.udeb-filename\fP
+Same like \fBincludedeb\fP, but for .udeb files.
+.TP
+.B includedsc \fIcodename\fP \fI.dsc-filename\fP
+Include the given Debian source package (.dsc, including other files
+like .orig.tar.gz, .tar.gz and/or .diff.gz) in the specified
+distribution, applying override information and guessing all values
+not given and guessable.
+
+Note that .dsc files do not contain section or priority, but the
+Sources.gz file needs them.
+reprepro tries to parse .diff and .tar files for
+it, but is only able to resolve easy cases.
+If reprepro fails to extract those automatically,
+you have to either specify a DscOverride or give them via
+.B \-S
+and
+.B \-P
+.TP
+.B include \fIcodename\fP \fI.changes-filename\fP
+Include in the specified distribution all packages found and suitable
+in the \fI.changes\fP file, applying override information guessing all
+values not given and guessable.
+.TP
+.B processincoming \fIrulesetname\fP \fR[\fP\fI.changes-file\fP\fR]\fP
+Scan an incoming directory and process the .changes files found there.
+If a filename is supplied, processing is limited to that file.
+.I rulesetname
+identifies which rule-set in
+.B conf/incoming
+determines which incoming directory to use
+and in what distributions to allow packages into.
+See the section about this file for more information.
+.TP
+.BR check " [ " \fIcodenames\fP " ]"
+Check if all packages in the specified distributions have all files
+needed properly registered.
+.TP
+.BR checkpool " [ " fast " ]"
+Check if all files believed to be in the pool are actually still there and
+have the known md5sum. When
+.B fast
+is specified md5sum is not checked.
+.TP
+.BR collectnewchecksums
+Calculate all supported checksums for all files in the pool.
+(Versions prior to 3.3 did only store md5sums, 3.3 added sha1, 3.5 added sha256).
+.TP
+.BR translatelegacychecksums
+Remove the legacy \fBfiles.db\fP file after making sure all information
+is also found in the new \fBchecksums.db\fP file.
+(Alternatively you can call \fBcollectnewchecksums\fP and remove the file
+on your own.)
+.TP
+.B rereference
+Forget which files are needed and recollect this information.
+.TP
+.B dumpreferences
+Print out which files are marked to be needed by whom.
+.TP
+.B dumpunreferenced
+Print a list of all files believed to be in the pool, that are
+not known to be needed.
+.TP
+.B deleteunreferenced
+Remove all known files (and forget them) in the pool not marked to be
+needed by anything.
+.TP
+.BR deleteifunreferenced " [ " \fIfilekeys\fP " ]"
+Remove the given files (and forget them) in the pool if they
+are not marked to be used by anything.
+If no command line arguments are given,
+stdin is read and every line treated as one filekey.
+This is mostly useful together with \fB\-\-keepunreferenced\fP
+in \fBconf/options\fP or in situations where one does not want
+to run \fBdeleteunreferenced\fP, which removes all files eligible
+to be deleted with this command.
+.TP
+.BR reoverride " [ " \fIcodenames\fP " ]"
+Reapply the override files to the given distributions (Or only parts
+thereof given by \fB\-A\fP,\fB\-C\fP or \fB\-T\fP).
+
+Note: only the control information is changed. Changing a section
+to a value, that would cause another component to be guessed, will
+not cause any warning.
+.TP
+.BR redochecksums " [ " \fIcodenames\fP " ]"
+Re-add the information about file checksums to the package indices.
+
+Usually the package's control information is created at inclusion
+time or imported from some remote source and not changed later.
+This command modifies it to re-add missing checksum types.
+
+Only checksums already known are used.
+To update known checksums about files run \fBcollectnewchecksums\fP first.
+
+.TP
+.BR dumptracks " [ " \fIcodenames\fP " ]"
+Print out all information about tracked source packages in the
+given distributions.
+.TP
+.BR retrack " [ " \fIcodenames\fP " ]"
+Recreate a tracking database for the specified distributions.
+This consists of three steps.
+First all files marked as part of a source package are set to
+unused.
+Then all files actually used are marked as thus.
+Finally tidytracks is called to remove everything no longer needed
+with the new information about used files.
+
+(This behaviour, though a bit longsome, keeps even files only
+kept because of tracking mode \fBkeep\fP and files not otherwise
+used but kept due to \fBincludechanges\fP or its relatives.
+Before version 3.0.0 such files were lost by running retrack).
+.TP
+.BR removealltracks " [ " \fIcodenames\fP " ]"
+Removes all source package tracking information for the
+given distributions.
+.TP
+.B removetrack " " \fIcodename\fP " " \fIsourcename\fP " " \fIversion\fP
+Remove the tracking data of the given version of a given source package
+from a given distribution. This also removes the references for all
+used files.
+.TP
+.BR tidytracks " [ " \fIcodenames\fP " ]"
+Check all source package tracking information for the given distributions
+for files no longer to keep.
+.TP
+.B copy \fIdestination-codename\fP \fIsource-codename\fP \fIpackage\fP\fR[\fP=\fIversion\fP\fR]\fP \fI...\fP
+Copy the given packages from one distribution to another.
+The packages are copied verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+copied. Package versions must be specified by appending '\fB=\fP' and the
+version to the name (without spaces). When no version is specified, the latest
+package version is copied.
+.TP
+.B copysrc \fIdestination-codename\fP \fIsource-codename\fP \fIsource-package\fP \fR[\fP\fIversions\fP\fR]\fP
+Look at each package
+(where package means, as usual, every package be it dsc, deb or udeb)
+in the distribution specified by \fIsource-codename\fP
+and identify the relevant source package for each.
+All packages matching the specified \fIsource-package\fP name
+(and any \fIversion\fP if specified)
+are copied to the \fIdestination-codename\fP distribution.
+The packages are copied verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+copied.
+.TP
+.B copymatched \fIdestination-codename\fP \fIsource-codename\fP \fIglob\fP
+Copy packages matching the given glob (see \fBlistmatched\fP).
+
+The packages are copied verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+copied.
+.TP
+.B copyfilter \fIdestination-codename\fP \fIsource-codename\fP \fIformula\fP
+Copy packages matching the given formula (see \fBlistfilter\fP).
+(all versions if no version is specified).
+The packages are copied verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+copied.
+.TP
+.B move \fIdestination-codename\fP \fIsource-codename\fP \fIpackage\fP\fR[\fP=\fIversion\fP\fR]\fP \fI...\fP
+Move the given packages from one distribution to another.
+The packages are moved verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+moved. Package versions must be specified by appending '\fB=\fP' and the
+version to the name (without spaces). When no version is specified, the latest
+package version is moved.
+.TP
+.B movesrc \fIdestination-codename\fP \fIsource-codename\fP \fIsource-package\fP \fR[\fP\fIversions\fP\fR]\fP
+Look at each package
+(where package means, as usual, every package be it dsc, deb or udeb)
+in the distribution specified by \fIsource-codename\fP
+and identify the relevant source package for each.
+All packages matching the specified \fIsource-package\fP name
+(and any \fIversion\fP if specified)
+are moved to the \fIdestination-codename\fP distribution.
+The packages are moved verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+moved.
+.TP
+.B movematched \fIdestination-codename\fP \fIsource-codename\fP \fIglob\fP
+Move packages matching the given glob (see \fBlistmatched\fP).
+
+The packages are moved verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+moved.
+.TP
+.B movefilter \fIdestination-codename\fP \fIsource-codename\fP \fIformula\fP
+Move packages matching the given formula (see \fBlistfilter\fP).
+(all versions if no version is specified).
+The packages are moved verbatim, no override files are consulted.
+Only components and architectures present in the source distribution are
+moved.
+.TP
+.B restore \fIcodename\fP \fIsnapshot\fP \fIpackages...\fP
+.TP
+.B restoresrc \fIcodename\fP \fIsnapshot\fP \fIsource-package\fP \fR[\fP\fIversions\fP\fR]\fP
+.TP
+.B restorefilter \fIdestination-codename\fP \fIsnapshot\fP \fIformula\fP
+.TP
+.B restorematched \fIdestination-codename\fP \fIsnapshot\fP \fIglob\fP
+Like the copy commands, but do not copy from another distribution,
+but from a snapshot generated with \fBgensnapshot\fP.
+Note that this blindly trusts the contents of the files in your \fBdists/\fP
+directory and does no checking.
+.TP
+.B clearvanished
+Remove all package databases that no longer appear in \fBconf/distributions\fP.
+If \fB\-\-delete\fP is specified, it will not stop if there are still
+packages left.
+Even without \fB\-\-delete\fP it will unreference
+files still marked as needed by this target.
+(Use \fB\-\-keepunreferenced\fP to not delete them if that was the last
+reference.)
+
+Do not forget to remove all exported package indices manually.
+.TP
+.B gensnapshot " " \fIcodename\fP " " \fIdirectoryname\fP
+Generate a snapshot of the distribution specified by \fIcodename\fP
+in the directory \fIdists\fB/\fIcodename\fB/snapshots/\fIdirectoryname\fB/\fR
+and reference all needed files in the pool as needed by that.
+No Content files are generated and no export hooks are run.
+
+Note that there is currently no automated way to remove that snapshot
+again (not even clearvanished will unlock the referenced files after the
+distribution itself vanished).
+You will have to remove the directory yourself and tell reprepro
+to \fBunreferencesnapshot \fP\fIcodename\fP\fB \fP\fIdirectoryname\fP before
+\fBdeleteunreferenced\fP will delete the files from the pool locked by this.
+
+To access such a snapshot with apt, add something like the following to
+your sources.list file:
+.br
+\fBdeb method://as/without/snapshot \fIcodename\fB/snapshots/\fIname\fB main\fR
+.TP
+.B unreferencesnapshot " " \fIcodename\fP " " \fIdirectoryname\fP
+Remove all references generated by a \fBgensnapshot\fP with the
+same arguments.
+This allows the next \fBdeleteunreferenced\fP call to delete those files.
+(The indices in \fBdists/\fP for the snapshot are not removed.)
+.TP
+.BR rerunnotifiers " [ " \fIcodenames\fP " ]"
+Run all external scripts specified in the \fBLog:\fP options of the
+specified distributions.
+.TP
+.B build\-needing \fIcodename\fP \fIarchitecture\fP \fR[\fP \fIglob\fP \fR]\fP
+List source packages (matching \fIglob\fP) that likely need a build on the
+given architecture.
+
+List all source packages in the given distribution without a binary package
+of the given architecture built from that version of the source,
+without a \fB.changes\fP or \fB.log\fP file for the given architecture,
+with an Architecture field including \fBany\fP, \fIos\fP\fB-any\fP (with
+\fIos\fP being the part before the hyphen in the architecture or \fBlinux\fP
+if there is no hyphen) or the architecture and
+at least one package in the Binary field not yet available.
+
+If instead of \fIarchitecture\fP the term \fBany\fP is used,
+all architectures are iterated and the architecture is printed as
+fourth field in every line.
+
+If the \fIarchitecture\fP is \fBall\fP, then only source packages
+with an Architecture field including \fBall\fP are considered
+(i.e. as above with real architectures but \fBany\fP does not suffice).
+Note that dpkg\-dev << 1.16.1 does not both set \fBany\fP and \fBall\fP
+so source packages building both architecture dependent and independent
+packages will never show up unless built with a new enough dpkg\-source).
+
+.TP
+.B translatefilelists
+Translate the file list cache within
+.IB db /contents.cache.db
+into the new format used since reprepro 3.0.0.
+
+Make sure you have at least half of the space of the current
+.IB db /contents.cache.db
+file size available in that partition.
+.TP
+.B flood \fIdistribution\fP \fR[\fP\fIarchitecture\fP\fR]\fP
+For each architecture of \fIdistribution\fP (or for the one specified)
+add architecture \fBall\fP packages from other architectures
+(but the same component or packagetype) under the following conditions:
+
+ Packages are only upgraded, never downgraded.
+ If there is a package not being architecture \fBall\fP,
+then architecture \fBall\fP packages of the same source from the same
+source version are preferred over those that have no such binary sibling.
+ Otherwise the package with the highest version wins.
+
+You can restrict which architectures are searched for architecture \fBall\fP
+packages using \fB\-A\fP and which components/packagetypes are flooded by
+\fB\-C\fP/\fB\-T\fP as usual.
+
+There are mostly two use cases for this command:
+If you added a new architecture to a distribution and want to copy all
+architecture \fBall\fP packages to it.
+Or if you included some architecture all packages only to some architectures
+using \fB\-A\fP to avoid breaking the other architectures for which the binary
+packages were still missing and now want to copy it to those architectures where
+they are unlikely to break something (because a new binary is already available).
+.TP
+.B unusedsources \fR[\fP\fIdistributions\fP\fR]\fP
+List all source packages for which no binary package built from them is found.
+.TP
+.B sourcemissing \fR[\fP\fIdistributions\fP\fR]\fP
+List all binary packages for which no source package is found
+(the source package must be in the same distribution,
+but source packages only kept by package tracking is enough).
+.TP
+.B reportcruft \fR[\fP\fIdistributions\fP\fR]\fP
+List all source package versions that either have a source package
+and no longer a binary package or binary packages left without
+source package in the index. (Unless sourcemissing also lists packages
+where the source package is only in the pool due to enabled tracking
+but no longer in the index).
+.TP
+.BR sizes " [ " \fIcodenames\fP " ]"
+List the size of all packages in the distributions specified or
+in all distributions.
+
+Each row contains 4 numbers, each being a number of bytes in a set
+of packages, which are:
+The packages in this distribution
+(including anything only kept because of tracking),
+the packages only in this distribution
+(anything in this distribution and a snapshot of this distribution
+counts as only in this distribution),
+the packages in this distribution and its snapshots,
+the packages only in this distribution or its snapshots.
+
+If more than one distribution is selected, also list a sum of those
+(in which 'Only' means only in the selected ones, and not only in
+one of the selected ones).
+
+.TP
+.BR repairdescriptions " [ " \fIcodenames\fP " ]"
+Look for binary packages only having a short description
+and try to get the long description from the .deb file
+(and also remove a possible Description-md5 in this case).
+.SS internal commands
+These are hopefully never needed, but allow manual intervention.
+.B WARNING:
+It is quite easy to get into an inconsistent and/or unfixable state.
+.TP
+.BR _detect " [ " \fIfilekeys\fP " ]"
+Look for the files, which \fIfilekey\fP
+is given as argument or as a line of the input
+(when run without arguments), and calculate
+their md5sum and add them to the list of known files.
+(Warning: this is a low level operation, no input validation
+or normalization is done.)
+.TP
+.BR _forget " [ " \fIfilekeys\fP " ]"
+Like
+.B _detect
+but remove the given \fIfilekey\fP from the list of known
+files.
+(Warning: this is a low level operation, no input validation
+or normalization is done.)
+.TP
+.B _listmd5sums
+Print a list of all known files and their md5sums.
+.TP
+.B _listchecksums
+Print a list of all known files and their recorded checksums.
+.TP
+.B _addmd5sums
+alias for the newer
+.TP
+.B _addchecksums
+Add information of known files (without any check done)
+in the strict format of _listchecksums output (i.e. don't dare to
+use a single space anywhere more than needed).
+.TP
+.BI _dumpcontents " identifier"
+Print out all the stored information of the specified
+part of the repository. (Or in other words, the content
+the corresponding Packages or Sources file would get)
+
+This command is deprecated and will be removed in a future version.
+.TP
+.BI "_addreference " filekey " " identifier
+Manually mark \fIfilekey\fP to be needed by \fIidentifier\fP
+.TP
+.BI "_addreferences " identifier " \fR[\fR " filekeys " \fR]\fR"
+Manually mark one or more \fIfilekeys\fP to be needed by \fIidentifier\fP.
+If no command line arguments are given,
+stdin is read and every line treated as one filekey.
+.TP
+.BI "_removereference " identifier " " filekey
+Manually remove the given mark that the file is needed by this identifier.
+.TP
+.BI "_removereferences " identifier
+Remove all references for what is needed by
+.I identifier.
+.TP
+.BI __extractcontrol " .deb-filename"
+Look what reprepro believes to be the content of the
+.B control
+file of the specified .deb-file.
+.TP
+.BI __extractfilelist " .deb-filename"
+Look what reprepro believes to be the list of files
+of the specified .deb-file.
+.TP
+.BI _fakeemptyfilelist " filekey"
+Insert an empty filelist for \fIfilekey\fP. This is an evil
+hack around broken .deb files that cannot be read by reprepro.
+.TP
+.B _addpackage \fIcodename\fP \fIfilename\fP \fIpackages...\fP
+Add packages from the specified filename to part specified
+by \fB\-C\fP \fB\-A\fP and \fB\-T\fP of the specified distribution.
+Very strange things can happen if you use it improperly.
+.TP
+.B __dumpuncompressors
+List what compression formats can be uncompressed and how.
+.TP
+.BI __uncompress " format compressed-file uncompressed-file"
+Use builtin or external uncompression to uncompress the specified
+file of the specified format into the specified target.
+.TP
+.BR _listcodenames
+Print - one per line - the codenames of all configured distributions.
+.TP
+.B _listconfidentifiers \fIidentifier\fP \fR[\fP \fIdistributions...\fP \fR]\fP
+Print - one per line - all identifiers of subdatabases as derived from the
+configuration.
+If a list of distributions is given, only identifiers of those are printed.
+
+.TP
+.B _listdbidentifiers \fIidentifier\fP \fR[\fP \fIdistributions...\fP \fR]\fP
+Print - one per line - all identifiers of subdatabases in the current
+database.
+This will be a subset of the ones printed by \fB_listconfidentifiers\fP or
+most commands but \fBclearvanished\fP will refuse to run, and depending
+on the database compatibility version, will include all those if reprepro
+was run since the config was last changed.
+
+.SH "CONFIG FILES"
+.B reprepro
+uses three config files, which are searched in
+the directory specified with
+.B \-\-confdir
+or in the
+.B conf/
+subdirectory of the \fIbasedir\fP.
+
+If a file
+.B options
+exists, it is parsed line by line.
+Each line can be the long
+name of a command line option (without the \-\-)
+plus an argument, where possible.
+Those are handled as if they were command line options given before
+(and thus lower priority than) any other command line option.
+(and also lower priority than any environment variable).
+
+To allow command line options to override options file options,
+most boolean options also have a corresponding form starting with \fB\-\-no\fP.
+
+(The only exception is when the path to look for config files
+changes, the options file will only be opened once and of course
+before any options within the options file are parsed.)
+
+The file
+.B distributions
+is always needed and describes what distributions
+to manage, while
+.B updates
+is only needed when syncing with external repositories and
+.B pulls
+is only needed when syncing with repositories in the same reprepro database.
+
+The last three are in the format control files in Debian are in,
+i.e. paragraphs separated by empty lines consisting of
+fields. Each field consists of a fieldname, followed
+by a colon, possible whitespace and the data. A field
+ends with a newline not followed by a space or tab.
+
+Lines starting with # as first character are ignored,
+while in other lines the # character and
+everything after it till the newline character are ignored.
+
+A paragraph can also consist of only a single field
+.RB \(dq !include: \(dq
+which causes the named file (relative to confdir unless starting
+with
+.BR ~/ ", " +b/ ", " +c/ " or " / " )"
+to be read as if it was found at this place.
+
+Each of the three files or a file included as described above
+can also be a directory, in which case all files it contains
+with a filename ending in
+.B .conf
+and not starting with
+.B .
+are read.
+.SS conf/distributions
+.TP
+.B Codename
+This required field is the unique identifier of a distribution
+and used as directory name within
+.B dists/
+It is also copied into the Release files.
+
+Note that this name is not supposed to change.
+You most likely \fBnever ever\fP want a name like \fBtesting\fP
+or \fBstable\fP here (those are suite names and supposed to point
+to another distribution later).
+.TP
+.B Suite
+This optional field is simply copied into the
+Release files. In Debian it contains names like
+stable, testing or unstable. To create symlinks
+from the Suite to the Codename, use the
+\fBcreatesymlinks\fP command of reprepro.
+.TP
+.B FakeComponentPrefix
+If this field is present,
+its argument is added - separated by a slash - before every
+Component written to the main Release file
+(unless the component already starts with it),
+and removed from the end of the Codename and Suite fields in that file.
+Also if a component starts with it, its directory in the dists dir
+is shortened by this.
+.br
+So \fB
+
+ Codename: bla/updates
+ Suite: foo/updates
+ FakeComponentPrefix: updates
+ Components: main bad\fP
+
+will create a Release file with \fB
+
+ Codename: bla
+ Suite: foo
+ Components: updates/main updates/bad\fP
+
+in it, but otherwise nothing is changed, while\fB
+
+ Codename: bla/updates
+ Suite: foo/updates
+ FakeComponentPrefix: updates
+ Components: updates/main updates/bad\fP
+
+will also create a Release file with \fB
+
+ Codename: bla
+ Suite: foo
+ Components: updates/main updates/bad\fP
+
+but the packages will actually be in the components
+\fBupdates/main\fP and \fBupdates/bad\fP,
+most likely causing the same file using duplicate storage space.
+
+This makes the distribution look more like Debian's security archive,
+thus work around problems with apt's workarounds for that.
+.TP
+.B AlsoAcceptFor
+A list of distribution names.
+When a \fB.changes\fP file is told to be included
+into this distribution with the \fBinclude\fP command
+and the distribution header of that file is neither
+the codename, nor the suite name, nor any name from the
+list, a \fBwrongdistribution\fP error is generated.
+The \fBprocessincoming\fP command will also use this field,
+see the description of \fBAllow\fP and \fBDefault\fP
+from the \fBconf/incoming\fP file for more information.
+.TP
+.B Version
+This optional field is simply copied into the
+Release files.
+.TP
+.B Origin
+This optional field is simply copied into the
+Release files.
+.TP
+.B Label
+This optional field is simply copied into the
+Release files.
+.TP
+.B NotAutomatic
+This optional field is simply copied into the
+Release files.
+(The value is handled as an arbitrary string,
+though anything but \fByes\fP does not make much
+sense right now.)
+.TP
+.B ButAutomaticUpgrades
+This optional field is simply copied into the
+Release files.
+(The value is handled as an arbitrary string,
+though anything but \fByes\fP does not make much
+sense right now.)
+.TP
+.B Description
+This optional field is simply copied into the
+Release files.
+.TP
+.B Architectures
+This required field lists the binary architectures within
+this distribution and if it contains
+.B source
+(i.e. if there is an item
+.B source
+in this line this Distribution has source. All other items
+specify things to be put after "binary\-" to form directory names
+and be checked against "Architecture:" fields.)
+
+This will also be copied into the Release files. (With exception
+of the
+.B source
+item, which will not occur in the topmost Release file whether
+it is present here or not)
+.TP
+.B Components
+This required field lists the component of a
+distribution. See
+.B GUESSING
+for rules which component packages are included into
+by default. This will also be copied into the Release files.
+.TP
+.B DDebComponents
+List of components containing .ddebs.
+.TP
+.B UDebComponents
+Components with a debian\-installer subhierarchy containing .udebs.
+(E.g. simply "main")
+.TP
+.B Update
+When this field is present, it describes which update rules are used
+for this distribution. There also can be a magic rule minus ("\-"),
+see below.
+.TP
+.B Pull
+When this field is present, it describes which pull rules are used
+for this distribution.
+Pull rules are like Update rules,
+but get their stuff from other distributions and not from external sources.
+See the description for \fBconf/pulls\fP.
+.TP
+.B SignWith
+When this field is present, a Release.gpg file will be generated.
+If the value is "yes" or "default", the default key of gpg is used.
+If the field starts with an exclamation mark ("!"), the given script
+is executed to do the signing.
+Otherwise the value will be given to libgpgme to determine the key to
+use.
+
+If there are problems with signing, you can try
+.br
+.B gpg \-\-list\-secret\-keys \fIvalue\fP
+.br
+to see how gpg could interpret the value.
+If that command does not list any keys or multiple ones,
+try to find some other value (like the keyid),
+that gpg can more easily associate with a unique key.
+
+If this key has a passphrase, you need to use gpg\-agent
+or the insecure option \fB\-\-ask\-passphrase\fP.
+
+A '\fB!\fP' hook script is looked for in the confdir,
+unless it starts with
+.BR ~/ ", " ./ ", " +b/ ", " +o/ ", " +c/ " or " / " ."
+It gets three command line arguments: The filename to sign,
+an empty argument or the filename to create with an inline
+signature (i.e. InRelease) and
+an empty argument or the filename to create an detached signature
+(i.e. Release.gpg).
+The script may generate no Release.gpg file if it chooses to
+(then the repository will look like unsigned for older clients),
+but generating empty files is not allowed.
+Reprepro waits for the script to finish and will abort the exporting
+of the distribution this signing is part of unless the scripts
+returns normally with exit code 0.
+Using a space after ! is recommended to avoid incompatibilities
+with possible future extensions.
+.TP
+.B DebOverride
+When this field is present, it describes the override file used
+when including .deb files.
+.TP
+.B UDebOverride
+When this field is present, it describes the override file used
+when including .udeb files.
+.TP
+.B DscOverride
+When this field is present, it describes the override file used
+when including .dsc files.
+.TP
+.B DebIndices\fR, \fBUDebIndices\fR, \fBDscIndices
+Choose what kind of Index files to export. The first
+part describes what the Index file shall be called.
+The second argument determines the name of a Release
+file to generate or not to generate if missing.
+Then at least one of "\fB.\fP", "\fB.gz\fP", "\fB.xz\fP" or "\fB.bz2\fP"
+specifying whether to generate uncompressed output, gzipped
+output, bzip2ed output or any combination.
+(bzip2 is only available when compiled with bzip2 support,
+so it might not be available when you compiled it on your
+own, same for xz and liblzma).
+If an argument not starting with dot follows,
+it will be executed after all index files are generated.
+(See the examples for what argument this gets).
+The default is:
+.br
+DebIndices: Packages Release . .gz
+.br
+UDebIndices: Packages . .gz
+.br
+DscIndices: Sources Release .gz
+.TP
+.B ExportOptions
+Options to modify how and if exporting is done:
+.br
+.B noexport
+Never export this distribution.
+That means there will be no directory below \fBdists/\fP generated and the distribution is only useful to copy packages to other distributions.
+.br
+.B keepunknown
+Ignore unknown files and directories in the exported directory.
+This is currently the only available option and the default, but might change in the future, so it can already be requested explicitly.
+.TP
+.B Contents
+Enable the creation of Contents files listing all the files
+within the binary packages of a distribution.
+(Which is quite slow, you have been warned).
+
+In earlier versions, the first argument was a rate at which
+to extract file lists.
+As this did not work and was no longer easily possible after
+some factorisation, this is no longer supported.
+
+The arguments of this field is a space separated list of options.
+If there is a \fBudebs\fP keyword, \fB.udeb\fPs are also listed
+(in a file called \fBuContents\-\fP\fIarchitecture\fP.)
+If there is a \fBnodebs\fP keyword, \fB.deb\fPs are not listed.
+(Only useful together with \fBudebs\fP)
+If there is at least one of the keywords
+\fB.\fP, \fB.gz\fP, \fB\.xz\fP and/or \fB.bz2\fP,
+the Contents files are written uncompressed, gzipped and/or bzip2ed instead
+of only gzipped.
+
+If there is a \fBpercomponent\fP then one Contents\-\fIarch\fP file
+per component is created.
+If there is a \fBallcomponents\fP then one global Contents\-\fIarch\fP
+file is generated.
+If both are given, both are created.
+If none of both is specified then \fBpercomponent\fP is taken
+as default (earlier versions had other defaults).
+
+The switches \fBcompatsymlink\fP or \fBnocompatsymlink\fP
+(only possible if \fBallcomponents\fP was not specified explicitly)
+control whether a compatibility symlink is created so old versions
+of apt\-file looking for the component independent filenames at
+least see the contents of the first component.
+
+Unless \fBallcomponents\fP is given, \fBcompatsymlink\fP
+currently is the default, but that will change
+in some future (current estimate: after wheezy was released)
+
+.TP
+.B ContentsArchitectures
+Limit generation of Contents files to the architectures given.
+If this field is not there, all architectures are processed.
+An empty field means no architectures are processed, thus not
+very useful.
+.TP
+.B ContentsComponents
+Limit what components are processed for the \fBContents\-\fP\fIarch\fP
+files to the components given.
+If this field is not there, all components are processed.
+An empty field is equivalent to specify \fBnodebs\fP in the
+\fBContents\fP field, while a non-empty field overrides a
+\fBnodebs\fP there.
+.TP
+.B ContentsUComponents
+Limit what components are processed for the uContents files to
+the components given.
+If this field is not there and there is the \fBudebs\fP keyword
+in the Contents field, all .udebs of all components are put
+in the \fBuContents.\fP\fIarch\fP files.
+If this field is not there and there is no \fBudebs\fP keyword
+in the Contents field, no \fBuContents\-\fP\fIarch\fP files are
+generated at all.
+A non-empty field implies generation of \fBuContents\-\fP\fIarch\fP
+files (just like the \fBudebs\fP keyword in the Contents field),
+while an empty one causes no \fBuContents\-\fP\fIarch\fP files to
+be generated.
+.TP
+.B Uploaders
+Specifies a file (relative to confdir if not starting with
+.BR ~/ ", " +b/ ", " +c/ " or " / " )"
+to specify who is allowed to upload packages. Without this there are no
+limits, and this file can be ignored via \fB\-\-ignore=uploaders\fP.
+See the section \fBUPLOADERS FILES\fP below.
+.TP
+.B Tracking
+Enable the (experimental) tracking of source packages.
+The argument list needs to contain exactly one of the following:
+.br
+.B keep
+Keeps all files of a given source package, until that
+is deleted explicitly via \fBremovetrack\fP. This is
+currently the only possibility to keep older packages
+around when all indices contain newer files.
+.br
+.B all
+Keep all files belonging to a given source package until
+the last file of it is no longer used within that
+distribution.
+.br
+.B minimal
+Remove files no longer included in the tracked distribution.
+(Remove changes, logs and includebyhand files once no file is
+in any part of the distribution).
+.br
+And any number of the following (or none):
+.br
+.B includechanges
+Add the .changes file to the tracked files of a source package.
+Thus it is also put into the pool.
+.br
+.B includebyhand
+Add \fBbyhand\fP and \fBraw\-\fP\fI*\fP files to the tracked
+files and thus in the pool.
+.br
+.B includebuildinfos
+Add buildinfo files to the tracked files and thus in the pool.
+.br
+.B includelogs
+Add log files to the tracked files and thus in the pool.
+(Not that putting log files in changes files is a reprepro
+extension not found in normal changes files)
+.br
+.B embargoalls
+Not yet implemented.
+.br
+.B keepsources
+Even when using minimal mode, do not remove source files
+until no file is needed any more.
+.br
+.B needsources
+Not yet implemented.
+.TP
+.B Limit
+Limit the number of versions of a package per distribution, architecture,
+component, and type. The limit must be a number. If the number is positive,
+all old package versions that exceed this limit will be removed or archived
+(see
+.B Archive
+option), when a new package version is added. If the number is zero or negative,
+all package versions will be kept. By default only one package version will be
+kept.
+.TP
+.B Archive
+Specify a codename which must be declared before (to avoid loops). When packages
+exceed the version count limit (specified in \fBLimit\fR), these packages will
+be moved to the specified distribution instead of being removed.
+.TP
+.B Log
+Specify a file to log additions and removals of this distribution
+into and/or external scripts to call when something is added or
+removed.
+The rest of the \fBLog:\fP line is the filename,
+every following line (as usual, have to begin with a single space)
+the name of a script to call.
+The name of the script may be preceded with options of the
+form \fB\-\-type=\fP(\fBdsc\fP|\fBdeb\fP|\fBudeb\fP),
+\fB\-\-architecture=\fP\fIname\fP or
+\fB\-\-component=\fP\fIname\fP to only call the script for some
+parts of the distribution.
+A script with argument \fB\-\-changes\fP is called when a \fB.changes\fP
+file was accepted by \fBinclude\fP or \fBprocessincoming\fP (and with other
+arguments).
+Both type of scripts can have a \fB\-\-via=\fP\fIcommand\fP specified,
+in which case it is only called when caused by reprepro command \fIcommand\fP.
+
+For information how it is called and some examples take a look
+at manual.html in reprepro's source or
+.B /usr/share/doc/reprepro/
+
+If the filename for the log files does not start with a slash,
+it is relative to the directory specified with \fB\-\-logdir\fP,
+the scripts are relative to \fB\-\-confdir\fP unless starting with
+.BR ~/ ", " +b/ ", " +c/ " or " / .
+.TP
+.B ValidFor
+If this field exists, a Valid\-Until field is put into generated
+.B Release
+files for this distribution with a date as much in the future as the
+argument specifies.
+
+The argument has to be a number followed by one of the units
+.BR d ", " m " or " y ,
+where \fBd\fP means days, \fBm\fP means 31 days and \fBy\fP means
+365 days.
+So
+.B ValidFor: 1m 11 d
+causes the generation of a
+.B Valid\-Until:
+header in Release files that points 42 days into the future.
+.TP
+.B ReadOnly
+Disallow all modifications of this distribution or its directory
+in \fBdists/\fP\fIcodename\fP (with the exception of snapshot subdirectories).
+.TP
+.B ByHandHooks
+This specifies hooks to call for handling byhand/raw files by processincoming
+(and in future versions perhaps by include).
+
+Each line consists of 4 arguments:
+A glob pattern for the section
+(classically \fBbyhand\fP, though Ubuntu uses \fBraw\-\fP*),
+a glob pattern for the priority (not usually used),
+and a glob pattern for the filename.
+
+The 4th argument is the script to be called when all of the above match.
+It gets 5 arguments: the codename of the distribution,
+the section (usually \fBbyhand\fP),
+the priority (usually only \fB\-\fP),
+the filename in the changes file and
+the full filename (with processincoming in the secure TempDir).
+.TP
+.B Signed\-By
+This optional field is simply copied into the Release files.
+It is used to tell apt which keys to trust for this Release
+in the future.
+(see SignWith for how to tell reprepro whether and how to sign).
+.SS conf/updates
+.TP
+.B Name
+The name of this update\-upstream as it can be used in the
+.B Update
+field in conf/distributions.
+.TP
+.B Method
+An URI as one could also give it apt, e.g.
+.I http://ftp.debian.de/debian
+which is simply given to the corresponding
+.B apt\-get
+method. (So either
+.B apt\-get has to be installed, or you have to point with
+.B \-\-methoddir
+to a place where such methods are found.)
+.TP
+.B Fallback
+(Still experimental:) A fallback URI, where all files are
+tried that failed the first one. They are given to the
+same method as the previous URI (e.g. both http://), and
+the fallback-server must have everything at the same place.
+No recalculation is done, but single files are just retried from
+this location.
+.TP
+.B Config
+This can contain any number of lines, each in the format
+.B apt\-get \-\-option
+would expect. (Multiple lines \(hy as always \(hy marked with
+leading spaces).
+.P
+For example: Config: Acquire::Http::Proxy=http://proxy.yours.org:8080
+.TP
+.B From
+The name of another update rule this rules derives from.
+The rule containing the \fBFrom\fP may not contain
+.BR Method ", " Fallback " or " Config "."
+All other fields are used from the rule referenced in \fBFrom\fP, unless
+found in this containing the \fBFrom\fP.
+The rule referenced in \fBFrom\fP may itself contain a \fBFrom\fP.
+Reprepro will only assume two remote index files are the same,
+if both get their \fBMethod\fP information from the same rule.
+.TP
+.B Suite
+The suite to update from. If this is not present, the codename
+of the distribution using this one is used. Also "*/whatever"
+is replaced by "<codename>/whatever"
+.TP
+.B Components
+The components to update. Each item can be either the name
+of a component or a pair of an upstream component and a local
+component separated with ">". (e.g. "main>all contrib>all non\-free>notall")
+
+If this field is not there, all components from the distribution
+to update are tried.
+
+An empty field means no source or .deb packages are updated by this rule,
+but only .udeb packages, if there are any.
+
+A rule might list components not available in all distributions
+using this rule. In this case unknown components are silently
+ignored.
+(Unless you start reprepro with the \fB\-\-fast\fP option,
+it will warn about components unusable in all distributions using
+that rule. As exceptions, unusable components called \fBnone\fP
+are never warned about, for compatibility with versions prior to
+3.0.0 where an empty field had a different meaning.)
+.TP
+.B Architectures
+The architectures to update. If omitted all from the distribution
+to update from. (As with components, you can use ">" to download
+from one architecture and add into another one. (This only determines
+in which Package list they land, it neither overwrites the Architecture
+line in its description, nor the one in the filename determined from this
+one. In other words, it is not really useful without additional filtering))
+.TP
+.B UDebComponents
+Like
+.B Components
+but for the udebs.
+.TP
+.B VerifyRelease
+Download the
+.B Release.gpg
+file and check if it is a signature of the
+.B Releasefile
+with the key given here. (In the Format as
+"gpg \-\-with\-colons \-\-list\-key" prints it, i.e. the last
+16 hex digits of the fingerprint) Multiple keys can be specified
+by separating them with a "\fB|\fP" sign. Then finding a signature
+from one of them will suffice.
+To allow revoked or expired keys, add a "\fB!\fP" behind a key.
+(but to accept such signatures, the appropriate \fB\-\-ignore\fP
+is also needed).
+To also allow subkeys of a specified key, add a "\fB+\fP" behind a key.
+.TP
+.B IgnoreRelease: yes
+If this is present, no
+.B InRelease
+or
+.B Release
+file will be downloaded and thus the md5sums of the other
+index files will not be checked.
+.TP
+.B GetInRelease: no
+If this is present, no
+.B InRelease
+file is downloaded but only
+.B Release
+(and
+.B Release.gpg
+)
+are tried.
+.TP
+.B Flat
+If this field is in an update rule, it is supposed to be a
+flat repository, i.e. a repository without a \fBdists\fP
+dir and no subdirectories for the index files.
+(If the corresponding \fBsources.list\fP line has the suite
+end with a slash, then you might need this one.)
+The argument for the \fBFlat:\fP field is the Component to
+put those packages into.
+No \fBComponents\fP or \fBUDebComponents\fP
+fields are allowed in a flat update rule.
+If the \fBArchitecture\fP field has any \fB>\fP items,
+the part left of the "\fB>\fP" is ignored.
+.br
+For example the \fBsources.list\fP line
+ deb http://cran.r\-project.org/bin/linux/debian etch\-cran/
+.br
+would translate to
+.br
+ Name: R
+ Method: http://cran.r\-project.org/bin/linux/debian
+ Suite: etch\-cran
+ Flat: whatevercomponentyoudlikethepackagesin
+.TP
+.B IgnoreHashes
+This directive tells reprepro to not check the listed
+hashes in the downloaded Release file (and only in the Release file).
+Possible values are currently \fBmd5\fP, \fBsha1\fP and \fBsha256\fP.
+
+Note that this does not speed anything
+up in any measurable way. The only reason to specify this is if
+the Release file of the distribution you want to mirror from
+uses a faulty algorithm implementation.
+Otherwise you will gain nothing and only lose security.
+.TP
+.B FilterFormula
+This can be a formula to specify which packages to accept from
+this source. The format is misusing the parser intended for
+Dependency lines. To get only architecture all packages use
+"architecture (== all)", to get only at least important
+packages use "priority (==required) | priority (==important)".
+
+See the description of the listfilter command for the semantics
+of formulas.
+.TP
+.B FilterList\fR, \fPFilterSrcList
+These two options each take at least two arguments:
+The first argument is the fallback (default) action.
+All following arguments are treated as file names of lists.
+
+The filenames are considered to be relative to
+.B \-\-confdir\fR,
+if not starting with
+.BR ~/ ", " +b/ ", " +c/ " or " / "."
+
+Each list file consists of lines with a package name
+followed by whitespace followed by an action.
+
+Each list may only contain a single line for a given package name.
+The action to be taken is the action specified by the first file
+mentioning that package.
+If no list file mentions a package, the fallback action is used instead.
+
+This format is inspired by dpkg \-\-get\-selections before multiarch
+and the names of the actions likely only make sense if you imagine the
+file to be the output of this command of an existing system.
+
+For each package available in the distribution to be updated from/pulled from
+this action is determined and affects the current decision what to do
+to the target distribution.
+(Only after all update/pull rules for a given target distribution have been
+processed something is actually done).
+
+The possible action keywords are:
+.RS
+.TP
+.B install
+mark the available package to be added to the target distribution unless
+the same version or a higher version is already marked as to be added/kept.
+(Note that without a prior delete rule (\fB\-\fP) or \fBsupersede\fP action,
+this will never downgrade a package as the already existing version
+is marked to be kept).
+.TP
+.B upgradeonly
+like \fBinstall\fP but will not add new packages to a distribution.
+.TP
+.B supersede
+unless the current package version is higher than the available package version,
+mark the package to be deleted in the target distribution.
+(Useful to remove packages in add-on distributions once they reached the base distribution).
+.TP
+.BR deinstall " or " purge
+ignore the newly available package.
+.TP
+.B warning
+print a warning message to stderr if a new package/newer version is available.
+Otherwise ignore the new package (like with \fBdeinstall\fP or \fBpurge\fP).
+.TP
+.B hold
+the new package is ignored, but every previous decision to
+downgrade or delete the package in the target distribution is reset.
+.TP
+.B error
+abort the whole upgrade/pull if a new package/newer version is available
+.TP
+.B "= \fIversion\fP"
+If the candidate package has the given version, behave like \fBinstall\fP.
+Otherwise continue as if this list file did not mention this package
+(i.e. look in the remaining list files or use the fallback action).
+Only one such entry per package is currently supported and the version
+is currently compared as string.
+.RE
+.PP
+.RS
+If there is both \fBFilterList\fP and \fBFilterSrcList\fP then
+the first is used for \fB.deb\fP and \fB.udeb\fP and the second for
+\fB.dsc\fP packages.
+.PP
+If there is only \fBFilterList\fP that is applied to everything.
+.PP
+If there is only \fBFilterSrcList\fP that is applied to everything, too,
+but the source package name (and source version) is used to do the lookup.
+.RE
+.TP
+.B OmitExtraSourceOnly
+This field controls whether source packages with Extra-Source-Only
+set are ignored when getting source packages.
+Without this option or if it is true, those source packages
+are ignored, while if set to no or false, those source packages
+are also candidates if no other filter excludes them.
+(The default of true will likely change once reprepro supports
+multiple versions of a package or has other means to keep the
+source packages around).
+.TP
+.B ListHook
+If this is given, it is executed for all downloaded index files
+with the downloaded list as first and a filename that will
+be used instead of this. (e.g. "ListHook: /bin/cp" works
+but does nothing.)
+
+If a file will be read multiple times, it is processed multiple
+times, with the environment variables
+.BR REPREPRO_FILTER_CODENAME ", " REPREPRO_FILTER_PACKAGETYPE ", "
+.BR REPREPRO_FILTER_COMPONENT " and " REPREPRO_FILTER_ARCHITECTURE
+set to the where this file will be added and
+.B REPREPRO_FILTER_PATTERN
+to the name of the update rule causing it.
+
+.TP
+.B ListShellHook
+This is like ListHook, but the whole argument is given to the shell
+as argument, and the input and output file are stdin and stdout.
+
+i.e.:
+.br
+ListShellHook: cat
+.br
+works but does nothing but useless use of a shell and cat, while
+.br
+ListShellHook: grep\-dctrl \-X \-S apt \-o \-X \-S dpkg || [ $? \-eq 1 ]
+.br
+will limit the update rule to packages from the specified source packages.
+.TP
+.B DownloadListsAs
+The arguments of this field specify which index files reprepro
+will download.
+
+Allowed values are
+.BR . ", " .gz ", " .bz2 ", " .lzma ", " .xz ", " .diff ", "
+.BR force.gz ", " force.bz2 ", " force.lzma ", "
+.BR force.xz ", and " force.diff "."
+
+Reprepro will try the first supported variant in the list given:
+Only compressions compiled in or for which an uncompressor was found
+are used.
+Unless the value starts with \fBforce.\fP,
+it is only tried if is found in the Release or InRelease file.
+
+The default value is \fB.diff .xz .lzma .bz2 .gz .\fP, i.e.
+download Packages.diff if listed in the Release file,
+otherwise or if not usable download .xz if
+listed in the Release file and there is a way to uncompress it,
+then .lzma if usable,
+then .bz2 if usable,
+then .gz and then uncompressed).
+
+Note there is no way to see if an uncompressed variant
+of the file is available (as the Release file always lists their
+checksums, even if not there),
+so putting '\fB.\fP' anywhere but as the last argument can mean
+trying to download a file that does not exist.
+
+Together with \fBIgnoreRelease\fP reprepro will download the first
+in this list that could be unpacked (i.e. \fBforce\fP is always assumed)
+and the default value is \fB.gz .bz2 . .lzma .xz\fP.
+.SS conf/pulls
+This file contains the rules for pulling packages from one
+distribution to another.
+While this can also be done with update rules using the file
+or copy method and using the exported indices of that other
+distribution, this way is faster.
+It also ensures the current files are used and no copies
+are made.
+(This also leads to the limitation that pulling from one
+component to another is not possible.)
+
+Each rule consists out of the following fields:
+.TP
+.B Name
+The name of this pull rule as it can be used in the
+.B Pull
+field in conf/distributions.
+.TP
+.B From
+The codename of the distribution to pull packages from.
+.TP
+.B Components
+The components of the distribution to get from.
+
+If this field is not there,
+all components from the distribution to update are tried.
+
+A rule might list components not available in all distributions using this
+rule. In this case unknown components are silently ignored.
+(Unless you start reprepro with the \-\-fast option,
+it will warn about components unusable in all distributions using that rule.
+As exception, unusable components called \fBnone\fP are never warned about,
+for compatibility with versions prior to 3.0.0 where an empty field had
+a different meaning.)
+.TP
+.B Architectures
+The architectures to update.
+If omitted all from the distribution to pull from.
+As in
+.BR conf/updates ,
+you can use ">" to download
+from one architecture and add into another one. (And again, only useful
+with filtering to avoid packages not architecture \fBall\fP to migrate).
+.TP
+.B UDebComponents
+Like
+.B Components
+but for the udebs.
+.TP
+.B FilterFormula
+.TP
+.B FilterList
+.TP
+.B FilterSrcList
+The same as with update rules.
+.SH "OVERRIDE FILES"
+The format of override files used by reprepro
+should resemble the extended ftp\-archive format,
+to be specific it is:
+
+.B \fIpackagename\fP \fIfield name\fP \fInew value\fP
+
+For example:
+.br
+.B kernel\-image\-2.4.31\-yourorga Section protected/base
+.br
+.B kernel\-image\-2.4.31\-yourorga Priority standard
+.br
+.B kernel\-image\-2.4.31\-yourorga Maintainer That's me <me@localhost>
+.br
+.B reprepro Priority required
+
+All fields of a given package will be replaced by the new value specified
+in the override file
+with the exception of special fields starting with a dollar sign ($).
+While the field name is compared case-insensitively, it is copied
+exactly in the form given in the override file.
+(Thus I suggest to keep to the exact case it is normally found in
+index files in case some other tool confuses them.)
+More than copied is the Section header (unless \fB\-S\fP is supplied),
+which is also used to guess the component (unless \fB\-C\fP is there).
+
+Some values like \fBPackage\fP, \fBFilename\fP, \fBSize\fP or \fBMD5sum\fP
+are forbidden, as their usage would severely confuse reprepro.
+
+As an extension reprepro also supports patterns instead of packagenames.
+If the package name contains '*', '[' or '?',
+it is considered a pattern
+and applied to each package
+that is not matched by any non-pattern override nor by any previous pattern.
+
+Fieldnames starting with a dollar ($) are not placed in the
+exported control data but have special meaning.
+Unknown ones are loudly ignored.
+Special fields are:
+
+ \fB$Component\fP: includedeb, includedsc, include and processincoming
+will put the package in the component given as value
+(unless itself overridden with \fB\-C\fP).
+Note that the proper way to specify the component is by setting the
+section field and using this extension will most likely confuse people
+and/or tools.
+
+ \fB$Delete\fP: the value is treated as a fieldname and fields of that
+name are removed.
+(This way one can remove fields previously added without removing and
+re-adding the package.
+And fields already included in the package can be removed, too).
+
+.SS conf/incoming
+Every chunk is a rule set for the
+.B process_incoming
+command.
+Possible fields are:
+.TP
+.B Name
+The name of the rule-set, used as argument to the scan command to specify
+to use this rule.
+.TP
+.B IncomingDir
+The Name of the directory to scan for
+.B .changes
+files.
+.TP
+.B TempDir
+A directory where the files listed in the processed .changes files
+are copied into before they are read.
+You can avoid some copy operations by placing this directory
+within the same mount point the pool hierarchy
+is (at least partially) in.
+.TP
+.B LogDir
+A directory where .changes files, .log files, .buildinfo files
+and otherwise unused .byhand files are stored upon procession.
+.TP
+.B Allow
+Each argument is either a pair \fIname1\fB>\fIname2\fR or simply
+\fIname\fP which is short for \fIname\fB>\fIname\fR.
+Each \fIname2\fP must identify a distribution,
+either by being Codename, a unique Suite, or a unique AlsoAcceptFor
+from \fBconf/distributions\fP.
+Each upload has each item in its
+.B Distribution:
+header compared first to last with each \fIname1\fP in the rules
+and is put in the first one accepting this package. e.g.:
+.br
+Allow: local unstable>sid
+.br
+or
+.br
+Allow: stable>security\-updates stable>proposed\-updates
+.br
+(Note that this makes only sense if Multiple is set to true or if
+there are people only allowed to upload to proposed\-updates but
+not to security\-updates).
+.TP
+.B Default \fIdistribution
+Every upload not put into any other distribution because
+of an Allow argument is put into \fIdistribution\fP if that
+accepts it.
+.TP
+.B Multiple
+Old form of Options: multiple_distributions.
+.TP
+.B Options
+A list of options
+.br
+.B multiple_distributions
+.br
+Allow including an upload in multiple distributions.
+
+If a .changes file lists multiple distributions,
+then reprepro will start with the first name given,
+check all Accept and Default options till it finds
+a distribution this upload can go into.
+
+If this found no distribution or if this option was given,
+reprepro will then do the same with the second distribution name
+given in the .changes file and so on.
+.br
+.B limit_arch_all
+.br
+If an upload contains binaries from some architecture and architecture
+all packages,
+the architecture all packages are only put into the architectures within
+this upload.
+Useful to combine with the \fBflood\fP command.
+.TP
+.B Permit
+A list of options to allow things otherwise causing errors:
+.br
+.B unused_files
+.br
+Do not stop with an error if the \fB.changes\fP
+file lists files not belonging to any package in it.
+.br
+.B older_version
+.br
+Ignore a package not added because there already is a strictly newer
+version available instead of treating this as an error.
+.br
+.B unlisted_binaries
+.br
+Do not abort with an error if a .changes file contains .deb files that
+are not listed in the Binaries header.
+.TP
+.B Cleanup \fIoptions
+A list of options to cause more files in the incoming directory to be
+deleted:
+.br
+.B unused_files
+.br
+If there is \fBunused_files\fP in \fBPermit\fP then also delete those
+files when the package is deleted after successful processing.
+.br
+.B unused_buildinfo_files
+.br
+If .buildinfo files of processed .changes files are not used (neither
+stored by LogDir nor with Tracking: includebuildinfos) then delete them
+from the incoming dir.
+(This option has no additional effect if \fBunused_files\fP is already used.)
+.br
+.B on_deny
+.br
+If a \fB.changes\fP file is denied processing because of missing signatures
+or allowed distributions to be put in, delete it and all the files it references.
+.br
+.B on_error
+.br
+If a \fB.changes\fP file causes errors while processing, delete it and the files
+it references.
+
+Note that allowing cleanup in publicly accessible incoming queues allows a denial
+of service by sending in .changes files deleting other peoples files before they
+are completed.
+Especially when .changes files are handled directly (e.g. by inoticoming).
+
+.TP
+.B MorgueDir
+If files are to be deleted by Cleanup, they are instead moved to a subdirectory
+of the directory given as value to this field.
+This directory has to be on the same partition as the incoming directory and
+files are moved (i.e. owner and permission stay the same) and never copied.
+
+.SH "UPLOADERS FILES"
+These files specified by the \fBUploaders\fP header in the distribution
+definition as explained above describe what key a \fB.changes\fP file
+has to be signed with to be included in that distribution.
+.P
+Empty lines and lines starting with a hash are ignored, every other line
+must be of one of the following nine forms or an include directive:
+.TP
+.B allow \fIcondition\fP by anybody
+which allows everyone to upload packages matching \fIcondition\fP,
+.TP
+.B allow \fIcondition\fP by unsigned
+which allows everything matching that has no pgp/gpg header,
+.TP
+.B allow \fIcondition\fP by any key
+which allows everything matching with any valid signature in or
+.TP
+.B allow \fIcondition\fP by key \fIkey-id\fP
+which allows everything matching signed by this \fIkey-id\fP
+(to be specified without any spaces).
+If the \fIkey-id\fP ends with a \fB+\fP (plus), a signature with a subkey of
+this primary key also suffices.
+
+\fIkey-id\fP must be a suffix of the id libgpgme uses to identify this key,
+i.e. a number of hexdigits from the end of the fingerprint of the key, but
+no more than what libgpgme uses.
+(The maximal number should be what gpg \-\-list-key \-\-with\-colons prints,
+as of the time of this writing that is at most 16 hex-digits).
+.TP
+.B allow \fIcondition\fP by group \fIgroupname\fP
+which allows every member of group \fIgroupname\fP.
+Groups can be manipulated by
+.TP
+.B group \fIgroupname\fP add \fIkey-id\fP
+to add a \fIkey-id\fP (see above for details) to this group, or
+.TP
+.B group \fIgroupname\fP contains \fIgroupname\fP
+to add a whole group to a group.
+
+To avoid warnings in incomplete config files there is also
+.TP
+.B group \fIgroupname\fP empty
+to declare a group has no members (avoids warnings that it is used without those)
+and
+.TP
+.B group \fIgroupname\fP unused
+to declare that a group is not yet used (avoid warnings that it is not used).
+.PP
+A line starting with \fBinclude\fP causes the rest of the line to be
+interpreted as filename, which is opened and processed before the rest
+of the file is processed.
+
+The only conditions currently supported are:
+.TP
+.B *
+which means any package,
+.TP
+.BI "source '" name '
+which means any package with source \fIname\fP.
+('\fB*\fP', '\fB?\fP' and '\fB[\fP..\fB]\fP' are treated as in shell wildcards).
+.TP
+.B sections '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP
+matches an upload in which each section matches one of the names
+given.
+As upload conditions are checked very early,
+this is the section listed in the .changes file,
+not the one from the override file.
+(But this might change in the future,
+if you have the need for the one or the other behavior, let me know).
+.TP
+.B sections contain '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP
+The same, but not all sections must be from the given set,
+but at least one source or binary package needs to have one of those given.
+.TP
+.B binaries '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP
+matches an upload in which each binary (type deb or udeb)
+matches one of the names given.
+.TP
+.B binaries contain '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP
+again only at least one instead of all is required.
+.TP
+.B architectures '\fIarchitecture\fP'\fR(\fP|'\fIname\fP'\fR)*\fP
+matches an upload in which each package has only architectures
+from the given set.
+\fBsource\fP and \fBall\fP are treated as unique architectures.
+Wildcards are not allowed.
+.TP
+.B architectures contain '\fIarchitecture\fP'\fR(\fP|'\fIarchitecture\fP'\fR)*\fP
+again only at least one instead of all is required.
+.TP
+.B byhand
+matches an upload with at least one byhand file
+(i.e. a file with section \fBbyhand\fP or \fBraw\-\fP\fIsomething\fP).
+.TP
+.B byhand '\fIsection\fP'\fR(\fP|'\fIsection\fP'\fR)*\fP
+matches an upload with at least one byhand file and
+all byhand files having a section listed in the list of given section.
+(i.e. \fBbyhand 'byhand'|'raw\-*'\fP is currently the same as \fBbyhand\fP).
+.TP
+.BI "distribution '" codename '
+which means any package when it is to be included in \fIcodename\fP.
+As the uploaders file is given by distribution, this is only useful
+to reuse a complex uploaders file for multiple distributions.
+.PP
+Putting \fBnot\fP in front of a condition inverts its meaning.
+For example
+.br
+\fBallow not source 'r*' by anybody\fP
+.br
+means anybody may upload packages whose source name does not start
+with an 'r'.
+.PP
+Multiple conditions can be connected with \fBand\fP and \fBor\fP,
+with \fBor\fP binding stronger (but both weaker than \fBnot\fP).
+That means
+.br
+\fBallow source 'r*' and source '*xxx' or source '*o' by anybody\fP
+.br
+is equivalent to
+.br
+\fBallow source 'r*xxx' by anybody\fP
+.br
+\fBallow source 'r*o' by anybody\fP
+
+(Other conditions
+will follow once somebody tells me what restrictions are useful.
+Currently planned is only something for architectures).
+.SH "ERROR IGNORING"
+With \fB\-\-ignore\fP on the command line or an \fIignore\fP
+line in the options file, the following type of errors can be
+ignored:
+.TP
+.B brokenold \fR(hopefully never seen)
+If there are errors parsing an installed version of package, do not
+error out, but assume it is older than anything else, has no files
+or no source name.
+.TP
+.B brokensignatures
+If a .changes or .dsc file contains at least one invalid signature
+and no valid signature (not even expired or from an expired or revoked key),
+reprepro assumes the file got corrupted and refuses to use it unless this
+ignore directive is given.
+.TP
+.B brokenversioncmp \fR(hopefully never seen)
+If comparing old and new version fails, assume the new one is newer.
+.TP
+.B dscinbinnmu
+If a .changes file has an explicit Source version that is different
+to the version header of the file,
+then reprepro assumes it is a binary non maintainer upload (NMU).
+In that case, source files are not permitted in .changes files
+processed by
+.B include
+or
+.BR processincoming .
+Adding \fB\-\-ignore=dscinbinnmu\fP allows it for the \fBinclude\fP
+command.
+.TP
+.B emptyfilenamepart \fR(insecure)
+Allow strings to be empty that are used to construct filenames.
+(like versions, architectures, ...)
+.TP
+.B extension
+Allow one to \fBincludedeb\fP files that do not end with \fB.deb\fP,
+to \fBincludedsc\fP files not ending in \fB.dsc\fP and to
+\fBinclude\fP files not ending in \fB.changes\fP.
+.TP
+.B forbiddenchar \fR(insecure)
+Do not insist on Debian policy for package and source names
+and versions.
+Thus allowing all 7-bit characters but slashes (as they would
+break the file storage) and things syntactically active
+(spaces, underscores in filenames in .changes files, opening
+parentheses in source names of binary packages).
+To allow some 8-bit chars additionally, use \fB8bit\fP additionally.
+.TP
+.B 8bit \fR(more insecure)
+Allow 8-bit characters not looking like overlong UTF-8 sequences
+in filenames and things used as parts of filenames.
+Though it hopefully rejects overlong UTF-8 sequences, there might
+be other characters your filesystem confuses with special characters,
+thus creating filenames possibly equivalent to
+\fB/mirror/pool/main/../../../etc/shadow\fP
+(Which should be safe, as you do not run reprepro as root, do you?)
+or simply overwriting your conf/distributions file adding some commands
+in there. So do not use this if you are paranoid, unless you are paranoid
+enough to have checked the code of your libs, kernel and filesystems.
+.TP
+.B ignore \fR(for forward compatibility)
+Ignore unknown ignore types given to \fI\-\-ignore\fP.
+.TP
+.B flatandnonflat \fR(only suppresses a warning)
+Do not warn about a flat and a non-flat distribution from the same
+source with the same name when updating.
+(Hopefully never ever needed.)
+.TP
+.B malformedchunk \fR(I hope you know what you do)
+Do not stop when finding a line not starting with a space but
+no colon(:) in it. These are otherwise rejected as they have no
+defined meaning.
+.TP
+.B missingfield \fR(safe to ignore)
+Ignore missing fields in a .changes file that are only checked but
+not processed.
+Those include: Format, Date, Urgency, Maintainer, Description, Changes
+.TP
+.B missingfile \fR(might be insecure)
+When including a .dsc file from a .changes file,
+try to get files needed but not listed in the .changes file
+(e.g. when someone forgot to specify \-sa to dpkg\-buildpackage)
+from the directory the .changes file is in instead of erroring out.
+(\fB\-\-delete\fP will not work with those files, though.)
+.TP
+.B spaceonlyline \fR(I hope you know what you do)
+Allow lines containing only (but non-zero) spaces. As these
+do not separate chunks and thus will cause reprepro to behave
+unexpectedly, they cause error messages by default.
+.TP
+.B surprisingarch
+Do not reject a .changes file containing files for an
+architecture not listed in the Architecture-header within it.
+.TP
+.B surprisingbinary
+Do not reject a .changes file containing .deb files containing
+packages whose name is not listed in the "Binary:" header
+of that changes file.
+.TP
+.B undefinedtarget \fR(hope you are not using the wrong db directory)
+Do not stop when the packages.db file contains databases for
+codename/packagetype/component/architectures combinations that are
+not listed in your distributions file.
+
+This allows you to temporarily remove some distribution from the config files,
+without having to remove the packages in it with the \fBclearvanished\fP
+command.
+You might even temporarily remove single architectures or components,
+though that might cause inconsistencies in some situations.
+.TP
+.B undefinedtracking \fR(hope you are not using the wrong db directory)
+Do not stop when the tracking file contains databases for
+distributions that are not listed in your \fBdistributions\fP file.
+
+This allows you to temporarily remove some distribution from the config files,
+without having to remove the packages in it with the \fBclearvanished\fP
+command.
+You might even temporarily disable tracking in some distribution, but that
+is likely to cause inconsistencies in there, if you do not know, what you
+are doing.
+.TP
+.B unknownfield \fR(for forward compatibility)
+Ignore unknown fields in the config files, instead of refusing to
+run.
+.TP
+.B unusedarch \fR(safe to ignore)
+No longer reject a .changes file containing no files for any of the
+architectures listed in the Architecture-header within it.
+.TP
+.B unusedoption
+Do not complain about command line options not used by the
+specified action (like \fB\-\-architecture\fP).
+.TP
+.B uploaders
+The include command will accept packages that would otherwise have
+been rejected by the uploaders file.
+.TP
+.B wrongarchitecture \fR(safe to ignore)
+Do not warn about wrong "Architecture:" lines in downloaded
+Packages files.
+(Note that wrong Architectures are always ignored when getting
+stuff from flat repositories or importing stuff from one architecture
+to another).
+.TP
+.B wrongdistribution \fR(safe to ignore)
+Do not error out if a .changes file is to be placed in a
+distribution not listed in that file's Distributions: header.
+.TP
+.B wrongsourceversion
+Do not reject a .changes file containing .deb files with
+a different opinion on what the version of the source package is.
+.br
+(Note: reprepro only compares literally here, not by meaning.)
+.TP
+.B wrongversion
+Do not reject a .changes file containing .dsc files with
+a different version.
+.br
+(Note: reprepro only compares literally here, not by meaning.)
+.TP
+.B expiredkey \fR(I hope you know what you do)
+Accept signatures with expired keys.
+(Only if the expired key is explicitly requested).
+.TP
+.B expiredsignature \fR(I hope you know what you do)
+Accept expired signatures with expired keys.
+(Only if the key is explicitly requested).
+.TP
+.B revokedkey \fR(I hope you know what you do)
+Accept signatures with revoked keys.
+(Only if the revoked key is explicitly requested).
+.SH GUESSING
+When including a binary or source package without explicitly
+declaring a component with
+.B \-C
+it will take the
+first component with the name of the section, being
+prefix to the section, being suffix to the section
+or having the section as prefix or any. (In this order)
+
+Thus having specified the components:
+"main non\-free contrib non\-US/main non\-US/non\-free non\-US/contrib"
+should map e.g.
+"non\-US" to "non\-US/main" and "contrib/editors" to "contrib",
+while having only "main non\-free and contrib" as components should
+map "non\-US/contrib" to "contrib" and "non\-US" to "main".
+
+.B NOTE:
+Always specify main as the first component, if you want things
+to end up there.
+
+.B NOTE:
+unlike in dak, non\-US and non\-us are different things...
+.SH NOMENCLATURE
+.B Codename
+the primary identifier of a given distribution. These are normally
+things like \fBsarge\fP, \fBetch\fP or \fBsid\fP.
+.TP
+.B basename
+the name of a file without any directory information.
+.TP
+.B byhand
+Changes files can have files with section 'byhand' (Debian) or 'raw\-' (Ubuntu).
+Those files are not packages but other data generated (usually together with
+packages) and then uploaded together with this changes file.
+
+With reprepro those can be stored in the pool next to their packages with
+tracking, put in some log directory when using processincoming, or given to
+a hook script (currently only possible with processincoming).
+.TP
+.B filekey
+the position relative to the outdir. (as found in "Filename:" in Packages.gz)
+.TP
+.B "full filename"
+the position relative to /
+.TP
+.B architecture
+The term like \fBsparc\fP, \fBi386\fP, \fBmips\fP, ... .
+To refer to the source packages, \fBsource\fP
+is sometimes also treated as architecture.
+.TP
+.B component
+Things like \fBmain\fP, \fBnon\-free\fP and \fBcontrib\fP
+(by policy and some other programs also called section, reprepro follows
+the naming scheme of apt here.)
+.TP
+.B section
+Things like \fBbase\fP, \fBinterpreters\fP, \fBoldlibs\fP and \fBnon\-free/math\fP
+(by policy and some other programs also called subsections).
+.TP
+.B md5sum
+The checksum of a file in the format
+"\fI<md5sum of file>\fP \fI<length of file>\fP"
+.SH Some note on updates
+.SS A version is not overwritten with the same version.
+.B reprepro
+will never update a package with a version it already has. This would
+be equivalent to rebuilding the whole database with every single upgrade.
+To force the new same version in, remove it and then update.
+(If files of
+the packages changed without changing their name, make sure the file is
+no longer remembered by reprepro.
+Without \fB\-\-keepunreferencedfiles\fP
+and without errors while deleting it should already be forgotten, otherwise
+a \fBdeleteunreferenced\fP or even some \fB__forget\fP might help.)
+.SS The magic delete rule ("\-").
+A minus as a single word in the
+.B Update:
+line of a distribution marks everything to be deleted. The mark causes later rules
+to get packages even if they have (strict) lower versions. The mark will
+get removed if a later rule sets the package on hold (hold is not yet implemented,
+in case you might wonder) or would get a package with the same version
+(Which it will not, see above). If the mark is still there at the end of the processing,
+the package will get removed.
+.P
+Thus the line "Update: \-
+.I rules
+" will cause all packages to be exactly the
+highest Version found in
+.I rules.
+The line "Update:
+.I near
+\-
+.I rules
+" will do the same, except if it needs to download packages, it might download
+it from
+.I near
+except when too confused. (It will get too confused e.g. when
+.I near
+or
+.I rules
+have multiple versions of the package and the highest in
+.I near
+is not the first one in
+.I rules,
+as it never remembers more than one possible origin for a package.)
+.P
+Warning: This rule applies to all type/component/architecture triplets
+of a distribution, not only those some other update rule applies to.
+(That means it will delete everything in those!)
+.SH ENVIRONMENT VARIABLES
+Environment variables are always overwritten by command line options,
+but overwrite options set in the \fBoptions\fP file. (Even when the
+options file is obviously parsed after the environment variables as
+the environment may determine the place of the options file).
+.TP
+.B REPREPRO_BASE_DIR
+The directory in this variable is used instead of the current directory,
+if no \fB\-b\fP or \fB\-\-basedir\fP options are supplied.
+.br
+It is also set in all hook scripts called by reprepro
+(relative to the current directory or absolute,
+depending on how reprepro got it).
+.TP
+.B REPREPRO_CONFIG_DIR
+The directory in this variable is used when no \fB\-\-confdir\fP is
+supplied.
+.br
+It is also set in all hook scripts called by reprepro
+(relative to the current directory or absolute,
+depending on how reprepro got it).
+.TP
+.B REPREPRO_OUT_DIR
+This is not used, but only set in hook scripts called by reprepro
+to the directory in which the \fBpool\fP subdirectory resides
+(relative to the current directory or absolute,
+depending on how reprepro got it).
+.TP
+.B REPREPRO_DIST_DIR
+This is not used, but only set in hook scripts called by reprepro
+to the \fBdists\fP directory (relative to the current directory or
+absolute, depending on how reprepro got it).
+.TP
+.B REPREPRO_LOG_DIR
+This is not used, but only set in hook scripts called by reprepro
+to the value settable by \fB\-\-logdir\fP.
+.TP
+.B REPREPRO_CAUSING_COMMAND
+.TP
+.B REPREPRO_CAUSING_FILE
+Those two environment variables are set (or unset) in
+\fBLog:\fP and \fBByHandHooks:\fP scripts and hint what command
+and what file caused the hook to be called (if there is some).
+.TP
+.B REPREPRO_CAUSING_RULE
+This environment variable is set (or unset) in
+\fBLog:\fP scripts and hints what update or pull rule caused
+this change.
+.TP
+.B REPREPRO_FROM
+This environment variable is set (or unset) in
+\fBLog:\fP scripts and denotes what other distribution a
+package is copied from (with pull and copy commands).
+.TP
+.B REPREPRO_FILTER_ARCHITECTURE
+.TP
+.B REPREPRO_FILTER_CODENAME
+.TP
+.B REPREPRO_FILTER_COMPONENT
+.TP
+.B REPREPRO_FILTER_PACKAGETYPE
+.TP
+.B REPREPRO_FILTER_PATTERN
+Set in \fBFilterList:\fP and \fBFilterSrcList:\fP scripts.
+.TP
+.B GNUPGHOME
+Not used by reprepro directly.
+But reprepro uses libgpgme, which calls gpg for signing and verification
+of signatures.
+And your gpg will most likely use the content of this variable
+instead of "~/.gnupg".
+Take a look at
+.BR gpg (1)
+to be sure.
+You can also tell reprepro to set this with the \fB\-\-gnupghome\fP option.
+.TP
+.B GPG_TTY
+When there is a gpg\-agent running that does not have the passphrase
+cached yet, gpg will most likely try to start some pinentry program
+to get it.
+If that is pinentry\-curses, that is likely to fail without this
+variable, because it cannot find a terminal to ask on.
+In these cases you might set this variable to something like
+the value of
+.B $(tty)
+or
+.B $SSH_TTY
+or anything else denoting a usable terminal. (You might also
+want to make sure you actually have a terminal available.
+With ssh you might need the
+.B \-t
+option to get a terminal even when telling gpg to start a specific command).
+
+By default, reprepro will set this variable to what the symbolic link
+.B /proc/self/fd/0
+points to, if stdin is a terminal, unless you told with
+.B \-\-noguessgpgtty
+to not do so.
+.SH BUGS
+Increased verbosity always shows those things one does not want to know.
+(Though this might be inevitable and a corollary to Murphy)
+
+Reprepro uses berkeley db, which was a big mistake.
+The most annoying problem not yet worked around is database corruption
+when the disk runs out of space.
+(Luckily if it happens while downloading packages while updating,
+only the files database is affected, which is easy (though time consuming)
+to rebuild, see \fBrecovery\fP file in the documentation).
+Ideally put the database on another partition to avoid that.
+
+While the source part is mostly considered as the architecture
+.B source
+some parts may still not use this notation.
+.SH "WORK-AROUNDS TO COMMON PROBLEMS"
+.TP
+.B gpgme returned an impossible condition
+With the woody version this normally meant that there was no .gnupg
+directory in $HOME, but it created one and reprepro succeeds when called
+again with the same command.
+Since sarge the problem sometimes shows up, too. But it is no longer
+reproducible and it does not fix itself, either. Try running
+\fBgpg \-\-verify \fP\fIfile-you-had-problems-with\fP manually as the
+user reprepro is running and with the same $HOME. This alone might
+fix the problem. It should not print any messages except perhaps
+.br
+gpg: no valid OpenPGP data found.
+.br
+gpg: the signature could not be verified.
+.br
+if it was an unsigned file.
+.TP
+.B not including .orig.tar.gz when a .changes file's version does not end in \-0 or \-1
+If dpkg\-buildpackage is run without the \fB\-sa\fP option to build a version with
+a Debian revision not being \-0 or \-1, it does not list the \fB.orig.tar.gz\fP file
+in the \fB.changes\fP file.
+If you want to \fBinclude\fP such a file with reprepro
+when the .orig.tar.gz file does not already exist in the pool, reprepro will report
+an error.
+This can be worked around by:
+.br
+call \fBdpkg\-buildpackage\fP with \fB\-sa\fP (recommended)
+.br
+copy the .orig.tar.gz file to the proper place in the pool before
+.br
+call reprepro with \-\-ignore=missingfile (discouraged)
+.TP
+.B leftover files in the pool directory.
+reprepro is sometimes a bit too timid of deleting stuff. When things
+go wrong and there have been errors it sometimes just leaves everything
+where it is.
+To see what files reprepro remembers to be in your pool directory but
+does not know anything needing them right now, you can use
+.br
+\fBreprepro dumpunreferenced\fP
+.br
+To delete them:
+.br
+\fBreprepro deleteunreferenced\fP
+.SH INTERRUPTING
+Interrupting reprepro has its problems.
+Some things (like speaking with apt methods, database stuff) can cause
+problems when interrupted at the wrong time.
+Then there are design problems of the code making it hard to distinguish
+if the current state is dangerous or non-dangerous to interrupt.
+Thus if reprepro receives a signal normally sent to tell a process to
+terminate itself softly,
+it continues its operation, but does not start any new operations.
+(I.e. it will not tell the apt\-methods any new file to download, it will
+not replace a package in a target, unless it already had started with it,
+it will not delete any files gotten dereferenced, and so on).
+
+\fBIt only catches the first signal of each type. The second signal of a
+given type will terminate reprepro. You will risk database corruption
+and have to remove the lockfile manually.\fP
+
+Also note that even normal interruption leads to code-paths mostly untested
+and thus expose a multitude of bugs including those leading to data corruption.
+Better think a second more before issuing a command than risking the need
+for interruption.
+.SH "REPORTING BUGS"
+Report bugs or wishlist requests to the Debian BTS
+.br
+(e.g. by using \fBreportbug reprepro\fP under Debian)
+.br
+or directly to
+.MTO brlink@debian.org
+.SH COPYRIGHT
+Copyright \(co 2004,2005,2006,2007,2008,2009,2010,2011,2012
+.URL http://www.brlink.eu "Bernhard R. Link"
+.br
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/docs/reprepro.bash_completion b/docs/reprepro.bash_completion
new file mode 100644
index 0000000..062397e
--- /dev/null
+++ b/docs/reprepro.bash_completion
@@ -0,0 +1,742 @@
+_reprepro()
+{
+ local cur prev commands options noargoptions i state cmd ignores hiddencommands commands codenames confdir outdir basedir architectures components importrules snapshots
+
+ confdir=""
+ basedir=""
+ outdir=""
+ distdir=""
+
+ # for parsing configs consider:
+ # - command line arguments take priority over <confdir>/options take priority over environment variables
+ # - all the ways to set confdir may start with '+b/' to denote the basedir to be used.
+ # - basedir might also be set in <confdir>/options (which obviously does not change where this file is)
+
+ function parse_config() {
+ local conffile distfile
+ if [[ -n "$confdir" ]] ; then
+ conffile="$confdir/options"
+ distfile="$confdir/distributions"
+ elif [[ -n "${REPREPRO_CONFIG_DIR:+set}" ]] ; then
+ conffile="$REPREPRO_CONFIG_DIR/options"
+ distfile="$REPREPRO_CONFIG_DIR/distributions"
+ else
+ conffile="+b/conf/options"
+ distfile="+b/conf/distributions"
+ fi
+ confbasedir="${basedir:-${REPREPRO_BASE_DIR:-.}}"
+ if [ x"${conffile#+b/}" != x"${conffile}" ] ; then
+ conffile="$confbasedir/${conffile#+b/}"
+ fi
+ if [ -z "$basedir" ] && [[ -e "$conffile" ]] && grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then
+ confbasedir="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir *//')"
+ fi
+ if [ -z "$confdir" ] && [[ -e "$conffile" ]] && grep -q '^confdir ' -- "$conffile" 2>/dev/null ; then
+ distfile="$(grep '^confdir ' -- "$conffile" 2>/dev/null | sed -e 's/^confdir *//')/distributions"
+ fi
+ if [ x"${distfile#+b/}" != x"${distfile}" ] ; then
+ distfile="$confbasedir/${distfile#+b/}"
+ fi
+ if [[ -d "$distfile" ]] ; then
+ codenames="$(awk -- '/^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {$1="";print}' "$distfile"/*.conf)"
+ architectures="$(awk -- '/^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {$1="";print}' "$distfile"/*.conf)"
+ components="$(awk -- '/^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {$1="";print}' "$distfile"/*.conf)"
+ elif [[ -e "$distfile" ]] ; then
+ codenames="$(awk -- '/^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {$1="";print}' "$distfile")"
+ architectures="$(awk -- '/^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {$1="";print}' "$distfile")"
+ components="$(awk -- '/^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {$1="";print}' "$distfile")"
+ else
+ codenames="experimental sid whatever-you-defined"
+ architectures="source i386 abacus whatever-you-defined"
+ components="main contrib non-free whatever-you-defined"
+ fi
+ }
+ function parse_config_for_distdir() {
+ local conffile
+ if [[ -n "$confdir" ]] ; then
+ conffile="$confdir/options"
+ elif [[ -n "${REPREPRO_CONFIG_DIR:+set}" ]] ; then
+ conffile="$REPREPRO_CONFIG_DIR/options"
+ else
+ conffile="+b/conf/options"
+ fi
+ if [ x"${conffile#+b/}" != x"${conffile}" ] ; then
+ conffile="${basedir:-${REPREPRO_BASE_DIR:-.}}/${conffile#+b/}"
+ fi
+ if [ -z "$basedir" ] && [[ -e "$conffile" ]] ; then
+ if grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then
+ basedir="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir *//')"
+ fi
+ fi
+ if [ -z "$outdir" ] && [[ -e "$conffile" ]] ; then
+ if grep -q '^outdir ' -- "$conffile" 2>/dev/null ; then
+ outdir="$(grep '^outdir ' -- "$conffile" 2>/dev/null | sed -e 's/^outdir *//')"
+ fi
+ fi
+ if [ -z "$distdir" ] && [[ -e "$conffile" ]] ; then
+ if grep -q '^distdir ' -- "$conffile" 2>/dev/null ; then
+ distdir="$(grep '^distdir ' -- "$conffile" 2>/dev/null | sed -e 's/^distdir *//')"
+ fi
+ fi
+ if [ -z "$basedir" ] ; then
+ basedir="${REPREPRO_BASE_DIR:-.}"
+ fi
+ if [ -z "$outdir" ] ; then
+ outdir="${REPREPRO_OUT_DIR:-$basedir}"
+ fi
+ if [ x"${outdir#+b/}" != x"${outdir}" ] ; then
+ outdir="$basedir/${outdir#+b/}"
+ fi
+ if [ -z "$distdir" ] ; then
+ distdir="${REPREPRO_DIST_DIR:-$outdir/dists}"
+ fi
+ if [ x"${distdir#+o/}" != x"${distdir}" ] ; then
+ distdir="$outdir/${distdir#+o/}"
+ elif [ x"${distdir#+b/}" != x"${distdir}" ] ; then
+ distdir="$basedir/${distdir#+b/}"
+ fi
+ }
+ function parse_incoming() {
+ local conffile incomingfile
+ if [[ -n "$confdir" ]] ; then
+ conffile="$confdir/options"
+ incomingfile="$confdir/incoming"
+ elif [[ -n "${REPREPRO_CONFIG_DIR:+set}" ]] ; then
+ conffile="$REPREPRO_CONFIG_DIR/options"
+ incomingfile="$REPREPRO_CONFIG_DIR/incoming"
+ else
+ conffile="+b/conf/options"
+ incomingfile="+b/conf/incoming"
+ fi
+ confbasedir="${basedir:-${REPREPRO_BASE_DIR:-.}}"
+ if [ x"${conffile#+b/}" != x"${conffile}" ] ; then
+ conffile="$confbasedir/${conffile#+b/}"
+ fi
+ if [ -z "$basedir" ] && [[ -e "$conffile" ]] && grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then
+ confbasedir="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir *//')"
+ fi
+ if [ -z "$confdir" ] && [[ -e "$conffile" ]] && grep -q '^confdir ' -- "$conffile" 2>/dev/null ; then
+ incomingfile="$(grep '^confdir ' -- "$conffile" 2>/dev/null | sed -e 's/^confdir //')/incoming"
+ fi
+ if [ x"${incomingfile#+b/}" != x"${incomingfile}" ] ; then
+ incomingfile="$confbasedir/${incomingfile#+b/}"
+ fi
+ if [[ -d "$incomingfile" ]] ; then
+ importrules="$(awk -- '/^[Nn][Aa][Mm][Ee]: / {$1="";print}' "$incomingfile"/*.conf)"
+ elif [[ -e "$incomingfile" ]] ; then
+ importrules="$(awk -- '/^[Nn][Aa][Mm][Ee]: / {$1="";print}' "$incomingfile")"
+ else
+ importrules="rule-name"
+ fi
+ }
+
+ COMPREPLY=()
+
+ ignores='ignore flatandnonflat forbiddenchar 8bit emptyfilenamepart\
+ spaceonlyline malformedchunk unknownfield\
+ wrongdistribution missingfield brokenold\
+ undefinedtracking undefinedtarget unusedoption\
+ brokenversioncmp extension unusedarch surprisingarch\
+ surprisingbinary wrongsourceversion wrongversion dscinbinnmu\
+ brokensignatures uploaders missingfile longkeyid\
+ expiredkey expiredsignature revokedkey oldfile wrongarchitecture'
+ noargoptions='--delete --nodelete --help -h --verbose -v\
+ --nothingiserror --nolistsdownload --keepunreferencedfiles --keepunusednewfiles\
+ --keepdirectories --keeptemporaries --keepuneededlists\
+ --ask-passphrase --nonothingiserror --listsdownload\
+ --nokeepunreferencedfiles --nokeepdirectories --nokeeptemporaries\
+ --nokeepuneededlists --nokeepunusednewfiles\
+ --noask-passphrase --skipold --noskipold --show-percent \
+ --version --guessgpgtty --noguessgpgtty --verbosedb --silent -s --fast'
+ options='-b -i --basedir --outdir --ignore --unignore --methoddir --distdir --dbdir\
+ --listdir --confdir --logdir --morguedir \
+ --section -S --priority -P --component -C\
+ --architecture -A --type -T --export --waitforlock \
+ --spacecheck --safetymargin --dbsafetymargin\
+ --gunzip --bunzip2 --unlzma --unxz --lunzip --gnupghome --list-format --list-skip --list-max\
+ --outhook --endhook'
+
+ i=1
+ prev=""
+ cmd="XYZnoneyetXYZ"
+ while [[ $i -lt $COMP_CWORD ]] ; do
+ cur=${COMP_WORDS[i]}
+ prev=""
+ case "$cur" in
+ --basedir=*)
+ basedir="${cur#--basedir=}"
+ i=$((i+1))
+ ;;
+ --outdir=*)
+			outdir="${cur#--outdir=}"
+ i=$((i+1))
+ ;;
+ --distdir=*)
+			distdir="${cur#--distdir=}"
+ i=$((i+1))
+ ;;
+ --confdir=*)
+ confdir="${cur#--confdir=}"
+ i=$((i+1))
+ ;;
+ --*=*)
+ i=$((i+1))
+ ;;
+ -b|--basedir)
+ prev="$cur"
+ basedir="${COMP_WORDS[i+1]}"
+ i=$((i+2))
+ ;;
+ --outdir)
+ prev="$cur"
+ outdir="${COMP_WORDS[i+1]}"
+ i=$((i+2))
+ ;;
+ --distdir)
+ prev="$cur"
+ distdir="${COMP_WORDS[i+1]}"
+ i=$((i+2))
+ ;;
+ --confdir)
+ prev="$cur"
+ confdir="${COMP_WORDS[i+1]}"
+ i=$((i+2))
+ ;;
+ -i|--ignore|--unignore|--methoddir|--distdir|--dbdir|--listdir|--section|-S|--priority|-P|--component|-C|--architecture|-A|--type|-T|--export|--waitforlock|--spacecheck|--checkspace|--safetymargin|--dbsafetymargin|--logdir|--gunzip|--bunzip2|--unlzma|--unxz|--lunzip|--gnupghome|--morguedir)
+
+ prev="$cur"
+ i=$((i+2))
+ ;;
+ --*|-*)
+ i=$((i+1))
+ ;;
+ *)
+ cmd="$cur"
+ i=$((i+1))
+ break
+ ;;
+ esac
+ done
+ cur=${COMP_WORDS[COMP_CWORD]}
+ if [[ $i -gt $COMP_CWORD && -n "$prev" ]]; then
+ case "$prev" in
+ -b|--basedir|--outdir|--methoddir|--distdir|--dbdir|--listdir|--confdir)
+ COMPREPLY=( $( compgen -d -- $cur ) )
+
+ return 0
+ ;;
+ -T|--type)
+ COMPREPLY=( $( compgen -W "dsc deb udeb" -- $cur ) )
+ return 0
+ ;;
+ -i|--ignore|--unignore)
+ COMPREPLY=( $( compgen -W "$ignores" -- $cur ) )
+ return 0
+ ;;
+ -P|--priority)
+ COMPREPLY=( $( compgen -W "required important standard optional extra" -- $cur ) )
+ return 0
+ ;;
+ -S|--section)
+ COMPREPLY=( $( compgen -W "admin base comm contrib devel doc editors electronics embedded games gnome graphics hamradio interpreters kde libs libdevel mail math misc net news non-free oldlibs otherosfs perl python science shells sound tex text utils web x11 contrib/admin contrib/base contrib/comm contrib/contrib contrib/devel contrib/doc contrib/editors contrib/electronics contrib/embedded contrib/games contrib/gnome contrib/graphics contrib/hamradio contrib/interpreters contrib/kde contrib/libs contrib/libdevel contrib/mail contrib/math contrib/misc contrib/net contrib/news contrib/non-free contrib/oldlibs contrib/otherosfs contrib/perl contrib/python contrib/science contrib/shells contrib/sound contrib/tex contrib/text contrib/utils contrib/web contrib/x11 non-free/admin non-free/base non-free/comm non-free/contrib non-free/devel non-free/doc non-free/editors non-free/electronics non-free/embedded non-free/games non-free/gnome non-free/graphics non-free/hamradio non-free/interpreters non-free/kde non-free/libs non-free/libdevel non-free/mail non-free/math non-free/misc non-free/net non-free/news non-free/non-free non-free/oldlibs non-free/otherosfs non-free/perl non-free/python non-free/science non-free/shells non-free/sound non-free/tex non-free/text non-free/utils non-free/web non-free/x11" -- $cur ) )
+ return 0
+ ;;
+ -A|--architecture)
+ parse_config
+ COMPREPLY=( $( compgen -W "$architectures" -- $cur ) )
+ return 0
+ ;;
+ -C|--component)
+ parse_config
+ COMPREPLY=( $( compgen -W "$components" -- $cur ) )
+ return 0
+ ;;
+ --export)
+ COMPREPLY=( $( compgen -W "silent-never never changed lookedat force" -- $cur ) )
+ return 0
+ ;;
+ --waitforlock)
+ COMPREPLY=( $( compgen -W "0 60 3600 86400" -- $cur ) )
+ return 0
+ ;;
+ --spacecheck)
+ COMPREPLY=( $( compgen -W "none full" -- $cur ) )
+ return 0
+ ;;
+ --safetymargin)
+ COMPREPLY=( $( compgen -W "0 1048576" -- $cur ) )
+ return 0
+ ;;
+ --dbsafetymargin)
+ COMPREPLY=( $( compgen -W "0 104857600" -- $cur ) )
+ return 0
+ ;;
+ esac
+ fi
+
+ if [[ "XYZnoneyetXYZ" = "$cmd" ]] ; then
+ commands='build-needing\
+ check\
+ checkpool\
+ checkpull\
+ checkupdate\
+ cleanlists\
+ clearvanished\
+ collectnewchecksums\
+ copy\
+ copyfilter\
+ copymatched\
+ copysrc\
+ createsymlinks\
+ deleteunreferenced\
+ deleteifunreferenced\
+ dumpreferences\
+ dumptracks\
+ dumppull\
+ dumpunreferenced\
+ dumpupdate\
+ export\
+ forcerepairdescriptions\
+ flood\
+ generatefilelists\
+ gensnapshot\
+ unreferencesnapshot\
+ include\
+ includedeb\
+ includedsc\
+ includeudeb\
+ list\
+ listfilter\
+ listmatched\
+ ls\
+ lsbycomponent\
+ move\
+ movefilter\
+ movematched\
+ movesrc\
+ predelete\
+ processincoming\
+ pull\
+ remove\
+ removealltracks\
+ removefilter\
+ removematched\
+ removesrc\
+ removesrcs\
+ removetrack\
+ reoverride\
+ repairdescriptions\
+ reportcruft\
+ rereference\
+ rerunnotifiers\
+ restore\
+ restorefilter\
+ restorematched\
+ restoresrc\
+ retrack\
+ sourcemissing\
+ tidytracks\
+ translatefilelists\
+ translatelegacychecksums\
+ unusedsources\
+ update'
+ hiddencommands='__d\
+ __dumpuncompressors
+ __extractcontrol\
+ __extractfilelist\
+ __extractsourcesection\
+ __uncompress\
+ _addchecksums\
+ _addpackage\
+ _addreference\
+ _addreferences\
+ _detect\
+ _dumpcontents\
+ _fakeemptyfilelist\
+ _forget\
+ _listchecksums\
+ _listcodenames\
+ _listconfidentifiers\
+ _listdbidentifiers\
+ _listmd5sums\
+ _removereference\
+ _removereferences\
+ _versioncompare'
+
+ if [[ "$cur" == -* ]]; then
+ case "$cur" in
+ --ignore=*)
+ COMPREPLY=( $( compgen -W "$ignores" -- ${cur#--ignore=} ) )
+ ;;
+ --unignore=*)
+ COMPREPLY=( $( compgen -W "$ignores" -- ${cur#--unignore=} ) )
+ ;;
+ --component=*)
+ parse_config
+			COMPREPLY=( $( compgen -W "$components" -- ${cur#--component=} ) )
+ ;;
+		--architecture=*)
+			parse_config
+			COMPREPLY=( $( compgen -W "$architectures" -- ${cur#--architecture=} ) )
+ ;;
+
+ *)
+ COMPREPLY=( $( compgen -W "$options $noargoptions" -- $cur ) )
+ ;;
+ esac
+ elif [[ "$cur" == _* ]]; then
+ COMPREPLY=( $( compgen -W "$hiddencommands" -- $cur ) )
+ else
+ COMPREPLY=( $( compgen -W "$commands" -- $cur ) )
+ fi
+ return 0
+ fi
+
+ case "$cmd" in
+ remove|list|listfilter|removefilter|removetrack|listmatched|removematched|removesrc|removesrcs)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # these later could also look for stuff, but
+ # that might become a bit slow
+ ;;
+ export|update|checkupdate|pull|checkpull|rereference|retrack|removealltracks|tidytracks|dumptracks|check|repairdescriptions|forcerepairdescriptions|reoverride|rerunnotifiers|dumppull|dumpupdate|unusedsources|sourcemissing|reportcruft)
+ # all arguments are codenames
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ ;;
+
+ processincoming)
+ # arguments are rule-name from conf/incoming
+ parse_config
+ parse_incoming
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -W "$importrules" -- $cur ) )
+ return 0
+ fi
+ ;;
+
+ collectnewchecksums|cleanlists|_listcodenames)
+ return 0
+ ;;
+
+ checkpool)
+ # first argument can be fast
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -W "fast" -- $cur ) )
+ return 0
+ fi
+ return 0
+ ;;
+ flood)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then an architecture might follow
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$architectures" -- $cur ) )
+ return 0
+ fi
+ # then nothing else
+ return 0
+ ;;
+ build-needing)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then an architecture
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$architectures" -- $cur ) )
+ return 0
+ fi
+ # then a glob
+ if [[ $(( $i + 2 )) -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -W "$cur'\*'" -- $cur ) )
+ return 0
+ fi
+ return 0
+ ;;
+ __uncompress)
+ # first argument is method
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -W ".gz .bz2 .lzma .xz .lz" -- $cur ) )
+ return 0
+ fi
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -f -- $cur ) )
+ return 0
+ fi
+ if [[ $(( $i + 2 )) -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -f -- $cur ) )
+ return 0
+ fi
+ return 0
+ ;;
+ __extractsourcesection)
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ _filedir dsc
+ fi
+ return 0
+ ;;
+ includedeb)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then one .deb file follows
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ _filedir deb
+ fi
+ return 0
+ ;;
+ includedsc)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then one .dsc file follows
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ _filedir dsc
+ fi
+ return 0
+ ;;
+ include)
+ # first argument is the codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then one .changes file follows
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ _filedir changes
+ fi
+ return 0
+ ;;
+ gensnapshot|unreferencesnapshot)
+ # first argument is a codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # then the name of a snapshot, add a suggestion
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ COMPREPLY=( $( compgen -W "$(date +%Y/%m/%d)" -- $cur ) )
+ return 0
+ fi
+ return 0;
+ ;;
+ copy|copysrc|copyfilter|copymatched|move|movesrc|movefilter|movematched)
+ # first argument is a codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # second argument is a codename
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # here we could look for package names existing in
+ # that distribution, but that would be slow...
+ ;;
+ restore|restoresrc|restorefilter|restorematched)
+ # first argument is a codename
+ if [[ $i -eq $COMP_CWORD ]] ; then
+ parse_config
+ COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
+ return 0
+ fi
+ # second argument is snapshot of that name
+ if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then
+ parse_config_for_distdir
+ snapshots="$( ls "$distdir/${COMP_WORDS[i]}/snapshots" )"
+ COMPREPLY=( $( compgen -W "$snapshots" -- $cur ) )
+ return 0
+ fi
+ # here we could look for package names existing in
+ # that distribution, but that would be slow...
+ ;;
+	__dumpuncompressors|translatelegacychecksums|deleteunreferenced)
+ # no arguments
+ return 0
+ ;;
+ deleteifunreferenced)
+ # less useful than the output of dumpunreferenced,
+ # but this way it should be massively faster:
+ parse_config_for_distdir
+ COMPREPLY=( $(cd "$outdir" && compgen -o filenames -f -- $cur) )
+ return 0
+ ;;
+ esac
+ COMPREPLY=( $( compgen -f -- $cur ) )
+ return 0
+}
+# This -o filenames has its problems when there are directories named like
+# commands in your current directory. But it makes adding filenames so much
+# easier. I wish I knew a way to only activate it for those parts that are
+# filenames.
+complete -o filenames -F _reprepro reprepro
+
+_changestool()
+{
+ local cur prev commands options noargoptions i j cmd ignores wascreate changesfilename
+
+ COMPREPLY=()
+
+ ignores=' notyetimplemented '
+ noargoptions='--help --create'
+ options='--ignore --searchpath'
+ wascreate=no
+
+ i=1
+ prev=""
+ while [[ $i -lt $COMP_CWORD ]] ; do
+ cur=${COMP_WORDS[i]}
+ prev=""
+ case "$cur" in
+ --*=*)
+ i=$((i+1))
+ ;;
+ -i|--ignore|--unignore|-s|--searchpath)
+ prev="$cur"
+ i=$((i+2))
+ ;;
+ --create|-c)
+ i=$((i+1))
+ wascreate=yes
+ ;;
+ --*|-*)
+ i=$((i+1))
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+ cur=${COMP_WORDS[COMP_CWORD]}
+ if [[ $i -gt $COMP_CWORD && -n "$prev" ]]; then
+ case "$prev" in
+ -i|--ignore|--unignore)
+ COMPREPLY=( $( compgen -W "$ignores" -- $cur ) )
+ return 0
+ ;;
+ -s|--searchpath)
+ COMPREPLY=( $( compgen -d -- $cur ) )
+ return 0
+ ;;
+ esac
+ fi
+
+ if [[ $i -ge $COMP_CWORD ]] ; then
+ # No changes filename yet specified:
+ commands='addrawfile adddsc adddeb add includeallsources setdistribution updatechecksums verify'
+
+ if [[ "$cur" == -* ]]; then
+ case "$cur" in
+ *)
+ COMPREPLY=( $( compgen -W "$options $noargoptions" -- $cur ) )
+ ;;
+ esac
+ return 0
+ fi
+ if [ "$wascreate" = "yes" ] ; then
+ _filedir
+ else
+ _filedir changes
+ fi
+ return 0
+ fi
+ changesfilename=${COMP_WORDS[i]}
+ i=$((i+1))
+ if [[ $i -ge $COMP_CWORD ]] ; then
+ # No command yet specified:
+ commands='addrawfile adddsc adddeb add includeallsources setdistribution updatechecksums verify'
+ # todo: restrict to add commands when --create and file not yet existing?
+ COMPREPLY=( $( compgen -W "$commands" -- $cur ) )
+ return 0
+ fi
+ cmd=${COMP_WORDS[i]}
+
+ case "$cmd" in
+# with searchpath it should also list the files available there,
+# but I know no easy way to get that done...
+ addrawfile)
+ _filedir
+ return 0
+ ;;
+ adddsc)
+ _filedir dsc
+ return 0
+ ;;
+ adddeb)
+ _filedir deb
+ return 0
+ ;;
+	add)
+ _filedir
+ return 0
+ ;;
+ includeallsources)
+ prev="$(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[a-zA-Z/0-9.:-]\+ \+[a-zA-Z/0-9.:-]\+ \+[^ ]\+\.dsc$' -- "$changesfilename" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+[^ ]\+ \+[^ ]\+ \+//')"
+ j=0
+ options=()
+ for i in $prev ; do
+ if [ -f "$i" ] ; then
+ options=(${options[@]:-} $(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[^ ]\+$' -- "$i" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+//') )
+ elif [ -f "$(dirname $changesfilename)/$i" ] ; then
+ options=(${options[@]:-} $(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[^ ]\+$' -- "$(dirname $changesfilename)/$i" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+//') )
+ else
+ cmd="missing"
+ fi
+ done
+ COMPREPLY=( $( compgen -W "${options[@]}" -- $cur ) )
+ # if some .dsc cannot be found or read, offer everything additionally
+ if [ "$cmd" = "missing" ] ; then
+ _filedir
+ fi
+ return 0
+ ;;
+ setdistribution)
+ COMPREPLY=( $( compgen -W "unstable testing stable sarge etch lenny sid backports local" -- $cur ) )
+ return 0
+ ;;
+ updatechecksums)
+ options="$(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[a-zA-Z/0-9.:-]\+ \+[a-zA-Z/0-9.:-]\+ \+[^ ]\+$' -- "$changesfilename" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+[^ ]\+ \+[^ ]\+ \+//')"
+ if [ -n "$options" ] ; then
+ COMPREPLY=( $( compgen -W "$options" -- $cur ) )
+ else
+ _filedir
+ fi
+ return 0
+ ;;
+ verify)
+ return 0
+ ;;
+ esac
+ COMPREPLY=( $( compgen -f -- $cur ) )
+ return 0
+}
+# same problem as above with -o filenames,
+# but I guess still better than without...
+complete -o filenames -F _changestool changestool
+
diff --git a/docs/reprepro.zsh_completion b/docs/reprepro.zsh_completion
new file mode 100644
index 0000000..44a132b
--- /dev/null
+++ b/docs/reprepro.zsh_completion
@@ -0,0 +1,554 @@
+#compdef reprepro
+
+# This is a zsh completion script for reprepro.
+# To make use of it make sure it is stored as _reprepro in your
+# zsh's fpath (like /usr/local/share/zsh/site-functions/).
+#
+# to install as user:
+#
+# mkdir ~/.zshfiles
+# cp reprepro.zsh_completion ~/.zshfiles/_reprepro
+# echo 'fpath=(~/.zshfiles $fpath)' >> ~/.zshrc
+# echo 'autoload -U ~/.zshfiles*(:t)' >> ~/.zshrc
+#
+# make sure compinit is called after those lines in .zshrc
+
+local context state line confdir distfile incomingfile incomingdir outdir basedir confdirset basedirset
+typeset -A opt_args
+local -a codenames architectures list commands hiddencommands
+
+function _reprepro_calcbasedir ()
+{
+ if [[ -n "$opt_args[-b]" ]]; then
+ basedir=${opt_args[-b]}
+ basedirset=true
+ elif [[ -n "$opt_args[--basedir]" ]]; then
+ basedir=${opt_args[--basedir]}
+ basedirset=true
+ elif [[ -n "$REPREPRO_BASE_DIR" ]]; then
+ basedir=${REPREPRO_BASE_DIR}
+ basedirset=true
+ else
+ basedir=$PWD
+ basedirset=false
+ fi
+ if [[ -n "$opt_args[--confdir]" ]]; then
+ confdir=${opt_args[--confdir]}
+ elif [[ -n "$REPREPRO_CONFIG_DIR" ]]; then
+ confdir=${REPREPRO_CONFIG_DIR}
+ else
+ confdir=$basedir/conf
+ fi
+ if [[ -e "$confdir/options" ]] ; then
+ if [ "$basedirset" != "true" ] && grep -q '^basedir ' -- "$confdir/options" 2>/dev/null ; then
+ basedir="$(grep '^basedir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^basedir *//')"
+ fi
+ fi
+}
+function _reprepro_filekeys ()
+{
+ _reprepro_calcbasedir
+ if [[ -n "$opt_args[--outdir]" ]]; then
+ outdir=${opt_args[--outdir]}
+ else
+ outdir=$basedir
+ fi
+ list=( $outdir )
+ _files -W list
+}
+
+function _reprepro_calcconfdir ()
+{
+ if [[ -n "$opt_args[--confdir]" ]]; then
+ confdir=${opt_args[--confdir]}
+ confdirset=direct
+ elif [[ -n "$REPREPRO_CONFIG_DIR" ]]; then
+ confdir=${REPREPRO_CONFIG_DIR}
+ confdirset=direct
+ elif [[ -n "$opt_args[-b]" ]]; then
+ confdir=${opt_args[-b]}/conf
+ confdirset=basedir
+ basedirset=true
+ elif [[ -n "$opt_args[--basedir]" ]]; then
+ confdir=${opt_args[--basedir]}/conf
+ confdirset=basedir
+ basedirset=true
+ elif [[ -n "$REPREPRO_BASE_DIR" ]]; then
+ confdir=${REPREPRO_BASE_DIR}/conf
+ confdirset=basedir
+ basedirset=true
+ else
+ confdir=$PWD/conf
+ confdirset=default
+ basedirset=false
+ fi
+ if [ "$confdirset" != "direct" ] && [[ -e "$confdir/options" ]] ; then
+ if grep -q '^confdir ' -- "$confdir/options" 2>/dev/null ; then
+ confdir="$(grep '^confdir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^confdir *//')"
+ elif [ "$basedirset" = "false" ] \
+ && grep -q '^basedir ' -- "$confdir/options" 2>/dev/null ; then
+ confdir="$(grep '^basedir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^basedir *//')/conf"
+ fi
+ fi
+}
+
+function _reprepro_finddistributions ()
+{
+ _reprepro_calcconfdir
+ distfile="$confdir"/distributions
+ test -e "$distfile"
+}
+
+function _reprepro_findincoming ()
+{
+ _reprepro_calcconfdir
+ incomingfile="$confdir"/incoming
+ test -e "$incomingfile"
+}
+
+function _reprepro_grepdistfile ()
+{
+ _reprepro_finddistributions &&
+ if test -d "$distfile" ; then
+ sed -n -e 's#^'"$1"': \(.*\)#\1#p' "$distfile"/*.conf
+ else
+ sed -n -e 's#^'"$1"': \(.*\)#\1#p' "$distfile"
+ fi
+}
+
+function _reprepro_architectures ()
+{
+ architectures=($(_reprepro_grepdistfile '[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]')) \
+ || architectures=(i386 m68k sparc alpha powerpc arm mips mipsel hppa ia64 s390 amd64 ppc64 sh armeb m32r hurd-i386 netbsd-i386 netbsd-alpha kfreebsd-gnu)
+ _wanted -V 'architectures' expl 'architecture' compadd -a architectures
+}
+
+function _reprepro_components ()
+{
+ components=($(_reprepro_grepdistfile '[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]')) \
+ || components=(main contrib non-free bad)
+ _wanted -V 'components' expl 'component' compadd -a components
+}
+function _reprepro_codenames () {
+ codenames=($(_reprepro_grepdistfile '[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]')) \
+ || codenames=(sid lenny etch sarge unstable testing stable local)
+ _wanted -V 'codenames' expl 'codename' compadd -a codenames
+}
+function _reprepro_identifiers () {
+ _reprepro_finddistributions \
+ && list=($(if test -d "$distfile" ; then set -- "$distfile"/*.conf ;
+ else set -- "$distfile" ; fi && awk '
+ /^$/ {for(a=2;a<=acount;a++){
+ for(c=2;c<=ccount;c++){
+ print codename "|" components[c] "|" architectures[a]
+ }
+ if( architectures[a] != "source" ) {
+ for(c=2;c<=uccount;c++){
+ print "u|" codename "|" ucomponents[c] "|" architectures[a]
+ }
+ }
+ }; acount=0;ccount=0;ucount=0}
+ /^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {ccount = split($0,components); next}
+ /^[Uu][Dd][Ee][Bb][Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {uccount = split($0,ucomponents); next}
+ /^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {acount = split($0,architectures); next}
+ /^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {codename = $2; next}
+ END {for(a=2;a<=acount;a++){
+ for(c=2;c<=ccount;c++){
+ print codename "|" components[c] "|" architectures[a]
+ }
+ if( architectures[a] != "source" ) {
+ for(c=2;c<=uccount;c++){
+ print "u|" codename "|" ucomponents[c] "|" architectures[a]
+ }
+ }
+ }; acount=0;ccount=0;ucount=0}
+ {next}
+ ' "$@" )) \
+ || list=(identifier)
+ _wanted -V 'identifiers' expl 'identifier' compadd -a list
+}
+function _reprepro_incomings () {
+ _reprepro_findincoming \
+ && list=($(if test -d "$incomingfile" ; then set -- "$incomingfile"/*.conf ; else set -- "$incomingfile" ; fi && awk '/^[Nn][Aa][Mm][Ee]: / {print $2}' "$@")) \
+ || list=(rule-name)
+ _wanted -V 'rule names' expl 'rule name' compadd -a list
+}
+function _reprepro_incomingdir () {
+ local rulename=$1
+ shift
+ _reprepro_findincoming \
+ && incomingdir=($(if test -d "$incomingfile" ; then set -- "$incomingfile"/*.conf ; else set -- "$incomingfile" ; fi && awk '
+ /^[Ii][Nn][Cc][Oo][Mm][Ii][Nn][Gg][Dd][Ii][Rr]: / {dir=$2; next}
+ /^[Nn][Aa][Mm][Ee]: / {name=$2; next}
+	/^$/ { if( name=="'"$rulename"'" ) { print dir } ; next }
+	END { if( name=="'"$rulename"'" ) { print dir }}
+ {next}
+ ' "$@"))
+ # needs to be an array, as it might not be absolute...
+ list=( $incomingdir )
+}
+function _reprepro_package_names () {
+#todo get package names?...
+ _wanted -V 'package names' expl 'package name' compadd name
+}
+function _reprepro_source_package_names () {
+#todo get package names?...
+ _wanted -V 'source package names' expl 'source package name' compadd name
+}
+
+commands=(
+ build-needing:"list packages likely needing a build"
+ check:"check if all references are correct"
+ checkpool:"check if all files are still there and correct"
+ checkpull:"check what would be pulled"
+ checkupdate:"check what would be updated"
+ cleanlists:"clean unneeded downloaded list files"
+ clearvanished:"remove empty databases"
+ collectnewchecksums:"calculate missing file hashes"
+ copy:"copy a package from one distribution to another"
+ copyfilter:"copy packages from one distribution to another"
+ copymatched:"copy packages from one distribution to another"
+ copysrc:"copy packages belonging to a specific source from one distribution to another"
+ createsymlinks:"create suite symlinks"
+ deleteunreferenced:"delete files without reference"
+ dumpreferences:"dump reference information"
+ dumppull:"dump what would be pulled"
+ dumptracks:"dump tracking information"
+ dumpupdate:"dump what would be updated"
+	dumpunreferenced:"dump files without reference (i.e. unneeded)"
+ export:"export index files"
+ forcerepairdescriptions:"forcefully readd lost long descriptions from .deb file"
+ flood:"copy architecture all packages within a distribution"
+ generatefilelists:"pre-prepare filelist caches for all binary packages"
+ gensnapshot:"generate a snapshot"
+ includedeb:"include a .deb file"
+ includedsc:"include a .dsc file"
+ include:"include a .changes file"
+ includeudeb:"include a .udeb file"
+ listfilter:"list packages matching filter"
+ listmatched:"list packages matching filter"
+ list:"list packages"
+ ls:"list versions of package"
+ lsbycomponent:"list versions of package (grouped by component)"
+ predelete:"delete what would be removed or superseded by an update"
+ processincoming:"process files from an incoming directory"
+	pull:"update from another local distribution"
+ removealltracks:"remove tracking information"
+ remove:"remove packages"
+ removefilter:"remove packages matching a formula"
+ removematched:"remove packages matching a glob"
+ removesrc:"remove packages belonging to a source package"
+	removesrcs:"remove packages belonging to named source packages"
+ removetrack:"remove a single tracking data"
+ reoverride:"apply override information to already existing packages"
+ repairdescriptions:"readd lost long descriptions from .deb file"
+ reportcruft:"report source packages without binaries and vice versa"
+ rereference:"recreate references"
+ rerunnotifiers:"call notificators as if all packages were just included"
+ restore:"restore a package from a distribution's snapshot"
+ restorefilter:"restore packages matching a filter from a snapshot"
+ restorematched:"restore packages matching a glob from a snapshot"
+ restoresrc:"restore packages belonging to a specific source from a snapshot"
+ retrack:"refresh tracking information"
+ sourcemissing:"list binary packages with no source package"
+	tidytracks:"look for files referenced by tracks but no longer needed"
+ translatefilelists:"translate pre-3.0.0 contents.cache.db into new format"
+ translatelegacychecksums:"get rid of obsolete files.db"
+ unreferencesnapshot:"no longer mark files used by an snapshot"
+ unusedsources:"list source packages with no binary packages"
+ update:"update from external source"
+ )
+hiddencommands=(
+ __dumpuncompressors:"list what external uncompressors are available"
+ __extractcontrol:"extract the control file from a .deb file"
+ __extractfilelist:"extract the filelist from a .deb file"
+ __extractsourcesection:"extract source and priority from a .dsc"
+ __uncompress:"uncompress a file"
+ _addchecksums:"add checksums to database"
+ _addmd5sums:"add checksums to database"
+ _addreference:"mark a filekey needed by an identifier"
+ _addreferences:"mark multiple filekeys needed by an identifier"
+ _detect:"look if the file belonging to a filekey exists and add to the database."
+ _dumpcontents:"output contents of a part of the repository"
+ _fakeemptyfilelist:"create an empty fake filelist cache item for a filekey"
+ _forget:"forget a file specified by filekey."
+ _listchecksums:"print a list of filekeys and their checksums"
+ _listcodenames:"list configured codenames"
+ _listconfidentifiers:"list parts of the repository in the configuration"
+ _listdbidentifiers:"list parts of the repository in the database"
+ _listmd5sums:"print a list of filekeys and their md5 hashes"
+	_removereference:"manually remove a reference"
+ _removereferences:"remove all references by an identifier"
+ )
+
+_arguments \
+ '*'{-v,-V,--verbose}'[be more verbose]' \
+ '*--silent[be less verbose]' \
+ '*--delete[Delete files after inclusion]' \
+  '(-b --basedir)'{-b,--basedir}'[Base directory]:basedir:_files -/' \
+ '--outdir[Directory where pool and dist are in]:out dir:_files -/' \
+ '--confdir[Directory where config files are]:config dir:_files -/' \
+ '--distdir[Directory where index files will be exported to]:dist dir:_files -/' \
+ '--logdir[Directory where log files will be generated]:log dir:_files -/' \
+ '--morguedir[Directory where files removed from the pool are stored]:morgue dir:_files -/' \
+ '--dbdir[Directory where the database is stored]:database dir:_files -/' \
+ '--listdir[Directory where downloaded index files will be stored]:list dir:_files -/' \
+ '--methoddir[Directory to search apt methods in]:method dir:_files -/' \
+ '(-C --component)'{-C,--component}'[Override component]:component:{_reprepro_components}' \
+ '(-A --architecture)'{-A,--architecture}'[Limit to a specific architecture]:architecture:{_reprepro_architectures}' \
+ '(-T --type)'{-T,--type}'[Limit to a specific type]:file type:(dsc deb udeb)' \
+ '(-S --section)'{-S,--section}'[Override section]:section:(admin base comm contrib devel doc editors electronics embedded games gnome graphics hamradio interpreters kde libs libdevel mail math misc net news non-free oldlibs otherosfs perl python science shells sound tex text utils web x11 contrib/admin contrib/base contrib/comm contrib/contrib contrib/devel contrib/doc contrib/editors contrib/electronics contrib/embedded contrib/games contrib/gnome contrib/graphics contrib/hamradio contrib/interpreters contrib/kde contrib/libs contrib/libdevel contrib/mail contrib/math contrib/misc contrib/net contrib/news contrib/non-free contrib/oldlibs contrib/otherosfs contrib/perl contrib/python contrib/science contrib/shells contrib/sound contrib/tex contrib/text contrib/utils contrib/web contrib/x11 non-free/admin non-free/base non-free/comm non-free/contrib non-free/devel non-free/doc non-free/editors non-free/electronics non-free/embedded non-free/games non-free/gnome non-free/graphics non-free/hamradio non-free/interpreters non-free/kde non-free/libs non-free/libdevel non-free/mail non-free/math non-free/misc non-free/net non-free/news non-free/non-free non-free/oldlibs non-free/otherosfs non-free/perl non-free/python non-free/science non-free/shells non-free/sound non-free/tex non-free/text non-free/utils non-free/web non-free/x11)' \
+ '(-P --priority)'{-P,--priority}'[Override priority]:priority:(required important standard optional extra)' \
+ '--export=[]:when:(silent-never never changed lookedat force)' \
+ '*--ignore=[Do ignore errors of some type]:error type:((\
+ ignore\:"ignore unknown ignore tags"\
+ flatandnonflat\:"ignore warnings about flat and non-flat distribution"\
+ forbiddenchar\:"allow more 7bit characters for names and versions"\
+ 8bit\:"allow 8 bit characters"\
+ emptyfilenamepart\:"allow strings used to construct filenames to be empty"\
+ spaceonlyline\:"do not warn about lines containing only spaces"\
+ malformedchunk\:"ignore lines without colons"\
+ unknownfield\:"ignore unknown fields"\
+ wrongdistribution\:"put .changes files in distributed they were not made for"\
+ wrongarchitecture\:"do not warn about wrong Architecture fields in downloaded Packages files"\
+ missingfield\:"allow missing fields"\
+ brokenold\:"ignore broken packages in database"\
+ brokenversioncmp\:"ignore versions not parseable"\
+ extension\:"ignore unexpected suffixes of files"\
+ unusedarch\:"allow changes files to list architectures not used"\
+ unusedoption\:"ignore command line options not used by an action"\
+ undefinedtarget\:"allow unspecified package databases"\
+ undefinedtracking\:"allow unspecified tracking databases"\
+ surprisingarch\:"do not protest when a changes file does not list a architecture it has files for"\
+ surprisingbinary\:"do not demand a .changes Binaries header to list all binaries"\
+ wrongsourceversion\:"do not demand coherent source versions in a .changes"\
+ wrongversion\:"do not demand coherent version of source packages in a .changes"\
+ dscinbinnmu\:"do not reject source files in what looks like a binMNU"\
+ brokensignatures\:"ignore corrupted signatures if there is a valid one"\
+ uploaders\:"allow even when forbidden by uploaders file"\
+ missingfile\:"include commands search harder for missing files like .orig.tar.gz"\
+ expiredkey\:"allow signatures with expired keys"\
+ expiredsignature\:"allow expired signatures"\
+ revokedkey\:"allow signatures with revoked keys"\
+ oldfile\:"silence warnings about strange old files in dists"\
+ longkeyid\:"do not warn about keyid in uploaders files gpgme might not accept"\
+ ))' \
+ '*--unignore=[Do not ignore errors of type]:error type:(
+ ignore flatandnonflat forbiddenchar 8bit emptyfilenamepart\
+ spaceonlyline malformedchunk unknownfield unusedoption\
+ wrongdistribution missingfield brokenold brokenversioncmp\
+ extension unusedarch surprisingarch surprisingbinary\
+ wrongsourceversion wrongversion brokensignatures\
+ missingfile uploaders undefinedtarget undefinedtracking\
+ expiredkey expiredsignature revokedkey wrongarchitecture)' \
+ '--waitforlock=[Time to wait if database is locked]:count:(0 3600)' \
+ '--spacecheck[Mode for calculating free space before downloading packages]:behavior:(full none)' \
+ '--dbsafetymargin[Safety margin for the partition with the database]:bytes count:' \
+ '--safetymargin[Safety margin per partition]:bytes count:' \
+ '--gunzip[external Program to extract .gz files]:gunzip binary:_files' \
+ '--bunzip2[external Program to extract .bz2 files]:bunzip binary:_files' \
+ '--unlzma[external Program to extract .lzma files]:unlzma binary:_files' \
+ '--unxz[external Program to extract .xz files]:unxz binary:_files' \
+ '--lunzip[external Program to extract .lz files]:lunzip binary:_files' \
+ '--list-format[Format for list output]:listfilter format:' \
+ '--list-skip[Number of packages to skip in list output]:list skip:' \
+ '--list-max[Maximum number of packages in list output]:list max:' \
+ '(--nonothingiserror)--nothingiserror[Return error code when nothing was done]' \
+ '(--listsdownload --nonolistsdownload)--nolistsdownload[Do not download Release nor index files]' \
+ '(--nokeepunneededlists)--keepunneededlists[Do not delete list/ files that are no longer needed]' \
+ '(--nokeepunreferencedfiles)--keepunreferencedfiles[Do not delete files that are no longer used]' \
+ '(--nokeepunusednewfiles)--keepunusednewfiles[Do not delete newly added files that later were found to not be used]' \
+ '(--nokeepdirectories)--keepdirectories[Do not remove directories when they get empty]' \
+ '(--nokeeptemporaries)--keeptemporaries[When exporting fail do not remove temporary files]' \
+ '(--noask-passphrase)--ask-passphrase[Ask for passphrases (insecure)]' \
+ '(--nonoskipold --skipold)--noskipold[Do not ignore parts where no new index file is available]' \
+ '(--guessgpgtty --nonoguessgpgtty)--noguessgpgtty[Do not set GPG_TTY variable even when unset and stdin is a tty]' \
+ ':reprepro command:->commands' \
+ '2::arguments:->first' \
+ '3::arguments:->second' \
+ '4::arguments:->third' \
+ '*::arguments:->argument' && return 0
+
+case "$state" in
+ (commands)
+ if [[ -prefix _* ]] ; then
+ _describe "reprepro command" hiddencommands
+ else
+ _describe "reprepro command" commands
+ fi
+ ;;
+
+ (first argument|second argument|third argument|argument)
+ case "$words[1]" in
+ (export|update|checkupdate|predelete|pull|checkpull|check|reoverride|repairdescriptions|forcerepairdescriptions|rereference|dumptracks|retrack|removealltracks|tidytracks|dumppull|dumpupdate|rerunnotifiers|unusedsources|sourcemissing|reportcruft)
+ _reprepro_codenames
+ ;;
+ (checkpool)
+ if [[ "$state" = "first argument" ]] ; then
+ _wanted -V 'modifiers' expl 'modifier' compadd fast
+ fi
+ ;;
+
+    (cleanlists|clearvanished|dumpreferences|dumpunreferenced|deleteunreferenced|_listmd5sums|_listchecksums|_addmd5sums|_addchecksums|__dumpuncompressors|translatelegacychecksums|_listcodenames)
+ ;;
+ (_dumpcontents|_removereferences)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_identifiers
+ fi
+ ;;
+ (_removereference)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_identifiers
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_filekeys
+ fi
+ ;;
+ (list|listfilter|listmatched)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ fi
+ ;;
+ (remove)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ else
+ _reprepro_package_names "$words[2]"
+ fi
+ ;;
+ # removesrcs might be improveable...
+ (removesrc|removesrcs)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ else
+ _reprepro_source_package_names "$words[2]"
+ fi
+ ;;
+ (removefilter|removematched)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ fi
+ ;;
+ (gensnapshot|unreferencesnapshot)
+ # TODO: for unreferencesnapshot get instead a list of existing ones
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _wanted -V 'snapshot names' expl 'snapshot name' compadd $(date -I)
+ fi
+ ;;
+ (removetrack)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_source_package_names "$words[2]"
+      elif [[ "$state" = "third argument" ]] ; then
+        : # TODO: complete the version here (no-op keeps the branch non-empty)
+      fi
+ ;;
+ (includedeb)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _files -g "*.deb"
+ fi
+ ;;
+ (includedsc)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _files -g "*.dsc"
+ fi
+ ;;
+ (__extractsourcesection)
+ if [[ "$state" = "first argument" ]] ; then
+ _files -g "*.dsc"
+ fi
+ ;;
+ (copy|copysrc|copyfilter|copymatched)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_codenames
+ fi
+ ;;
+ (restore|restoresrc|restorefilter|restorematched)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+# TODO:
+# elif [[ "$state" = "second argument" ]] ; then
+# _reprepro_codenames
+ fi
+ ;;
+ (include)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _files -g "*.changes"
+ fi
+ ;;
+ (__extractfilelist|__extractcontrol)
+ _files -g "*.deb"
+ ;;
+ (processincoming)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_incomings
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_incomingdir "$words[2]" \
+ && _files -g "*.changes" -W list \
+ || _files -g "*.changes"
+ fi
+ ;;
+ (_detect|_forget)
+ _reprepro_filekeys
+ ;;
+ (_fakeemptyfilelist)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_filekeys
+ fi
+ ;;
+ (_addreference)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_filekeys
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_identifiers
+ fi
+ ;;
+ (_addreferences)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_identifiers
+ else
+ _reprepro_filekeys
+ fi
+ ;;
+ (__uncompress)
+ if [[ "$state" = "first argument" ]] ; then
+ uncompressions=(.gz .bz2 .lzma .xz .lz)
+ _wanted -V 'uncompressions' expl 'uncompression' compadd -a uncompressions
+ elif [[ "$state" = "second argument" ]] ; then
+ _files
+ elif [[ "$state" = "third argument" ]] ; then
+ _files
+ fi
+ ;;
+ (build-needing)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_architectures
+##TODO elif [[ "$state" = "third argument" ]] ; then
+##TODO _reprepro_glob
+ fi
+ ;;
+ (flood)
+ if [[ "$state" = "first argument" ]] ; then
+ _reprepro_codenames
+ elif [[ "$state" = "second argument" ]] ; then
+ _reprepro_architectures
+ fi
+ ;;
+ (*)
+ _files
+ ;;
+ esac
+ ;;
+esac
diff --git a/docs/rredtool.1 b/docs/rredtool.1
new file mode 100644
index 0000000..f217128
--- /dev/null
+++ b/docs/rredtool.1
@@ -0,0 +1,90 @@
+.TH RREDTOOL 1 "2009-11-12" "reprepro" REPREPRO
+.SH NAME
+rredtool \- merge or apply a very restricted subset of ed patches
+.SH SYNOPSIS
+.B rredtool \-\-help
+
+.B rredtool
+[
+\fIoptions\fP
+]
+.B \-\-merge
+.I patches...
+
+.B rredtool
+[
+\fIoptions\fP
+]
+.B \-\-patch
+.IR file-to-patch " " patches...
+
+.B rredtool
+.IR directory " " newfile " " oldfile " " mode
+.SH DESCRIPTION
+rredtool is a tool to handle a subset of ed patches in a safe way.
+It is especially targeted at ed patches as used in Packages.diff
+and Sources.diff.
+It also has a mode supposed to be called from reprepro as Index Hook
+to generate and update a \fBPackages.diff/Index\fP file.
+.SH "MODI"
+One of the following has to be given, so that rredtool knows what to
+do.
+.TP
+.B \-\-version
+Print the version of this tool
+(or rather the version of reprepro which it is coming with).
+.TP
+.B \-\-help
+Print a short overview of the modi.
+.TP
+.B \-\-patch
+The first argument of rredtool is the file to patch,
+the other arguments are ed patches to apply on this one.
+.TP
+.B \-\-merge
+The arguments are treated as ed patches, which are merged into
+a single one.
+.TP
+.BR \-\-reprepro\-hook " (or no other mode flag)"
+Act as reprepro index hook to manage a \fBPackages.diff/index\fP file.
+That means it expects to get exactly 4 arguments
+and writes the names of files to place into filedescriptor 3.
+
+If neither \-\-patch nor \-\-merge is given,
+this mode is used, so you can just put
+
+ \fBDebIndices: Packages Release . .gz /usr/bin/rredtool\fP
+
+into reprepro's \fBconf/distributions\fP file to have a Packages.diff
+directory generated.
+(Note that you have to generate an uncompressed file (the single dot).
+You will need to have patch, gzip and gunzip available in your path.)
+
+.SH "OPTIONS"
+.TP
+.B \-\-debug
+Print intermediate results or other details that might be interesting
+when trying to track down bugs in rredtool but not interesting otherwise.
+.TP
+.B \-\-max\-patch\-count=\fIcount\fP
+When generating a \fIPackages\fP\fB.diff/Index\fP file,
+put at most \fIcount\fP patches in it
+(not counting possible apt workaround patches).
+.TP
+.BR \-o | \-\-output
+Not yet implemented.
+.SH "ENVIRONMENT"
+.TP
+.BR TMPDIR ", " TEMPDIR
+temporary files are created in $\fITEMPDIR\fP if set,
+otherwise in $\fITMPDIR\fP if set, otherwise in \fB/tmp/\fP.
+.SH "REPORTING BUGS"
+Report bugs or wishlist requests to the Debian BTS
+(e.g. by using \fBreportbug reprepro\fP)
+or directly to <brlink@debian.org>.
+.br
+.SH COPYRIGHT
+Copyright \(co 2009 Bernhard R. Link
+.br
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/docs/sftp.py b/docs/sftp.py
new file mode 100755
index 0000000..072dcf1
--- /dev/null
+++ b/docs/sftp.py
@@ -0,0 +1,886 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2013 Bernhard R. Link <brlink@debian.org>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# SOFTWARE IN THE PUBLIC INTEREST, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+
+"""
+This is a sftp module to be used in reprepro's outsftphook example.
+Like the sftp binary it calls ssh to do the connection in a secure
+way and then speaks the sftp subsystem language over that connection.
+"""
+
+
+import os, subprocess, select
+
+class EnumInternException(Exception):
+ def __init__(self, v):
+ super().__init__(v)
+ self.value = v
+
+class _EnumType(type):
+ """
+ Metaclass for Enum. Allows one to set values as parameters.
+ """
+ def __new__(cls, name, bases, namespace, **values):
+ return type.__new__(cls, name, bases, namespace)
+ def __init__(self, name, bases, namespace, **values):
+ super().__init__(name, bases, namespace)
+ if bases:
+ self._byvalue = dict()
+ self._byname = dict()
+ if values:
+ for k,v in values.items():
+ self._create_instance(k, v)
+
+class Enum(metaclass=_EnumType):
+ """
+ An enum is a class with a fixed set of instances.
+ Each instance has a name and a integer value.
+ If a new new instance is to be created, one of those
+ fix instances is returned instead.
+ """
+ @classmethod
+ def _create_instance(cls, name, value):
+ # create a new instance:
+ result = super(Enum, cls).__new__(cls)
+ if isinstance(name, str):
+ result.name = name
+ else:
+ result.name = name[0]
+ result.__name__ = result.name
+ result.value = value
+ cls._byvalue[value] = result
+ if isinstance(name, str):
+ cls._byname[name] = result
+ setattr(cls, name, result)
+ else:
+ for n in name:
+ cls._byname[n] = result
+ setattr(cls, n, result)
+ return result
+ def __new__(cls, l):
+ try:
+ if isinstance(l, cls):
+ return l
+ elif isinstance(l, int):
+ return cls._byvalue[l]
+ elif isinstance(l, str):
+ return cls._byname[l]
+ else:
+ raise EnumInternException(repr(l))
+ except KeyError:
+ raise EnumInternException(repr(l))
+ def __init__(self, l):
+ pass
+ def __int__(self):
+ return self.value
+ def __str__(self):
+ return self.name
+ def __repr__(self):
+ return "%s.%s.%s" % (type(self).__module__, type(self).__name__, self.name)
+
+class _BitmaskType(type):
+ """
+ Metaclass for Bitmask types. Allows one to set values as parameters.
+ """
+ @classmethod
+ def __prepare__(cls, name, bases, **values):
+ namespace = type.__prepare__(cls, name, bases)
+ if values:
+ flagcls = _EnumType.__new__(type, "flags of " + name, (Enum,), dict())
+ flagcls._byvalue = dict()
+ flagcls._byname = dict()
+ namespace["_Values"] = flagcls
+ for (k,v) in values.items():
+ if isinstance(v, int):
+ e = flagcls._create_instance(k, v)
+ e.mask = v
+ else:
+ e = flagcls._create_instance(k, v[0])
+ e.mask = v[1]
+ namespace[k] = e
+ return namespace
+ def __new__(cls, name, bases, namespace, **values):
+ return type.__new__(cls, name, bases, namespace)
+ def __init__(self, name, bases, namespace, **values):
+ return super().__init__(name, bases, namespace)
+
+class Bitmask(set, metaclass=_BitmaskType):
+ def __init__(self, l):
+ if isinstance(l, int):
+ super().__init__([i
+ for (k,i)
+ in self._Values._byvalue.items()
+ if (l & i.mask) == k])
+ if l != int(self):
+ raise Exception("Unrepresentable number %d (got parsed as %s = %d)" %
+ (l, str(self), int(self)))
+ elif isinstance(l, str):
+ try:
+ super().__init__([self._Values(i)
+ for i
+ in l.split("|")])
+ # test for inconsistencies:
+ type(self)(int(self))
+ except EnumInternException as e:
+ raise Exception("Invalid value '%s' in value '%s' for %s" %
+ (e.value, str(l), type(self).__name__))
+ else:
+ try:
+ super().__init__([self._Values(i) for i in l])
+ # test for inconsistencies:
+ type(self)(int(self))
+ except EnumInternException as e:
+ raise Exception("Invalid value '%s' in value '%s' for %s" %
+ (e.value, str(l), type(self).__name__))
+ def __int__(self):
+ v = 0
+ for i in self:
+ v = v | int(i)
+ return v
+ def __str__(self):
+ return "|".join([str(i) for i in self])
+
+class SSH_FILEXFER(Bitmask, ATTR_SIZE = 0x00000001,
+ ATTR_UIDGID = 0x00000002,
+ ATTR_PERMISSIONS = 0x00000004,
+ ATTR_ACMODTIME = 0x00000008,
+ ATTR_EXTENDED = 0x80000000):
+ pass
+
+def ssh_data(b):
+ return len(b).to_bytes(4, byteorder='big') + b
+def ssh_string(s):
+ b = str(s).encode(encoding='utf-8')
+ return len(b).to_bytes(4, byteorder='big') + b
+def ssh_u8(i):
+ return int(i).to_bytes(1, byteorder='big')
+def ssh_u32(i):
+ return int(i).to_bytes(4, byteorder='big')
+def ssh_u64(i):
+ return int(i).to_bytes(8, byteorder='big')
+def ssh_attrs(**opts):
+ flags = SSH_FILEXFER(0)
+ extended = []
+ for key in opts:
+ if key == 'size':
+ flags.add(SSH_FILEXFER.ATTR_SIZE)
+ elif key == 'uid' or key == 'gid':
+ flags.add(SSH_FILEXFER.ATTR_UIDGID)
+ elif key == 'permissions':
+ flags.add(SSH_FILEXFER.ATTR_PERMISSIONS)
+ elif key == 'atime' or key == 'mtime':
+ flags.add(SSH_FILEXFER.ATTR_ACMODTIME)
+ elif '@' in key:
+ extended.add(opts[key])
+ else:
+ raise SftpException("Unsupported file attribute type %s" % repr(key))
+ if extended:
+ flags.add(SSH_FILEXFER.ATTR_EXTENDED)
+ b = ssh_u32(int(flags))
+ if SSH_FILEXFER.ATTR_SIZE in flags:
+ b = b + ssh_u64(opts['size'])
+ if SSH_FILEXFER.ATTR_UIDGID in flags:
+ b = b + ssh_u32(opts['uid'])
+ b = b + ssh_u32(opts['gid'])
+ if SSH_FILEXFER.ATTR_PERMISSIONS in flags:
+ b = b + ssh_u32(opts['permissions'])
+ if SSH_FILEXFER.ATTR_ACMODTIME in flags:
+ b = b + ssh_u32(opts['atime'])
+ b = b + ssh_u32(opts['mtime'])
+ if SSH_FILEXFER.ATTR_EXTENDED in flags:
+ b = b + ssh_u32(len(extended))
+ for key in extended:
+ b = b + ssh_string(key)
+ b = b + ssh_data(opts[key])
+ return b
+
+def ssh_getu32(m):
+ v = int.from_bytes(m[:4], byteorder='big')
+ return v, m[4:]
+def ssh_getstring(m):
+ l = int.from_bytes(m[:4], byteorder='big')
+ return (m[4:4+l].decode(encoding='utf-8'), m[4+l:])
+def ssh_getdata(m):
+ l = int.from_bytes(m[:4], byteorder='big')
+ return (m[4:4+l], m[4+l:])
+def ssh_getattrs(m):
+ attrs = dict()
+ flags, m = ssh_getu32(m)
+ flags = SSH_FILEXFER(flags)
+ if SSH_FILEXFER.ATTR_SIZE in flags:
+ attrs['size'], m = ssh_getu64(m)
+ if SSH_FILEXFER.ATTR_UIDGID in flags:
+ attrs['uid'], m = ssh_getu32(m)
+ attrs['gid'], m = ssh_getu32(m)
+ if SSH_FILEXFER.ATTR_PERMISSIONS in flags:
+ attrs['permissions'], m = ssh_getu32(m)
+ if SSH_FILEXFER.ATTR_ACMODTIME in flags:
+ attrs['atime'], m = ssh_getu32(m)
+ attrs['mtime'], m = ssh_getu32(m)
+ if SSH_FILEXFER.ATTR_EXTENDED in flags:
+ count, m = ssh_getu32(m)
+ while count > 0:
+ count -= 1
+ key, m = ssh_getstring(m)
+ attrs[key], m = ssh_getdata(m)
+ return (attrs, m)
+
+class SftpException(Exception):
+ pass
+
+class SftpStrangeException(SftpException):
+ """Unparseable stuff from server"""
+ pass
+
+class SftpUnexpectedAnswerException(SftpStrangeException):
+ def __init__(self, answer, request):
+ super().__init__("Unexpected answer '%s' to request '%s'" %
+ (str(answer), str(request)))
+
+class SftpTooManyRequestsException(SftpException):
+ def __init__(self):
+ super().__init__("Too many concurrent requests (out of request ids)")
+
+class SftpInternalException(SftpException):
+ """a programming or programmer mistake"""
+ pass
+
+class Request:
+ def __init__(self, **args):
+ self.data = args
+ pass
+ def __int__(self):
+ return self.requestid
+ def __str__(self):
+ return type(self).__name__ + "(" + " ".join(["%s=%s" % (key, repr(val))
+ for (key, val) in self.data.items()]) + ")"
+ @classmethod
+ def bin(cls, conn, req, *payload):
+ s = 5
+ for b in payload:
+ s = s + len(b)
+ # print("Sending packet of type %d and size %d" % (cls.typeid, s))
+ r = ssh_u32(s) + ssh_u8(cls.typeid) + ssh_u32(int(req))
+ for b in payload:
+ r = r + b
+ return r
+ def send(self, conn):
+ conn.requests[int(self)] = self
+ self.conn = conn
+ conn.send(self.bin(conn, self, **self.data))
+ def done(self):
+ if self.requestid != None:
+ del self.conn.requests[self.requestid]
+ self.requestid = None
+
+class NameRequest(Request):
+ """Base class for requests with a single name as argument"""
+ def __init__(self, name):
+ super().__init__(name = name)
+ @classmethod
+ def bin(cls, conn, req, name):
+ return super().bin(conn, req, ssh_string(name))
+
+class HandleRequest(Request):
+	"""Base class for requests with a single handle as argument"""
+	def __init__(self, handle):
+		super().__init__(handle = handle)
+	@classmethod
+	def bin(cls, conn, req, handle):
+		return super().bin(conn, req, ssh_data(handle))
+
+class NameAttrRequest(Request):
+ """Base class for requests with a name and attributes as argument"""
+ def __init__(self, name, **attrs):
+ super().__init__(name = name, attrs = attrs)
+ @classmethod
+ def bin(cls, conn, req, name, attrs):
+ return super().bin(conn, req,
+ ssh_string(name),
+ ssh_attrs(**attrs))
+
+class INIT(Request):
+ typeid = 1
+ @classmethod
+ def bin(cls, conn, version):
+ # INIT has no request id but instead sends a protocol version
+ return super().bin(conn, int(version))
+
+class SSH_FXF(Bitmask, READ = 0x00000001,
+ WRITE = 0x00000002,
+ APPEND = 0x00000004,
+ CREAT = 0x00000008,
+ TRUNC = 0x00000010,
+ EXCL = 0x00000020):
+ pass
+
+class OPEN(Request):
+ typeid = 3
+ def __init__(self, name, flags, **attributes):
+ super().__init__(name = name, flags = SSH_FXF(flags), attrs = attributes)
+ @classmethod
+ def bin(cls, conn, req, name, flags, attrs):
+ return super().bin(conn, req,
+ ssh_string(name),
+ ssh_u32(flags),
+ ssh_attrs(**attrs))
+
+class CLOSE(HandleRequest):
+ typeid = 4
+
+class READ(Request):
+ typeid = 5
+ def __init__(self, handle, start, length):
+ super().__init__(handle = handle, start = start, length = int(length))
+ @classmethod
+ def bin(cls, conn, req, handle, start, length):
+ return super().bin(conn, req, ssh_data(handle), ssh_u64(start), ssh_u32(length))
+
+class WRITE(Request):
+ typeid = 6
+ def __init__(self, handle, start, data):
+ super().__init__(handle = handle, start = start, data = bytes(data))
+ @classmethod
+ def bin(cls, conn, req, handle, start, data):
+ return super().bin(conn, req, ssh_data(handle), ssh_u64(start), ssh_data(data))
+
+class LSTAT(NameRequest):
+ typeid = 7
+
+class FSTAT(HandleRequest):
+ typeid = 8
+
+class SETSTAT(NameAttrRequest):
+ typeid = 9
+
+class FSETSTAT(Request):
+ typeid = 10
+ def __init__(self, handle, **attrs):
+ super().__init__(handle = handle, attrs = attrs)
+ @classmethod
+ def bin(cls, conn, req, name, attrs):
+ return super().bin(conn, req,
+ ssh_data(handle),
+ ssh_attrs(**attrs))
+
+class OPENDIR(NameRequest):
+ typeid = 11
+
+class READDIR(HandleRequest):
+ typeid = 12
+
+class REMOVE(NameRequest):
+ typeid = 13
+
+class MKDIR(NameAttrRequest):
+ typeid = 14
+
+class RMDIR(NameRequest):
+ typeid = 15
+
+class REALPATH(NameRequest):
+ typeid = 16
+
+class STAT(NameRequest):
+ typeid = 17
+
+class SSH_FXF_RENAME(Bitmask, OVERWRITE = 0x00000001,
+ ATOMIC = 0x00000002,
+ NATIVE = 0x00000004):
+ pass
+
+class RENAME(Request):
+ typeid = 18
+ def __init__(self, src, dst, flags):
+ if not isinstance(flags, SSH_FXF_RENAME):
+ flags = SSH_FXF_RENAME(flags)
+ super().__init__(src = src, dst = dst, flags = flags)
+ @classmethod
+ def bin(cls, conn, req, src, dst, flags):
+ # TODO: Version 3 has no flags (though they do not seem to harm)
+ return super().bin(conn, req, ssh_string(src),
+ ssh_string(dst), ssh_u32(flags))
+
+class READLINK(NameRequest):
+ typeid = 19
+
+class SYMLINK(Request):
+ typeid = 20
+ def __init__(self, name, dest):
+ super().__init__(name = name, dest = dest)
+ @classmethod
+ def bin(cls, conn, req, name, dest):
+ # TODO: this is openssh and not the standard (they differ)
+ return super().bin(conn, req, ssh_string(dest),
+ ssh_string(name))
+
+class EXTENDED(Request):
+ typeid = 200
+ # TODO?
+
+################ Answers ################
+
+class Answer:
+ def __int__(self):
+ return self.id
+ # Fallbacks, can be removed once all are done:
+ def __init__(self, m):
+ self.data = m
+ def __str__(self):
+ return "%s %s" % (type(self).__name__, repr(self.data))
+
+class VERSION(Answer):
+ id = 2
+
+class SSH_FX(Enum,
+ OK = 0,
+ EOF = 1,
+ NO_SUCH_FILE = 2,
+ PERMISSION_DENIED = 3,
+ FAILURE = 4,
+ BAD_MESSAGE = 5,
+ NO_CONNECTION = 6,
+ CONNECTION_LOST = 7,
+ OP_UNSUPPORTED = 8,
+ INVALID_HANDLE = 9,
+ NO_SUCH_PATH = 10,
+ FILE_ALREADY_EXISTS = 11,
+ WRITE_PROTECT = 12,
+ NO_MEDIA = 13
+):
+ pass
+
+class STATUS(Answer):
+ id = 101
+ def __init__(self, m):
+ s, m = ssh_getu32(m)
+ self.status = SSH_FX(s)
+ self.message, m = ssh_getstring(m)
+ self.lang, m = ssh_getstring(m)
+ def __str__(self):
+ return "STATUS %s: %s[%s]" % (
+ str(self.status),
+ self.message,
+ self.lang)
+
+class HANDLE(Answer):
+ id = 102
+ def __init__(self, m):
+ self.handle, m = ssh_getdata(m)
+ def __str__(self):
+ return "HANDLE %s" % repr(self.handle)
+
+class DATA(Answer):
+ id = 103
+ def __init__(self, m):
+ self.data, m = ssh_getdata(m)
+ def __str__(self):
+ return "DATA %s" % repr(self.data)
+
+class NAME(Answer):
+ id = 104
+ def __init__(self, m):
+ count, m = ssh_getu32(m)
+ self.names = []
+ while count > 0:
+ count -= 1
+ filename, m = ssh_getstring(m)
+ longname, m = ssh_getstring(m)
+ attrs, m = ssh_getattrs(m)
+ self.append((filename, longname, attrs))
+
+ def __str__(self):
+ return "NAME" + "".join(("%s:%s:%s" % (repr(fn), repr(ln), str(attrs))
+ for (fn,ln,attrs) in self.names))
+
+class ATTRS(Answer):
+ id = 105
+ def __init__(self, m):
+ self.attrs, m = ssh_getattrs(m)
+
+ def __str__(self):
+ return "ATTRS %s" % str(self.attrs)
+
+class EXTENDED_REPLY(Answer):
+ id = 201
+ # TODO?
+
+################ Tasks ################
+
+class Task:
+ """A task is everything that sends requests,
+ receives answers, uses collectors or is
+ awakened by collectors.
+ """
+ def start(self, connection):
+ self.connection = connection
+ def enqueueRequest(self, request):
+ request.task = self
+ self.connection.enqueueRequest(request)
+ def sftpanswer(self, a):
+ raise SftpInternalException("unimplemented sftpanswer called")
+ def writeready(self):
+ raise SftpInternalException("unimplemented writeready called")
+ def parentinfo(self, command):
+ raise SftpInternalException("unimplemented parentinfo called")
+
+class TaskFromGenerator(Task):
+ """A wrapper around a python corotine (generator)"""
+ def __init__(self, gen):
+ super().__init__()
+ self.gen = gen
+ def start(self, connection):
+ super().start(connection)
+ self.enqueue(next(self.gen))
+ def parentinfo(self, command):
+ self.enqueue(self.gen.send(command))
+ def sftpanswer(self, answer):
+ self.enqueue(self.gen.send(answer))
+ def writeready(self):
+ self.enqueue(self.gen.send('canwrite'))
+ def __str__(self):
+ return "Task(by %s)" % self.gen
+ def enqueue(self, joblist):
+ if len(joblist) == 0:
+ return
+ for job in joblist:
+ if isinstance(job, Request):
+ self.enqueueRequest(job)
+ elif job == 'wantwrite':
+ self.connection.enqueueTask(self)
+ elif (isinstance(job, tuple) and len(job) == 2 and
+ isinstance(job[0], Task)):
+ if DebugMode.LOCKS in self.debug:
+ print("parentinfo", job,
+ **self.debugopts)
+ job[0].parentinfo(job[1])
+ elif (isinstance(job, tuple) and len(job) >= 2 and
+ issubclass(job[1], Collector)):
+ self.connection.collect(self, *job)
+ elif isinstance(job, Task):
+ self.connection.start(job)
+ else:
+ raise SftpInternalException("strange result from generator")
+
+
+class Collector(Task):
+ """ Collectors collect information from Tasks and send them
+ triggers at requested events (parent directory created,
+ another file can be processed, ...)
+ """
+ def childinfo(self, who, command):
+ raise SftpInternalException("unimplemented parentinfo called")
+
+class DebugMode(Bitmask, **{
+ 'COOKED_IN': 1,
+ 'COOKED_OUT': 2,
+ 'RAW_IN_STAT': 4,
+ 'RAW_OUT_STAT': 8,
+ 'RAW_IN': 16,
+ 'RAW_OUT': 32,
+ 'ENQUEUE': 64,
+ 'LOCKS': 128,
+}):
+ pass
+
+class Connection:
+ def next_request_id(self):
+ i = self.requestid_try_next
+ while i in self.requests:
+ i = (i + 1) % 0x100000000
+ if i == self.requestid_try_next:
+ raise SftpTooManyRequestsException()
+ self.requestid_try_next = (i + 1) % 0x100000000
+ return i
+ def __init__(self, servername, sshcommand="ssh", username=None, ssh_options=[], debug=0, debugopts=dict(), maxopenfiles=10):
+ self.debug = DebugMode(debug)
+ self.debugopts = debugopts
+ self.requests = dict()
+ self.collectors = dict()
+ self.queue = list()
+ self.wantwrite = list()
+ self.requestid_try_next = 17
+ self.semaphores = {'openfile': maxopenfiles}
+
+ commandline = [sshcommand]
+ if ssh_options:
+ commandline.extend(ssh_options)
+ # those defaults are after the user-supplied ones so they can be overridden.
+ # (earlier ones win with ssh).
+ commandline.extend(["-oProtocol 2", # "-oLogLevel DEBUG",
+ "-oForwardX11 no", "-oForwardAgent no",
+ "-oPermitLocalCommand no",
+ "-oClearAllForwardings yes"])
+ if username:
+ commandline.extend(["-l", username])
+ commandline.extend(["-s", "--", servername, "sftp"])
+ self.connection = subprocess.Popen(commandline,
+ close_fds = True,
+ stdin = subprocess.PIPE,
+ stdout = subprocess.PIPE,
+ bufsize = 0)
+ self.poll = select.poll()
+ self.poll.register(self.connection.stdout, select.POLLIN)
+ self.inbuffer = bytes()
+ self.send(INIT.bin(self, 3))
+ t,b = self.getpacket()
+ if t != VERSION.id:
+ raise SftpUnexpectedAnswerException(b, "INIT")
+ # TODO: parse answer data (including available extensions)
+ def close(self):
+ self.connection.send_signal(15)
+ def getmoreinput(self, minlen):
+ while len(self.inbuffer) < minlen:
+ o = self.connection.stdout.read(minlen - len(self.inbuffer))
+ if o == None:
+ continue
+ if len(o) == 0:
+ raise SftpStrangeException("unexpected EOF")
+ self.inbuffer = self.inbuffer + o
+ def getpacket(self):
+ self.getmoreinput(5)
+ s = int.from_bytes(self.inbuffer[:4], byteorder='big')
+ if s < 1:
+ raise SftpStrangeException("Strange size field in Paket from server!")
+ t = self.inbuffer[4]
+ if DebugMode.RAW_IN_STAT in self.debug:
+ print("receiving packet of length %d and type %d " %
+ (s, t), **self.debugopts)
+ s = s - 1
+ self.inbuffer = self.inbuffer[5:]
+ self.getmoreinput(s)
+ d = self.inbuffer[:s]
+ self.inbuffer = self.inbuffer[s:]
+ if DebugMode.RAW_IN in self.debug:
+ print("received packet(type %d):" % t, repr(d),
+ **self.debugopts)
+ return (t, d)
+ def send(self, b):
+ if not isinstance(b, bytes):
+ raise SftpInternalException("send not given byte sequence")
+ if DebugMode.RAW_OUT_STAT in self.debug:
+ print("sending packet of %d bytes" % len(b),
+ **self.debugopts)
+ if DebugMode.RAW_OUT in self.debug:
+ print("sending packet:", repr(b),
+ **self.debugopts)
+ self.connection.stdin.write(b)
+ def enqueueRequest(self, job):
+ if DebugMode.ENQUEUE in self.debug:
+ print("enqueue", job,
+ **self.debugopts)
+ if len(self.queue) == 0 and len(self.wantwrite) == 0:
+ self.poll.register(self.connection.stdin,
+ select.POLLOUT)
+ job.requestid = self.next_request_id()
+ self.queue.append(job)
+ def enqueueTask(self, task):
+ if DebugMode.ENQUEUE in self.debug:
+ print("enqueue", task, **self.debugopts)
+ if len(self.queue) == 0 and len(self.wantwrite) == 0:
+ self.poll.register(self.connection.stdin,
+ select.POLLOUT)
+ self.wantwrite.append(task)
+ def collect(self, who, command, collectortype, *collectorargs):
+ if DebugMode.LOCKS in self.debug:
+ print("collector", command, collectortype.__name__,
+ *collectorargs, **self.debugopts)
+ """Tell the (possibly to be generated) """
+ collectorid = (collectortype, collectorargs)
+ if not collectorid in self.collectors:
+ l = collectortype(*collectorargs)
+ self.collectors[collectorid] = l
+ l.start(self)
+ else:
+ l = self.collectors[collectorid]
+ l.childinfo(who, command)
+ def start(self, task):
+ task.start(self)
+ def dispatchanswer(self, answer):
+ task = answer.forr.task
+ try:
+ task.sftpanswer(answer)
+ except StopIteration:
+ orphanreqs = [ r
+ for r in self.requests.values()
+ if r.task == task ]
+ for r in orphanreqs:
+ r.done()
+ def readdata(self):
+ t,m = self.getpacket()
+ for answer in Answer.__subclasses__():
+ if t == answer.id:
+ break
+ else:
+ raise SftpUnexpectedAnswerException("Unknown answer type %d" % t, "")
+ id, m = ssh_getu32(m)
+ a = answer(m)
+ if DebugMode.COOKED_IN in self.debug:
+ print("got answer for request %d: %s" %
+ (id, str(a)), **self.debugopts)
+ if not id in self.requests:
+ raise SftpUnexpectedAnswerException(a, "unknown-id-%d" % id)
+ else:
+ a.forr = self.requests[id]
+ self.dispatchanswer(a)
+ def senddata(self):
+ if len(self.queue) == 0:
+ while len(self.wantwrite) > 0:
+ w = self.wantwrite.pop(0)
+ if len(self.wantwrite) == 0 and len(self.queue) == 0:
+ self.poll.unregister(self.connection.stdin)
+ w.writeready()
+ if len(self.queue) > 0:
+ request = self.queue.pop(0)
+ if len(self.queue) == 0 and len(self.wantwrite) == 0:
+ self.poll.unregister(self.connection.stdin)
+ if DebugMode.COOKED_OUT in self.debug:
+ print("sending request %d: %s" %
+ (request.requestid, str(request)),
+ **self.debugopts)
+ request.send(self)
+ def dispatch(self):
+ while self.requests or self.queue:
+ for (fd, event) in self.poll.poll():
+ if event == select.POLLIN:
+ self.readdata()
+ elif event == select.POLLHUP:
+ raise SftpStrangeException(
+ "Server disconnected unexpectedly"
+ " or ssh client process terminated")
+ elif event == select.POLLOUT:
+ self.senddata()
+ else:
+ raise SftpException("Unexpected event %d from poll" % event)
+
+class Dirlock(Collector):
+ def __init__(self, name):
+ super().__init__()
+ self.name = name
+ self.dirname = os.path.dirname(name)
+ self.queue = []
+ def start(self, connection):
+ super().start(connection)
+ if self.dirname and (self.name != self.dirname):
+ self.mode = "wait-for-parent"
+ self.connection.collect(self, 'waitingfor',
+ Dirlock, self.dirname)
+ else:
+ self.tellparent = False
+ self.mode = "wait-for-client"
+ self.isnew = False
+ def sftpanswer(self, a):
+ assert(self.mode == "creating")
+ if not isinstance(a, STATUS):
+ raise SftpUnexpectedAnswer(a, a.forr)
+ # Only one answer is expected:
+ a.forr.done()
+ if a.status == SSH_FX.OK:
+ self.mode = "exists"
+ self.isnew = True
+ self.releaseallqueued()
+ elif self.tellparent and a.status == SSH_FX.NO_SUCH_FILE:
+ self.mode = "wait-for-parent"
+ self.connection.collect(self, 'missing',
+ Dirlock, self.dirname)
+ else:
+ raise SftpException("Cannot create directory %s: %s" % (self.name, a))
+ def parentinfo(self, command):
+ assert(self.mode == "wait-for-parent")
+ if command == "createnew":
+ self.tellparent = False
+ self.isnew = True
+ self.createdir()
+ return
+ if command != "tryandtell" and command != "ready":
+ raise SftpInternalException(
+ "Unexpected parent info %s" %
+ command)
+ self.tellparent = command == "tryandtell"
+ if len(self.queue) > 0:
+ self.mode = "testing"
+ self.queue.pop(0).parentinfo("tryandtell")
+ else:
+ self.mode = "wait-for-client"
+ def childinfo(self, who, command):
+ if command == "waitingfor":
+ if self.mode == "exists":
+ if self.isnew:
+ who.parentinfo("createnew")
+ else:
+ who.parentinfo("ready")
+ elif self.mode == "wait-for-client":
+ self.mode = "testing"
+ who.parentinfo("tryandtell")
+ else:
+ self.queue.append(who)
+ elif command == "found":
+ assert(self.mode == "testing")
+ self.mode = "exists"
+ self.isnew = False
+ self.releaseallqueued()
+ elif command == "missing":
+ self.queue.append(who)
+ self.mode = "creating"
+ self.createdir()
+ else:
+ raise SftpInternalException(
+ "Unexpected child information: %s" %
+ command)
+ def createdir(self):
+ self.mode = "creating"
+ self.enqueueRequest(MKDIR(self.name))
+ def releaseallqueued(self):
+ if self.tellparent:
+ self.connection.collect(self, 'found',
+ Dirlock, self.dirname)
+ self.tellparent = False
+ if self.isnew:
+ command = "createnew"
+ else:
+ command = "ready"
+ # This assumes out mode cannot change any more:
+ while self.queue:
+ self.queue.pop(0).parentinfo(command)
+
+class Semaphore(Collector):
+ def __init__(self, name):
+ super().__init__()
+ self.name = name
+ self.queue = []
+ self.allowed = 10
+ def start(self, connection):
+ self.allowed = connection.semaphores[self.name]
+ def childinfo(self, who, command):
+ if command == "lock":
+ if self.allowed > 0:
+ self.allowed -= 1
+ who.parentinfo("unlock")
+ else:
+ self.queue.append(who)
+ elif command == "release":
+ if self.allowed == 0 and self.queue:
+ self.queue.pop(0).parentinfo("unlock")
+ else:
+ self.allowed += 1
+ else:
+ raise SftpInternalException("Semaphore.childinfo called with invalid command")
diff --git a/docs/short-howto b/docs/short-howto
new file mode 100644
index 0000000..bf55bce
--- /dev/null
+++ b/docs/short-howto
@@ -0,0 +1,209 @@
+This short HOW-TO describes how to setup a repository using reprepro.
+
+First choose a directory where you want to store your repository,
+
+1) Configuration:
+
+Generate a directory named conf/.
+
+Create a file named "distributions" there.
+
+Add entries such as:
+
+Origin: Debian
+Label: Debian-All
+Suite: stable
+Codename: woody
+Version: 3.0
+Architectures: i386 sparc mips source
+Components: main non-free contrib
+Description: Debian woody + woody/non-US + woody/updates
+#Update: debian non-US security
+#SignWith: yes
+
+Or:
+
+Origin: PCPool
+Label: PCPool
+Suite: stable
+Codename: pcpool
+Version: 3.0
+Architectures: i386 source
+Components: main non-free contrib bad protected server
+UDebComponents: main
+Description: PCPool specific (or backported) packages
+SignWith: yes
+DebOverride: override
+UDebOverride: override
+DscOverride: srcoverride
+
+Multiple entries are separated with an empty line.
+
+The codename of the distribution is specified with Codename:.
+It is the primary name of a distribution and e.g. used to determine the
+directory to create and put the index files into.
+
+Update: is described later.
+
+If SignWith: is there, it will try to sign it: either use "yes" or give
+something gpg can use to identify the key you want to use.
+
+The other fields are copied into the appropriate "Release" files generated.
+
+2) Adding files to the repository:
+
+To add a .deb manually:
+
+reprepro -Vb . includedeb pcpool /var/cache/apt/archives/libc6_2.2.5-11.8_i386.deb
+
+to add a .changes file:
+
+reprepro -Vb . include pcpool test.changes
+
+Hint: you can add "-C component", "-A architecture", "-S section" and "-P
+priority" to give additional hints where it should go. Note -A will not
+overwrite something to go into another architecture, but simply ignore those
+not fitting, only "Architecture: all" packages are placed exactly in these
+architecture. Helps when it is not available for all architectures and each
+binary version needs a fitting version of the "Architecture: all" package.
+
+3) Removing files from the repository:
+
+reprepro -Vb . remove pcpool libc6
+
+to only remove from a specific component or architecture:
+
+reprepro -Vb . -C main -A i386 remove pcpool libc6
+
+4) Getting information about a package:
+
+To see in which architectures/components a package exists and which version it
+uses.
+
+reprepro -b . list pcpool libc6
+
+5) Override-Files:
+
+When including packages via "includedeb", "includedsc" or "include"
+the applicable override file from the distribution it is placed
+into is used. The file given by DebOverride: for ".deb"s, the
+file given by UDebOverride: for ".udeb"s and the file given by
+DscOverride: for ".dsc"s. If the filename starts with
+a slash (/) it is not relative to the conf directory given
+with --conf, defaulting to "conf" in the current directory (or in the
+directory specified with --basedir, if that is given).
+
+Note that the Format is those of apt-ftparchive's ExtraOverride, not the old format.
+An (stupid) example line for that file would be:
+libc6 Priority extra
+
+6) importing from upstream repositories:
+
+The file conf/updates can contain entries like this:
+
+Name: debian
+Method: http://ftp.debian.de/debian
+VerifyRelease: F1D53D8C4F368D5D
+
+Name: non-US
+Method: http://ftp.debian.de/debian-non-US
+Suite: */non-US
+Architectures: i386 sparc mips source
+Components: main>main non-free>non-free contrib>contrib
+UDebComponents:
+VerifyRelease: B629A24C38C6029A
+
+Name: security
+Method: http://security.debian.org/debian-security
+Suite: */updates
+UDebComponents:
+VerifyRelease: F1D53D8C4F368D5D
+
+Which of those are used is determined by the Update: line
+in the description in conf/distributions. When Suite:,
+Architecture:, Components: or UDebComponents: are not given,
+those of the distribution to be added are used.
+The suite of the target can be used as "*" in the Suite: here.
+VerifyRelease: tells which GPG key to use checking the Release.gpg.
+
+Add a "IgnoreRelease: yes" to ignore any Release files.
+
+To import components in other components, use the source>target
+syntax.
+
+Method: describes an apt-method, for which the programs
+from /usr/lib/apt/methods are used...
+
+To update everything possible do:
+
+reprepro -b . update
+
+To only update some distributions do:
+
+reprepro -b . update woody
+
+There is no support for updating a distribution from only specific
+ upstreams yet. You will have to edit conf/distributions for that.
+
+The value for VerifyRelease: can be retrieved using:
+
+gpg --with-colons --list-keys <whatever>
+
+===============================================================================
+The following is from V. Stanley Jaddoe <debian@terabytemusic.cjb.net>.
+Make sure to include all sources when allowing everyone access to software
+only available under GPL to you. Well, you should always supply sources,
+but in some cases not doing so might cause you trouble.
+
+Using reprepro with apache2 (sarge, etch, sid)
+
+This example assumes the reprepro repository is under /srv/reprepro/ and that
+apache2 has been correctly installed and configured.
+
+The first step is to create a virtual directory called debian/. Assuming your
+server runs the host http://www.example.com/, the web repository will be
+placed at http://www.example.com/debian/.
+
+Create an apache2 config file in the conf dir of your reprepro repository,
+using the following command:
+
+cat > /srv/reprepro/conf/apache.conf << EOF
+Alias /debian /srv/reprepro/
+<Directory /srv/reprepro>
+ Options +Indexes
+ AllowOverride None
+ order allow,deny
+ allow from all
+</Directory>
+EOF
+
+To enable this virtual directory, a symlink has to be created. This can be done
+using the following command:
+
+ln -s /srv/reprepro/conf/apache.conf /etc/apache2/conf.d/reprepro.conf
+
+The second step is setting the permissions in such a way that web users can
+browse the repository, but cannot view the reprepro specific configuration.
+This can be done using the following commands:
+
+chown -R root:root /srv/reprepro/
+chmod 755 /srv/reprepro/
+chown -R root:www-data /srv/reprepro/dists/ /srv/reprepro/pool/
+chmod 750 /srv/reprepro/*
+
+Reload apache2:
+
+/etc/init.d/apache2 reload
+
+Check if the repository is viewable by web-users, by pointing your browser to
+
+http://www.example.com/debian/
+
+If there are no problems with your reprepro repository and the apache2
+configuration, you should see two directories, dists/ and pool/.
+
+The last step is to add this new repository to your sources.list.
+
+This is as easy as:
+
+echo "deb http://www.example.com/debian pcpool main non-free contrib" >> /etc/apt/sources.list
diff --git a/docs/xz.example b/docs/xz.example
new file mode 100644
index 0000000..2725bb0
--- /dev/null
+++ b/docs/xz.example
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Copy this script to your conf/ dir as xz.sh, make it executable
+# and add to some definition in conf/distributions
+# DscIndices: Sources Release . .gz xz.sh
+# DebIndices: Packages Release . .gz xz.sh
+# UDebIndices: Packages . .gz xz.sh
+# and you have .xz'd Packages and Sources.
+# (alternatively, if you are very brave, put the full path to this file in there)
+
+DIROFDIST="$1"
+NEWNAME="$2"
+OLDNAME="$3"
+# this can be old($3 exists), new($2 exists) or change (both):
+STATUS="$4"
+BASENAME="`basename "$OLDNAME"`"
+
+if [ "xPackages" = "x$BASENAME" ] || [ "xSources" = "x$BASENAME" ] ; then
+ if [ "x$STATUS" = "xold" ] ; then
+ if [ -f "$DIROFDIST/$OLDNAME.xz" ] ; then
+ echo "$OLDNAME.xz" >&3
+ else
+ xz -c -- "$DIROFDIST/$OLDNAME" >"$DIROFDIST/$OLDNAME.xz.new" 3>/dev/null
+ echo "$OLDNAME.xz.new" >&3
+ fi
+ else
+ xz -c -- "$DIROFDIST/$NEWNAME" >"$DIROFDIST/$OLDNAME.xz.new" 3>/dev/null
+ echo "$OLDNAME.xz.new" >&3
+ fi
+fi
diff --git a/donefile.c b/donefile.c
new file mode 100644
index 0000000..bea1e62
--- /dev/null
+++ b/donefile.c
@@ -0,0 +1,242 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include "donefile.h"
+#include "names.h"
+#include "checksums.h"
+#include "remoterepository.h"
+
+/* This stores what a distribution that is updated from remote repositories
+ * has already processed, so that things already processed do not have to be
+ * downloaded or processed again. */
+
+struct markdonefile {
+ char *finalfilename;
+ char *tempfilename;
+ FILE *file;
+};
+
+
+static inline char *donefilename(const char *codename) {
+ return genlistsfilename("lastseen", 2, "", codename, NULL);
+}
+
+retvalue markdone_create(const char *codename, struct markdonefile **done_p) {
+ struct markdonefile *done;
+
+ done = NEW(struct markdonefile);
+ if (FAILEDTOALLOC(done))
+ return RET_ERROR_OOM;
+ done->finalfilename = donefilename(codename);
+ if (FAILEDTOALLOC(done->finalfilename)) {
+ free(done);
+ return RET_ERROR_OOM;
+ }
+ done->tempfilename = calc_addsuffix(done->finalfilename, "new");
+ if (FAILEDTOALLOC(done->tempfilename)) {
+ free(done->finalfilename);
+ free(done);
+ return RET_ERROR_OOM;
+ }
+ done->file = fopen(done->tempfilename, "w+");
+ if (done->file == NULL) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating '%s': %s\n",
+ e, done->tempfilename, strerror(e));
+ free(done->finalfilename);
+ free(done->tempfilename);
+ free(done);
+ return RET_ERROR;
+ }
+ fprintf(done->file, "Updates already processed for %s:\n", codename);
+ *done_p = done;
+ return RET_OK;
+}
+
+void markdone_finish(struct markdonefile *done) {
+ bool error = false;
+
+ if (done == NULL)
+ return;
+ if (done->file == NULL)
+ error = true;
+ else {
+ if (ferror(done->file) != 0) {
+ fprintf(stderr, "An error occurred writing to '%s'!\n",
+ done->tempfilename);
+ (void)fclose(done->file);
+ error = true;
+ } else if (fclose(done->file) != 0) {
+ int e = errno;
+ fprintf(stderr,
+"Error %d occurred writing to '%s': %s!\n",
+ e, done->tempfilename, strerror(e));
+ error = true;
+ }
+ done->file = NULL;
+ }
+ if (error)
+ (void)unlink(done->tempfilename);
+ else {
+ int i;
+
+ i = rename(done->tempfilename, done->finalfilename);
+ if (i != 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d moving '%s' to '%s': %s!\n",
+ e, done->tempfilename,
+ done->finalfilename, strerror(e));
+ }
+ }
+ free(done->finalfilename);
+ free(done->tempfilename);
+ free(done);
+}
+
+void markdone_target(struct markdonefile *done, const char *identifier) {
+ fprintf(done->file, "Target %s\n", identifier);
+}
+
+void markdone_index(struct markdonefile *done, const char *file, const struct checksums *checksums) {
+ retvalue r;
+ size_t s;
+ const char *data;
+
+ r = checksums_getcombined(checksums, &data, &s);
+ if (!RET_IS_OK(r))
+ return;
+ fprintf(done->file, "Index %s %s\n", file, data);
+}
+
+void markdone_cleaner(struct markdonefile *done) {
+ fprintf(done->file, "Delete\n");
+}
+
+/* the same for reading */
+
+struct donefile {
+ char *filename;
+ char *linebuffer;
+ size_t linebuffer_size;
+ FILE *file;
+};
+
+retvalue donefile_open(const char *codename, struct donefile **done_p) {
+ struct donefile *done;
+ ssize_t s;
+
+ done = zNEW(struct donefile);
+ if (FAILEDTOALLOC(done))
+ return RET_ERROR_OOM;
+
+ done->filename = donefilename(codename);
+ if (FAILEDTOALLOC(done->filename)) {
+ free(done);
+ return RET_ERROR_OOM;
+ }
+
+ done->file = fopen(done->filename, "r");
+ if (done->file == NULL) {
+ donefile_close(done);
+ return RET_NOTHING;
+ }
+ s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
+ if (s <= 0 || done->linebuffer[s-1] != '\n') {
+		/* if it cannot be read, is empty or is not a text file,
+		 * delete it and behave as if it never existed... */
+ unlink(done->filename);
+ donefile_close(done);
+ return RET_NOTHING;
+ }
+ done->linebuffer[s-1] = '\0';
+ // TODO: check the first line?
+ *done_p = done;
+ return RET_OK;
+}
+
+void donefile_close(struct donefile *done) {
+ if (done == NULL)
+ return;
+ // TODO: check return, only print a warning, though,
+ // no need to interrupt anything.
+ if (done->file != NULL)
+ fclose(done->file);
+ free(done->linebuffer);
+ free(done->filename);
+ free(done);
+}
+
+retvalue donefile_nexttarget(struct donefile *done, const char **identifier_p) {
+ ssize_t s;
+
+ while (strncmp(done->linebuffer, "Target ", 7) != 0) {
+ s = getline(&done->linebuffer, &done->linebuffer_size,
+ done->file);
+ if (s <= 0 || done->linebuffer[s-1] != '\n')
+ /* Malformed line, ignore the rest... */
+ return RET_NOTHING;
+ done->linebuffer[s-1] = '\0';
+ }
+ /* do not process a second time */
+ done->linebuffer[0] = '\0';
+ /* and return the identifier part */
+ *identifier_p = done->linebuffer + 7;
+ return RET_OK;
+}
+
+bool donefile_nextindex(struct donefile *done, const char **filename_p, struct checksums **checksums_p) {
+ char *p;
+ ssize_t s;
+ retvalue r;
+
+ s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
+ if (s <= 0 || done->linebuffer[s-1] != '\n') {
+ done->linebuffer[0] = '\0';
+ return false;
+ }
+ done->linebuffer[s-1] = '\0';
+ if (strncmp(done->linebuffer, "Index ", 6) != 0)
+ return false;
+ p = done->linebuffer + 6;
+ *filename_p = p;
+ p = strchr(p, ' ');
+ if (p == NULL)
+ return false;
+ *(p++) = '\0';
+ r = checksums_parse(checksums_p, p);
+ return RET_IS_OK(r);
+}
+
+bool donefile_iscleaner(struct donefile *done) {
+ ssize_t s;
+
+ s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
+ if (s <= 0 || done->linebuffer[s-1] != '\n') {
+ done->linebuffer[0] = '\0';
+ return false;
+ }
+ done->linebuffer[s-1] = '\0';
+ return strcmp(done->linebuffer, "Delete") == 0;
+}
diff --git a/donefile.h b/donefile.h
new file mode 100644
index 0000000..e5c6783
--- /dev/null
+++ b/donefile.h
@@ -0,0 +1,24 @@
+#ifndef REPREPRO_DONEFILE_H
+#define REPREPRO_DONEFILE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#endif
+
+struct checksums;
+
+struct markdonefile;
+retvalue markdone_create(const char *, /*@out@*/struct markdonefile **);
+void markdone_finish(/*@only@*/struct markdonefile *);
+void markdone_target(struct markdonefile *, const char *);
+void markdone_index(struct markdonefile *, const char *, const struct checksums *);
+void markdone_cleaner(struct markdonefile *);
+
+struct donefile;
+retvalue donefile_open(const char *, /*@out@*/struct donefile **);
+void donefile_close(/*@only@*/struct donefile *);
+retvalue donefile_nexttarget(struct donefile *, /*@out@*/const char **);
+bool donefile_nextindex(struct donefile *, /*@out@*/const char **, /*@out@*/struct checksums **);
+bool donefile_iscleaner(struct donefile *);
+
+#endif
diff --git a/downloadcache.c b/downloadcache.c
new file mode 100644
index 0000000..a51f4e5
--- /dev/null
+++ b/downloadcache.c
@@ -0,0 +1,315 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include "error.h"
+#include "strlist.h"
+#include "names.h"
+#include "dirs.h"
+#include "files.h"
+#include "freespace.h"
+#include "downloadcache.h"
+
+
+struct downloaditem {
+ /*@dependent@*//*@null@*/struct downloaditem *parent;
+ /*@null@*/struct downloaditem *left, *right;
+ char *filekey;
+ struct checksums *checksums;
+ bool done;
+};
+
+/* Initialize a new download session */
+retvalue downloadcache_initialize(enum spacecheckmode mode, off_t reserveddb, off_t reservedother, struct downloadcache **download) {
+ struct downloadcache *cache;
+ retvalue r;
+
+ cache = zNEW(struct downloadcache);
+ if (FAILEDTOALLOC(cache))
+ return RET_ERROR_OOM;
+ r = space_prepare(&cache->devices, mode, reserveddb, reservedother);
+ if (RET_WAS_ERROR(r)) {
+ free(cache);
+ return r;
+ }
+ *download = cache;
+ return RET_OK;
+}
+
+/* free all memory */
+static void freeitem(/*@null@*//*@only@*/struct downloaditem *item) {
+ if (item == NULL)
+ return;
+ freeitem(item->left);
+ freeitem(item->right);
+ free(item->filekey);
+ checksums_free(item->checksums);
+ free(item);
+}
+
+retvalue downloadcache_free(struct downloadcache *download) {
+ if (download == NULL)
+ return RET_NOTHING;
+
+ freeitem(download->items);
+ space_free(download->devices);
+ free(download);
+ return RET_OK;
+}
+
+static retvalue downloaditem_callback(enum queue_action action, void *privdata, void *privdata2, const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums *checksums, const char *method) {
+ struct downloaditem *d = privdata;
+ struct downloadcache *cache = privdata2;
+ struct checksums *read_checksums = NULL;
+ retvalue r;
+ bool improves;
+
+ if (action != qa_got)
+ // TODO: instead store in downloaditem?
+ return RET_ERROR;
+
+ /* if the file is somewhere else, copy it: */
+ if (strcmp(gotfilename, wantedfilename) != 0) {
+ if (verbose > 1)
+ fprintf(stderr,
+"Linking file '%s' to '%s'...\n", gotfilename, wantedfilename);
+ r = checksums_linkorcopyfile(wantedfilename, gotfilename,
+ &read_checksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', obtained from '%s' method.\n",
+ gotfilename, method);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r)) {
+ // TODO: instead store in downloaditem?
+ return r;
+ }
+ if (read_checksums != NULL)
+ checksums = read_checksums;
+ }
+
+ if (checksums == NULL || !checksums_iscomplete(checksums)) {
+ assert(read_checksums == NULL);
+ r = checksums_read(wantedfilename, &read_checksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', though '%s' method claims to have put it there!\n",
+ wantedfilename, method);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r)) {
+ // TODO: instead store in downloaditem?
+ return r;
+ }
+ checksums = read_checksums;
+ }
+ assert (checksums != NULL);
+
+ if (!checksums_check(d->checksums, checksums, &improves)) {
+ fprintf(stderr, "Wrong checksum during receive of '%s':\n",
+ uri);
+ checksums_printdifferences(stderr, d->checksums, checksums);
+ checksums_free(read_checksums);
+ (void)unlink(wantedfilename);
+ // TODO: instead store in downloaditem?
+ return RET_ERROR_WRONG_MD5;
+ }
+ if (improves) {
+ r = checksums_combine(&d->checksums, checksums, NULL);
+ checksums_free(read_checksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ } else
+ checksums_free(read_checksums);
+
+ if (global.showdownloadpercent > 0) {
+ unsigned int percent;
+
+ cache->size_done += checksums_getfilesize(d->checksums);
+
+ percent = (100 * cache->size_done) / cache->size_todo;
+ if (global.showdownloadpercent > 1
+ || percent > cache->last_percent) {
+ unsigned long long all = cache->size_done;
+ int kb, mb, gb, tb, b, groups = 0;
+
+ cache->last_percent = percent;
+
+ printf("Got %u%%: ", percent);
+ b = all & 1023;
+ all = all >> 10;
+ kb = all & 1023;
+ all = all >> 10;
+ mb = all & 1023;
+ all = all >> 10;
+ gb = all & 1023;
+ all = all >> 10;
+ tb = all;
+ if (tb != 0) {
+ printf("%dT ", tb);
+ groups++;
+ }
+ if (groups < 2 && (groups > 0 || gb != 0)) {
+ printf("%dG ", gb);
+ groups++;
+ }
+ if (groups < 2 && (groups > 0 || mb != 0)) {
+ printf("%dM ", mb);
+ groups++;
+ }
+ if (groups < 2 && (groups > 0 || kb != 0)) {
+ printf("%dK ", kb);
+ groups++;
+ }
+ if (groups < 2 && (groups > 0 || b != 0))
+ printf("%d ", b);
+ puts("bytes");
+ }
+ }
+ r = files_add_checksums(d->filekey, d->checksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ d->done = true;
+ return RET_OK;
+}
+
+/*@null@*//*@dependent@*/ static struct downloaditem *searchforitem(struct downloadcache *list,
+ const char *filekey,
+ /*@out@*/struct downloaditem **p,
+ /*@out@*/struct downloaditem ***h) {
+ struct downloaditem *item;
+ int c;
+
+ *h = &list->items;
+ *p = NULL;
+ item = list->items;
+ while (item != NULL) {
+ *p = item;
+ c = strcmp(filekey, item->filekey);
+ if (c == 0)
+ return item;
+ else if (c < 0) {
+ *h = &item->left;
+ item = item->left;
+ } else {
+ *h = &item->right;
+ item = item->right;
+ }
+ }
+ return NULL;
+}
+
+/* queue a new file to be downloaded:
+ * results in RET_ERROR_WRONG_MD5, if someone else already asked
+ * for the same destination with a different md5sum. */
+retvalue downloadcache_add(struct downloadcache *cache, struct aptmethod *method, const char *orig, const char *filekey, const struct checksums *checksums) {
+
+ struct downloaditem *i;
+ struct downloaditem *item, **h, *parent;
+ char *fullfilename;
+ retvalue r;
+
+ assert (cache != NULL && method != NULL);
+ r = files_expect(filekey, checksums, false);
+ if (r != RET_NOTHING)
+ return r;
+
+ i = searchforitem(cache, filekey, &parent, &h);
+ if (i != NULL) {
+ bool improves;
+
+ assert (i->filekey != NULL);
+ if (!checksums_check(i->checksums, checksums, &improves)) {
+ fprintf(stderr,
+"ERROR: Same file is requested with conflicting checksums:\n");
+ checksums_printdifferences(stderr,
+ i->checksums, checksums);
+ return RET_ERROR_WRONG_MD5;
+ }
+ if (improves) {
+ r = checksums_combine(&i->checksums,
+ checksums, NULL);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_NOTHING;
+ }
+ item = zNEW(struct downloaditem);
+ if (FAILEDTOALLOC(item))
+ return RET_ERROR_OOM;
+
+ item->done = false;
+ item->filekey = strdup(filekey);
+ item->checksums = checksums_dup(checksums);
+ if (FAILEDTOALLOC(item->filekey) || FAILEDTOALLOC(item->checksums)) {
+ freeitem(item);
+ return RET_ERROR_OOM;
+ }
+
+ fullfilename = files_calcfullfilename(filekey);
+ if (FAILEDTOALLOC(fullfilename)) {
+ freeitem(item);
+ return RET_ERROR_OOM;
+ }
+ (void)dirs_make_parent(fullfilename);
+ r = space_needed(cache->devices, fullfilename, checksums);
+ if (RET_WAS_ERROR(r)) {
+ free(fullfilename);
+ freeitem(item);
+ return r;
+ }
+ r = aptmethod_enqueue(method, orig, fullfilename,
+ downloaditem_callback, item, cache);
+ if (RET_WAS_ERROR(r)) {
+ freeitem(item);
+ return r;
+ }
+ item->left = item->right = NULL;
+
+ item->parent = parent;
+ *h = item;
+
+ cache->size_todo += checksums_getfilesize(item->checksums);
+
+ return RET_OK;
+}
+
+/* same as above, only for more files... */
+retvalue downloadcache_addfiles(struct downloadcache *cache, struct aptmethod *method, const struct checksumsarray *origfiles, const struct strlist *filekeys) {
+ retvalue result, r;
+ int i;
+
+ assert (origfiles != NULL && filekeys != NULL
+ && origfiles->names.count == filekeys->count);
+
+ result = RET_NOTHING;
+
+ for (i = 0 ; i < filekeys->count ; i++) {
+ r = downloadcache_add(cache, method,
+ origfiles->names.values[i],
+ filekeys->values[i],
+ origfiles->checksums[i]);
+ RET_UPDATE(result, r);
+ }
+ return result;
+}
diff --git a/downloadcache.h b/downloadcache.h
new file mode 100644
index 0000000..60558d7
--- /dev/null
+++ b/downloadcache.h
@@ -0,0 +1,49 @@
+#ifndef REPREPRO_DOWNLOADLIST_H
+#define REPREPRO_DOWNLOADLIST_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_APTMETHOD_H
+#include "aptmethod.h"
+#endif
+#ifndef REPREPRO_CHECKSUMS_H
+#include "checksums.h"
+#endif
+#ifndef REPREPRO_FREESPACE_H
+#include "freespace.h"
+#endif
+
+struct downloaditem;
+
+struct downloadcache {
+ /*@null@*/struct downloaditem *items;
+ /*@null@*/struct devices *devices;
+
+ /* for showing what percentage was downloaded */
+ long long size_todo, size_done;
+ unsigned int last_percent;
+};
+
+/* Initialize a new download session */
+retvalue downloadcache_initialize(enum spacecheckmode, off_t /*reserveddb*/, off_t /*reservedother*/, /*@out@*/struct downloadcache **);
+
+/* free all memory */
+retvalue downloadcache_free(/*@null@*//*@only@*/struct downloadcache *);
+
+/* queue a new file to be downloaded:
+ * results in RET_ERROR_WRONG_MD5, if someone else already asked
+ * for the same destination with a different md5sum. */
+retvalue downloadcache_add(struct downloadcache *, struct aptmethod *, const char * /*orig*/, const char * /*filekey*/, const struct checksums *);
+
+/* same as above, only for more files... */
+retvalue downloadcache_addfiles(struct downloadcache *, struct aptmethod *, const struct checksumsarray * /*origfiles*/, const struct strlist * /*filekeys*/);
+#endif
diff --git a/dpkgversions.c b/dpkgversions.c
new file mode 100644
index 0000000..c51eb6a
--- /dev/null
+++ b/dpkgversions.c
@@ -0,0 +1,150 @@
+/*
+ * Most contents of this file are taken from:
+ * libdpkg - Debian packaging suite library routines
+ * from the files
+ * parsehelp.c - helpful routines for parsing and writing
+ * and
+ * vercmp.c - comparison of version numbers
+ *
+ * Copyright (C) 1995 Ian Jackson <ian@chiark.greenend.org.uk>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with dpkg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <ctype.h>
+#include "error.h"
+#include "dpkgversions.h"
+
+#define _(a) a
+#define cisalpha(a) (isalpha(a)!=0)
+#define cisdigit(a) (isdigit(a)!=0)
+
+/* from dpkg-db.h.in: */
+
+struct versionrevision {
+ unsigned long epoch;
+ const char *version;
+ const char *revision;
+};
+
+/* from parsehelp.c */
+
+static
+const char *parseversion(struct versionrevision *rversion, const char *string) {
+ char *hyphen, *colon, *eepochcolon;
+ const char *end, *ptr;
+ unsigned long epoch;
+
+ if (!*string) return _("version string is empty");
+
+ /* trim leading and trailing space */
+ while (*string && (*string == ' ' || *string == '\t')) string++;
+ /* string now points to the first non-whitespace char */
+ end = string;
+ /* find either the end of the string, or a whitespace char */
+ while (*end && *end != ' ' && *end != '\t') end++;
+ /* check for extra chars after trailing space */
+ ptr = end;
+ while (*ptr && (*ptr == ' ' || *ptr == '\t')) ptr++;
+ if (*ptr) return _("version string has embedded spaces");
+
+ colon= strchr(string, ':');
+ if (colon) {
+ epoch= strtoul(string, &eepochcolon, 10);
+ if (colon != eepochcolon) return _("epoch in version is not number");
+ if (!*++colon) return _("nothing after colon in version number");
+ string= colon;
+ rversion->epoch= epoch;
+ } else {
+ rversion->epoch= 0;
+ }
+ rversion->version= strndup(string, end - string);
+ hyphen= strrchr(rversion->version,'-');
+ if (hyphen) *hyphen++= 0;
+ rversion->revision= hyphen ? hyphen : "";
+
+ return NULL;
+}
+
+/* from vercmp.c */
+
+/* assume ascii; warning: evaluates x multiple times! */
+#define order(x) ((x) == '~' ? -1 \
+ : cisdigit((x)) ? 0 \
+ : !(x) ? 0 \
+ : cisalpha((x)) ? (x) \
+ : (x) + 256)
+
+static int verrevcmp(const char *val, const char *ref) {
+ if (!val) val= "";
+ if (!ref) ref= "";
+
+ while (*val || *ref) {
+ int first_diff= 0;
+
+ while ((*val && !cisdigit(*val)) || (*ref && !cisdigit(*ref))) {
+ int vc= order(*val), rc= order(*ref);
+ if (vc != rc) return vc - rc;
+ val++; ref++;
+ }
+
+ while (*val == '0') val++;
+ while (*ref == '0') ref++;
+ while (cisdigit(*val) && cisdigit(*ref)) {
+ if (!first_diff) first_diff= *val - *ref;
+ val++; ref++;
+ }
+ if (cisdigit(*val)) return 1;
+ if (cisdigit(*ref)) return -1;
+ if (first_diff) return first_diff;
+ }
+ return 0;
+}
+
+static
+int versioncompare(const struct versionrevision *version,
+ const struct versionrevision *refversion) {
+ int r;
+
+ if (version->epoch > refversion->epoch) return 1;
+ if (version->epoch < refversion->epoch) return -1;
+ r= verrevcmp(version->version,refversion->version); if (r) return r;
+ return verrevcmp(version->revision,refversion->revision);
+}
+
+/* now own code */
+
+retvalue dpkgversions_cmp(const char *first,const char *second,int *result) {
+ struct versionrevision v1,v2;
+ const char *m;
+
+ if ((m = parseversion(&v1,first)) != NULL) {
+ fprintf(stderr,"Error while parsing '%s' as version: %s\n",first,m);
+ return RET_ERROR;
+ }
+ if ((m = parseversion(&v2,second)) != NULL) {
+ fprintf(stderr,"Error while parsing '%s' as version: %s\n",second,m);
+ return RET_ERROR;
+ }
+ *result = versioncompare(&v1,&v2);
+ free((char*)v1.version);
+ free((char*)v2.version);
+ return RET_OK;
+}
diff --git a/dpkgversions.h b/dpkgversions.h
new file mode 100644
index 0000000..b7e04bc
--- /dev/null
+++ b/dpkgversions.h
@@ -0,0 +1,13 @@
+#ifndef REPREPRO_DPKGVERSIONS_H
+#define REPREPRO_DPKGVERSIONS_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning wth?
+#endif
+
+/* return error if those are not proper versions,
+ * otherwise RET_OK and result is <0, ==0 or >0, if first is smaller, equal or larger */
+retvalue dpkgversions_cmp(const char *, const char *, /*@out@*/int *);
+
+#endif
diff --git a/error.h b/error.h
new file mode 100644
index 0000000..715b1a8
--- /dev/null
+++ b/error.h
@@ -0,0 +1,56 @@
+#ifndef REPREPRO_ERROR_H
+#define REPREPRO_ERROR_H
+
+#ifndef REPREPRO_GLOBALS_H
+#include "globals.h"
+#endif
+
+bool interrupted(void);
+
+/* retvalue is simply an int.
+ * just named to show it follows the given semantics */
+/*@numabstract@*/ enum retvalue_enum {
+ DB_MALFORMED_KEY = -30001,
+ RET_ERROR_INCOMING_DENY = -13,
+ RET_ERROR_INTERNAL = -12,
+ RET_ERROR_BZ2 = -11,
+ RET_ERROR_Z = -10,
+ RET_ERROR_INTERRUPTED = -9,
+ RET_ERROR_UNKNOWNFIELD = -8,
+ RET_ERROR_MISSING = -7,
+ RET_ERROR_BADSIG = -6,
+ RET_ERROR_GPGME = -5,
+ RET_ERROR_EXIST = -4,
+ RET_ERROR_OOM = -3,
+ RET_ERROR_WRONG_MD5 = -2,
+ RET_ERROR = -1,
+ RET_NOTHING = 0,
+ RET_OK = 1
+};
+typedef enum retvalue_enum retvalue;
+
+#define FAILEDTOALLOC(x) unlikely(x == NULL)
+
+#define RET_IS_OK(r) likely((r) == RET_OK)
+#define RET_WAS_NO_ERROR(r) likely((r) >= (retvalue)0)
+#define RET_WAS_ERROR(r) unlikely((r) < (retvalue)0)
+
+/* update a return value, so that it contains the first error-code
+ * and otherwise is RET_OK, if anything was RET_OK */
+#define RET_UPDATE(ret, update) { if ((update)!=RET_NOTHING && RET_WAS_NO_ERROR(ret)) ret=update;}
+
+/* like RET_UPDATE, but RET_ENDUPDATE(RET_NOTHING, RET_OK) keeps RET_NOTHING */
+#define RET_ENDUPDATE(ret, update) {if (RET_WAS_ERROR(update) && RET_WAS_NO_ERROR(ret)) ret=update;}
+
+/* code a errno in a error */
+#define RET_ERRNO(err) ((err>0)?((retvalue)-err):RET_ERROR)
+
+/* code a db-error in a error */
+// TODO: to be implemented...
+#define RET_DBERR(e) RET_ERROR
+
+#define ASSERT_NOT_NOTHING(r) {assert (r != RET_NOTHING); if (r == RET_NOTHING) r = RET_ERROR_INTERNAL;}
+
+#define EXIT_RET(ret) (RET_WAS_NO_ERROR(ret)?((nothingiserror&&ret==RET_NOTHING)?EXIT_FAILURE:EXIT_SUCCESS):(int)ret)
+
+#endif
diff --git a/exports.c b/exports.c
new file mode 100644
index 0000000..aef8a85
--- /dev/null
+++ b/exports.c
@@ -0,0 +1,548 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005,2007,2008,2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "dirs.h"
+#include "database.h"
+#include "target.h"
+#include "exports.h"
+#include "configparser.h"
+#include "filecntl.h"
+#include "hooks.h"
+#include "package.h"
+
+static const char *exportdescription(const struct exportmode *mode, char *buffer, size_t buffersize) {
+ char *result = buffer;
+ enum indexcompression ic;
+ static const char* compression_names[ic_count] = {
+ "uncompressed"
+ ,"gzipped"
+#ifdef HAVE_LIBBZ2
+ ,"bzip2ed"
+#endif
+#ifdef HAVE_LIBLZMA
+ ,"xzed"
+#endif
+ };
+ bool needcomma = false,
+ needellipsis = false;
+
+ assert (buffersize > 50);
+ *buffer++ = ' '; buffersize--;
+ *buffer++ = '('; buffersize--;
+ for (ic = ic_first ; ic < ic_count ; ic++) {
+ if ((mode->compressions & IC_FLAG(ic)) != 0) {
+ size_t l = strlen(compression_names[ic]);
+ assert (buffersize > l+3);
+ if (needcomma) {
+ *buffer++ = ','; buffersize--;
+ }
+ memcpy(buffer, compression_names[ic], l);
+ buffer += l; buffersize -= l;
+ needcomma = true;
+ }
+ }
+ /* should be long enough for the previous things in all cases */
+ assert (buffersize > 10);
+ if (mode->hooks.count > 0) {
+ int i;
+
+ if (needcomma) {
+ *buffer++ = ','; buffersize--;
+ }
+ strcpy(buffer, "script: ");
+ buffer += 8; buffersize -= 8;
+ needcomma = false;
+
+ for (i = 0 ; i < mode->hooks.count ; i++) {
+ const char *hook = dirs_basename(mode->hooks.values[i]);
+ size_t l = strlen(hook);
+
+ if (buffersize < 6) {
+ needellipsis = true;
+ break;
+ }
+ if (needcomma) {
+ *buffer++ = ','; buffersize--;
+ }
+
+ if (l > buffersize - 5) {
+ memcpy(buffer, hook, buffersize-5);
+ buffer += (buffersize-5);
+ buffersize -= (buffersize-5);
+ needellipsis = true;
+ break;
+ } else {
+ memcpy(buffer, hook, l);
+ buffer += l; buffersize -= l;
+ assert (buffersize >= 2);
+ }
+ needcomma = true;
+ }
+ }
+ if (needellipsis) {
+		/* moving backward here is easier than checking above */
+ if (buffersize < 5) {
+ buffer -= (5 - buffersize);
+ buffersize = 5;
+ }
+ *buffer++ = '.'; buffersize--;
+ *buffer++ = '.'; buffersize--;
+ *buffer++ = '.'; buffersize--;
+ }
+ assert (buffersize >= 2);
+ *buffer++ = ')'; buffersize--;
+ *buffer = '\0';
+ return result;
+}
+
+retvalue exportmode_init(/*@out@*/struct exportmode *mode, bool uncompressed, /*@null@*/const char *release, const char *indexfile) {
+ strlist_init(&mode->hooks);
+ mode->compressions = IC_FLAG(ic_gzip) | (uncompressed
+ ? IC_FLAG(ic_uncompressed) : 0);
+ mode->filename = strdup(indexfile);
+ if (FAILEDTOALLOC(mode->filename))
+ return RET_ERROR_OOM;
+ if (release == NULL)
+ mode->release = NULL;
+ else {
+ mode->release = strdup(release);
+ if (FAILEDTOALLOC(mode->release))
+ return RET_ERROR_OOM;
+ }
+ return RET_OK;
+}
+
+// TODO: check for scripts in confdir early...
+retvalue exportmode_set(struct exportmode *mode, struct configiterator *iter) {
+ retvalue r;
+ char *word;
+
+ r = config_getword(iter, &word);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u: Unexpected end of field!\n"
+"Filename to use for index files (Packages, Sources, ...) missing.\n",
+ config_filename(iter),
+ config_markerline(iter), config_markercolumn(iter));
+ return RET_ERROR_MISSING;
+ }
+ assert (word[0] != '\0');
+
+ if (word[0] == '.') {
+ free(word);
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u: filename for index files expected!\n",
+ config_filename(iter),
+ config_markerline(iter), config_markercolumn(iter));
+ return RET_ERROR;
+ }
+
+ free(mode->release);
+ mode->release = NULL;
+ free(mode->filename);
+ mode->filename = word;
+
+ r = config_getword(iter, &word);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING)
+ word = NULL;
+ if (r != RET_NOTHING && word[0] != '.') {
+ assert (word[0] != '\0');
+ mode->release = word;
+ r = config_getword(iter, &word);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u: Unexpected end of field!\n"
+"Compression identifiers ('.', '.gz' or '.bz2') missing.\n",
+ config_filename(iter),
+ config_markerline(iter), config_markercolumn(iter));
+ return RET_ERROR;
+ }
+ if (word[0] != '.') {
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u:\n"
+"Compression extension ('.', '.gz' or '.bz2') expected.\n",
+ config_filename(iter),
+ config_markerline(iter), config_markercolumn(iter));
+ free(word);
+ return RET_ERROR;
+ }
+ mode->compressions = 0;
+ while (r != RET_NOTHING && word[0] == '.') {
+ if (word[1] == '\0')
+ mode->compressions |= IC_FLAG(ic_uncompressed);
+ else if (word[1] == 'g' && word[2] == 'z' &&
+ word[3] == '\0')
+ mode->compressions |= IC_FLAG(ic_gzip);
+#ifdef HAVE_LIBBZ2
+ else if (word[1] == 'b' && word[2] == 'z' && word[3] == '2' &&
+ word[4] == '\0')
+ mode->compressions |= IC_FLAG(ic_bzip2);
+#endif
+#ifdef HAVE_LIBLZMA
+ else if (word[1] == 'x' && word[2] == 'z' &&word[3] == '\0')
+ mode->compressions |= IC_FLAG(ic_xz);
+#endif
+ else {
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u:\n"
+"Unsupported compression extension '%s'!\n",
+ config_filename(iter),
+ config_markerline(iter),
+ config_markercolumn(iter),
+ word);
+ free(word);
+ return RET_ERROR;
+ }
+ free(word);
+ r = config_getword(iter, &word);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ while (r != RET_NOTHING) {
+ if (word[0] == '.') {
+ fprintf(stderr,
+"Error parsing %s, line %u, column %u:\n"
+"Scripts starting with dot are forbidden to avoid ambiguity ('%s')!\n"
+"Try to put all compressions first and then all scripts to avoid this.\n",
+ config_filename(iter),
+ config_markerline(iter),
+ config_markercolumn(iter),
+ word);
+ free(word);
+ return RET_ERROR;
+ } else {
+ char *fullfilename = configfile_expandname(word, word);
+ if (FAILEDTOALLOC(fullfilename))
+ return RET_ERROR_OOM;
+ r = strlist_add(&mode->hooks, fullfilename);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ r = config_getword(iter, &word);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_OK;
+}
+
+static retvalue gotfilename(const char *relname, size_t l, struct release *release) {
+
+ if (l > 12 && memcmp(relname+l-12, ".tobedeleted", 12) == 0) {
+ char *filename;
+
+ filename = strndup(relname, l - 12);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ return release_adddel(release, filename);
+
+ } else if (l > 4 && memcmp(relname+(l-4), ".new", 4) == 0) {
+ char *filename, *tmpfilename;
+
+ filename = strndup(relname, l - 4);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ tmpfilename = strndup(relname, l);
+ if (FAILEDTOALLOC(tmpfilename)) {
+ free(filename);
+ return RET_ERROR_OOM;
+ }
+ return release_addnew(release, tmpfilename, filename);
+ } else if (l > 5 && memcmp(relname + (l-5), ".new.", 5) == 0) {
+ char *filename, *tmpfilename;
+
+ filename = strndup(relname, l-5);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ tmpfilename = strndup(relname, l-1);
+ if (FAILEDTOALLOC(tmpfilename)) {
+ free(filename);
+ return RET_ERROR_OOM;
+ }
+ return release_addsilentnew(release, tmpfilename, filename);
+ } else if (l > 5 && memcmp(relname + (l-5), ".keep", 5) == 0) {
+ return RET_OK;
+ } else {
+ char *filename;
+
+ filename = strndup(relname, l);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ return release_addold(release, filename);
+ }
+}
+
/* Run one export hook script for an index file and collect its output.
 *
 * The hook is started as:
 *   hook <distdir> <relfilename>.new <relfilename> <mode>
 * with a pipe write end dup2'ed to fd 3; every (whitespace-trimmed,
 * newline- or NUL-separated) line the hook writes to fd 3 is treated as
 * a filename and handed to gotfilename() for release bookkeeping.
 *
 * Returns RET_NOTHING if hook is NULL, RET_OK if the hook exited with
 * status 0, an error otherwise. */
static retvalue callexporthook(/*@null@*/const char *hook, const char *relfilename, const char *mode, struct release *release) {
	pid_t f, c;
	int status;
	int io[2];
	/* accumulates partial lines between read()s; one spare byte is
	 * kept for the final forced '\0' terminator below */
	char buffer[1000];
	int already = 0;

	if (hook == NULL)
		return RET_NOTHING;

	status = pipe(io);
	if (status < 0) {
		int e = errno;
		fprintf(stderr, "Error %d creating pipe: %s!\n",
				e, strerror(e));
		return RET_ERRNO(e);
	}

	f = fork();
	if (f < 0) {
		int e = errno;
		(void)close(io[0]);
		(void)close(io[1]);
		fprintf(stderr, "Error %d while forking for exporthook: %s\n",
				e, strerror(e));
		return RET_ERRNO(e);
	}
	if (f == 0) {
		/* child: wire the pipe's write end to fd 3 and exec the hook */
		char *reltmpfilename;
		int e;

		if (dup2(io[1], 3) < 0) {
			e = errno;
			fprintf(stderr, "Error %d dup2'ing fd %d to 3: %s\n",
					e, io[1], strerror(e));
			exit(255);
		}
		/* German proverb "Doppelt haelt besser" (redundancy is
		 * safer): close both original pipe fds unless one already
		 * is fd 3 */
		if (io[0] != 3)
			(void)close(io[0]);
		if (io[1] != 3)
			(void)close(io[1]);
		closefrom(4);
		/* backward compatibility */
		reltmpfilename = calc_addsuffix(relfilename, "new");
		if (reltmpfilename == NULL) {
			exit(255);
		}
		sethookenvironment(causingfile, NULL, NULL, NULL);
		(void)execl(hook, hook, release_dirofdist(release),
				reltmpfilename, relfilename, mode,
				ENDOFARGUMENTS);
		e = errno;
		fprintf(stderr, "Error %d while executing '%s': %s\n",
				e, hook, strerror(e));
		exit(255);
	}
	/* parent: keep only the read end */
	close(io[1]);
	markcloseonexec(io[0]);

	if (verbose > 6)
		printf("Called %s '%s' '%s.new' '%s' '%s'\n",
			hook, release_dirofdist(release),
			relfilename, relfilename, mode);
	/* read what comes from the client */
	while (true) {
		ssize_t r;
		int last, j;

		r = read(io[0], buffer + already, 999 - already);
		if (r < 0) {
			int e = errno;
			fprintf(stderr,
"Error %d reading from exporthook: %s!\n",
					e, strerror(e));
			break;
		}

		already += r;
		/* EOF: append an artificial terminator so a final
		 * unterminated line is still processed */
		if (r == 0) {
			buffer[already] = '\0';
			already++;
		}
		last = 0;
		/* split the buffered bytes into lines at '\n'/'\0' */
		for (j = 0 ; j < already ; j++) {
			if (buffer[j] == '\n' || buffer[j] == '\0') {
				int next = j+1;
				int e = (j>0)?(j-1):j;
				retvalue ret;

				/* trim leading whitespace */
				while (last < j && xisspace(buffer[last]))
					last++;
				if (last >= j) {
					/* blank line: skip */
					last = next;
					continue;
				}
				/* trim trailing whitespace */
				while (xisspace(buffer[e])) {
					e--;
					assert (e >= last);
				}

				ret = gotfilename(buffer + last, e - last + 1,
						release);
				if (RET_WAS_ERROR(ret)) {
					(void)close(io[0]);
					return ret;
				}
				last = next;
			}
		}
		/* keep any incomplete trailing line for the next read() */
		if (last > 0) {
			if (already > last)
				memmove(buffer, buffer + last, already - last);
			already -= last;
		}
		if (r == 0)
			break;
	}
	(void)close(io[0]);
	do {
		c = waitpid(f, &status, WUNTRACED);
		if (c < 0) {
			int e = errno;
			fprintf(stderr,
"Error %d while waiting for hook '%s' to finish: %s\n", e, hook, strerror(e));
			return RET_ERRNO(e);
		}
	} while (c != f);
	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) == 0) {
			if (verbose > 6)
				printf("Exporthook successfully returned!\n");
			return RET_OK;
		} else {
			fprintf(stderr,
"Exporthook failed with exitcode %d!\n",
				(int)WEXITSTATUS(status));
			return RET_ERROR;
		}
	} else if (WIFSIGNALED(status)) {
		fprintf(stderr, "Exporthook killed by signal %d!\n",
				(int)(WTERMSIG(status)));
		return RET_ERROR;
	} else {
		fprintf(stderr,
"Exporthook terminated abnormally. (status is %x)!\n",
				status);
		return RET_ERROR;
	}
}
+
+retvalue export_target(const char *relativedir, struct target *target, const struct exportmode *exportmode, struct release *release, bool onlyifmissing, bool snapshot) {
+ retvalue r;
+ struct filetorelease *file;
+ const char *status;
+ char *relfilename;
+ char buffer[100];
+ struct package_cursor iterator;
+
+ relfilename = calc_dirconcat(relativedir, exportmode->filename);
+ if (FAILEDTOALLOC(relfilename))
+ return RET_ERROR_OOM;
+
+ r = release_startfile(release, relfilename, exportmode->compressions,
+ onlyifmissing, &file);
+ if (RET_WAS_ERROR(r)) {
+ free(relfilename);
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ if (release_oldexists(file)) {
+ if (verbose > 5)
+ printf(" replacing '%s/%s'%s\n",
+ release_dirofdist(release), relfilename,
+ exportdescription(exportmode, buffer, 100));
+ status = "change";
+ } else {
+ if (verbose > 5)
+ printf(" creating '%s/%s'%s\n",
+ release_dirofdist(release), relfilename,
+ exportdescription(exportmode, buffer, 100));
+ status = "new";
+ }
+ r = package_openiterator(target, READONLY, true, &iterator);
+ if (RET_WAS_ERROR(r)) {
+ release_abortfile(file);
+ free(relfilename);
+ return r;
+ }
+ while (package_next(&iterator)) {
+ if (iterator.current.controllen == 0)
+ continue;
+ (void)release_writedata(file, iterator.current.control,
+ iterator.current.controllen);
+ (void)release_writestring(file, "\n");
+ if (iterator.current.control[iterator.current.controllen-1] != '\n')
+ (void)release_writestring(file, "\n");
+ }
+ r = package_closeiterator(&iterator);
+ if (RET_WAS_ERROR(r)) {
+ release_abortfile(file);
+ free(relfilename);
+ return r;
+ }
+ r = release_finishfile(release, file);
+ if (RET_WAS_ERROR(r)) {
+ free(relfilename);
+ return r;
+ }
+ } else {
+ if (verbose > 9)
+ printf(" keeping old '%s/%s'%s\n",
+ release_dirofdist(release), relfilename,
+ exportdescription(exportmode, buffer, 100));
+ status = "old";
+ }
+ if (!snapshot) {
+ int i;
+
+ for (i = 0 ; i < exportmode->hooks.count ; i++) {
+ const char *hook = exportmode->hooks.values[i];
+
+ r = callexporthook(hook, relfilename, status, release);
+ if (RET_WAS_ERROR(r)) {
+ free(relfilename);
+ return r;
+ }
+ }
+ }
+ free(relfilename);
+ return RET_OK;
+}
+
+void exportmode_done(struct exportmode *mode) {
+ assert (mode != NULL);
+ free(mode->filename);
+ strlist_done(&mode->hooks);
+ free(mode->release);
+}
diff --git a/exports.h b/exports.h
new file mode 100644
index 0000000..fb4062f
--- /dev/null
+++ b/exports.h
@@ -0,0 +1,26 @@
#ifndef REPREPRO_EXPORTS_H
#define REPREPRO_EXPORTS_H

#ifndef REPREPRO_RELEASE_H
#include "release.h"
#endif

/* How one index file of a target is to be exported. */
struct exportmode {
	/* "Packages", "Sources" or something like that */
	char *filename;
	/* create uncompressed, create .gz, <future things...> */
	compressionset compressions;
	/* Generate a Release file next to the index file, if non-NULL */
	/*@null@*/
	char *release;
	/* programs to start after all index files are generated */
	struct strlist hooks;
};

retvalue exportmode_init(/*@out@*/struct exportmode *, bool /*uncompressed*/, /*@null@*/const char * /*release*/, const char * /*indexfile*/);
struct configiterator;
/* parse an "...Indices:" config field into an exportmode */
retvalue exportmode_set(struct exportmode *, struct configiterator *);
void exportmode_done(struct exportmode *);

retvalue export_target(const char * /*relativedir*/, struct target *, const struct exportmode *, struct release *, bool /*onlyifmissing*/, bool /*snapshot*/);
#endif
diff --git a/extractcontrol.c b/extractcontrol.c
new file mode 100644
index 0000000..a9df08a
--- /dev/null
+++ b/extractcontrol.c
@@ -0,0 +1,458 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include "error.h"
+#include "filecntl.h"
+#include "readtextfile.h"
+#include "debfile.h"
+#include "chunks.h"
+
+#ifdef HAVE_LIBARCHIVE
+#error Why did this file got compiled instead of debfile.c?
+#endif
+// **********************************************************************
+// * This is a very simple implementation calling ar and tar, which
+// * is only used with --without-libarchive or when no libarchive was
+// * found.
+// **********************************************************************
+
+static retvalue try_extractcontrol(char **control, const char *debfile, bool brokentar) {
+ int pipe_1[2];
+ int pipe_2[2];
+ int ret;
+ pid_t ar, tar, pid;
+ int status;
+ char *controlchunk;
+
+ retvalue result, r;
+
+ result = RET_OK;
+
+ ret = pipe(pipe_1);
+ if (ret < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
+ return RET_ERRNO(e);
+ }
+
+ ret = pipe(pipe_2);
+ if (ret < 0) {
+ int e = errno;
+ close(pipe_1[0]); close(pipe_1[1]);
+ fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
+ return RET_ERRNO(e);
+ }
+
+ ar = fork();
+ if (ar < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d forking: %s\n", e, strerror(e));
+ result = RET_ERRNO(e);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ return result;
+ }
+
+ if (ar == 0) {
+ int e;
+ /* calling ar */
+ if (dup2(pipe_1[1], 1) < 0)
+ exit(255);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ //TODO without explicit path
+ ret = execl("/usr/bin/ar",
+ "ar", "p", debfile, "control.tar.gz",
+ ENDOFARGUMENTS);
+ e = errno;
+ fprintf(stderr, "ar call failed with error %d: %s\n",
+ e, strerror(e));
+ exit(254);
+ }
+
+ tar = fork();
+ if (tar < 0) {
+ int e = errno;
+ result = RET_ERRNO(e);
+ fprintf(stderr, "Error %d forking: %s\n", e, strerror(e));
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ tar = -1;
+ } else if (tar == 0) {
+ int e;
+ /* calling tar */
+ if (dup2(pipe_1[0], 0) < 0)
+ exit(255);
+ if (dup2(pipe_2[1], 1) < 0)
+ exit(255);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ //TODO without explicit path
+ execl("/bin/tar", "tar", "-xOzf", "-",
+ brokentar?"control":"./control",
+ ENDOFARGUMENTS);
+ e = errno;
+ fprintf(stderr, "tar call failed with error %d: %s\n",
+ e, strerror(e));
+ exit(254);
+
+ }
+
+ close(pipe_1[0]); close(pipe_1[1]);
+ markcloseonexec(pipe_2[0]); close(pipe_2[1]);
+
+ controlchunk = NULL;
+
+ /* read data: */
+ if (RET_IS_OK(result)) {
+ size_t len, controllen;
+ const char *afterchanges;
+
+ r = readtextfilefd(pipe_2[0],
+ brokentar?
+"output from ar p <debfile> control.tar.gz | tar -xOzf - control":
+"output from ar p <debfile> control.tar.gz | tar -xOzf - ./control",
+ &controlchunk, &controllen);
+ if (RET_IS_OK(r)) {
+ len = chunk_extract(controlchunk,
+ controlchunk, controllen,
+ false, &afterchanges);
+ if (len == 0)
+ r = RET_NOTHING;
+ if (*afterchanges != '\0') {
+ fprintf(stderr,
+"Unexpected empty line in control information within '%s'\n"
+"(obtained via 'ar p %s control.tar.gz | tar -XOzf - %scontrol')\n",
+ debfile, debfile,
+ brokentar?"":"./");
+ free(controlchunk);
+ controlchunk = NULL;
+ r = RET_ERROR;
+ }
+ }
+ if (r == RET_NOTHING) {
+ free(controlchunk);
+ controlchunk = NULL;
+ fprintf(stderr,
+"No control information found in .deb!\n");
+ /* only report error now,
+ * if we haven't try everything yet */
+ if (brokentar)
+ r = RET_ERROR_MISSING;
+ }
+ RET_UPDATE(result, r);
+
+ }
+
+ while (ar != -1 || tar != -1) {
+ pid=wait(&status);
+ if (pid < 0) {
+ if (errno != EINTR)
+ RET_UPDATE(result, RET_ERRNO(errno));
+ } else {
+ if (pid == ar) {
+ ar = -1;
+ if (!WIFEXITED(status)) {
+ fprintf(stderr,
+"Ar exited unnaturally!\n");
+ result = RET_ERROR;
+ } else if (WEXITSTATUS(status) != 0) {
+ fprintf(stderr,
+"Error from ar for '%s': %d\n", debfile, WEXITSTATUS(status));
+ result = RET_ERROR;
+ }
+ } else if (pid == tar) {
+ tar = -1;
+ if (!WIFEXITED(status)) {
+ fprintf(stderr,
+"Tar exited unnaturally!\n");
+ result = RET_ERROR;
+ } else if (!brokentar && WEXITSTATUS(status) == 2) {
+ if (RET_IS_OK(result))
+ result = RET_NOTHING;
+ } else if (WEXITSTATUS(status) != 0) {
+ fprintf(stderr,
+"Error from tar for control.tar.gz within '%s': %d\n",
+ debfile,
+ WEXITSTATUS(status));
+ result = RET_ERROR;
+ }
+ } else {
+ // WTH?
+ fprintf(stderr,
+"Who is %d, and why does this bother me?\n", (int)pid);
+ }
+ }
+
+ }
+ if (RET_IS_OK(result)) {
+ if (controlchunk == NULL)
+ /* we got not data but tar gave not error.. */
+ return RET_ERROR_MISSING;
+ else
+ *control = controlchunk;
+ } else
+ free(controlchunk);
+ return result;
+}
+
+retvalue extractcontrol(char **control, const char *debfile) {
+ retvalue r;
+
+ r = try_extractcontrol(control, debfile, false);
+ if (r != RET_NOTHING)
+ return r;
+ /* perhaps the control.tar.gz is packaged by hand wrongly,
+ * try again: */
+ r = try_extractcontrol(control, debfile, true);
+ if (RET_IS_OK(r)) {
+ fprintf(stderr,
+"WARNING: '%s' contains a broken/unusual control.tar.gz.\n"
+"reprepro was able to work around this but other tools or versions might not.\n",
+ debfile);
+ }
+ assert (r != RET_NOTHING);
+ return r;
+}
+
+retvalue getfilelist(/*@out@*/char **filelist, /*@out@*/size_t *size, const char *debfile) {
+ fprintf(stderr,
+"Extraction of file list without libarchive currently not implemented.\n");
+ return RET_ERROR;
+#if 0
+ int pipe_1[2];
+ int pipe_2[2];
+ int ret;
+ pid_t ar, tar, pid;
+ int status;
+ struct filelistcompressor c;
+ size_t last = 0;
+ retvalue result;
+
+#error this still needs to be reimplemented...
+ result = filelistcompressor_setup(&c);
+ if (RET_WAS_ERROR(result))
+ return result;
+
+ result = RET_OK;
+
+ ret = pipe(pipe_1);
+ if (ret < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
+ filelistcompressor_cancel(&c);
+ return RET_ERRNO(e);
+ }
+
+ ret = pipe(pipe_2);
+ if (ret < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
+ close(pipe_1[0]); close(pipe_1[1]);
+ filelistcompressor_cancel(&c);
+ return RET_ERRNO(e);
+ }
+
+ ar = fork();
+ if (ar < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d forking: %s\n", e, strerror(e));
+ result = RET_ERRNO(e);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ filelistcompressor_cancel(&c);
+ return result;
+ }
+
+ if (ar == 0) {
+ int e;
+ /* calling ar */
+ if (dup2(pipe_1[1], 1) < 0)
+ exit(255);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ //TODO without explicit path
+ ret = execl("/usr/bin/ar",
+ "ar", "p", debfile, "data.tar.gz",
+ ENDOFARGUMENTS);
+ e = errno;
+ fprintf(stderr, "ar call failed with error %d: %s\n",
+ e, strerror(e));
+ exit(254);
+ }
+
+ tar = fork();
+ if (tar < 0) {
+ int e = errno;
+ result = RET_ERRNO(e);
+ fprintf(stderr, "Error %d forking: %s\n", e, strerror(e));
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ tar = -1;
+ } else if (tar == 0) {
+ int e;
+ /* calling tar */
+ if (dup2(pipe_1[0], 0) < 0)
+ exit(255);
+ if (dup2(pipe_2[1], 1) < 0)
+ exit(255);
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[0]); close(pipe_2[1]);
+ //TODO without explicit path
+ execl("/bin/tar", "tar", "-tzf", "-", ENDOFARGUMENTS);
+ e = errno;
+ fprintf(stderr, "tar call failed with error %d: %s\n",
+ e, strerror(e));
+ exit(254);
+
+ }
+
+ close(pipe_1[0]); close(pipe_1[1]);
+ close(pipe_2[1]);
+
+ /* read data: */
+ if (RET_IS_OK(result)) do {
+ ssize_t bytes_read;
+ size_t ignore;
+
+ if (listsize <= len + 512) {
+ char *n;
+
+ listsize = len + 1024;
+ n = realloc(list, listsize);
+ if (FAILEDTOALLOC(n)) {
+ result = RET_ERROR_OOM;
+ break;
+ }
+ list = n;
+ }
+
+ ignore = 0;
+ bytes_read = read(pipe_2[0], list+len, listsize-len-1);
+ if (bytes_read < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d reading from pipe: %s\n",
+ e, strerror(e));
+ result = RET_ERRNO(e);
+ break;
+ } else if (bytes_read == 0)
+ break;
+ else while (bytes_read > 0) {
+ if (list[len] == '\0') {
+ fprintf(stderr,
+"Unexpected NUL character from tar while getting file list from %s!\n", debfile);
+ result = RET_ERROR;
+ break;
+ } else if (list[len] == '\n') {
+ if (len > last+ignore && list[len-1] != '/') {
+ list[len] = '\0';
+ len++;
+ bytes_read--;
+ memmove(list+last, list+last+ignore,
+ 1+len-last-ignore);
+ last = len-ignore;
+ } else {
+ len++;
+ bytes_read--;
+ ignore = len-last;
+ }
+ } else if (list[len] == '.' && len == last+ignore) {
+ len++; ignore++;
+ bytes_read--;
+ } else if (list[len] == '/' && len == last+ignore) {
+ len++; ignore++;
+ bytes_read--;
+ } else {
+ len++;
+ bytes_read--;
+ }
+ }
+ if (ignore > 0) {
+ if (len <= last+ignore)
+ len = last;
+ else {
+ memmove(list+last, list+last+ignore,
+ 1+len-last-ignore);
+ len -= ignore;
+ }
+ }
+ } while (true);
+ if (len != last) {
+ fprintf(stderr,
+"WARNING: unterminated output from tar pipe while extracting file list of %s\n", debfile);
+ list[len] = '\0';
+ fprintf(stderr, "The item '%s' might got lost.\n",
+ list+last);
+ result = RET_ERROR;
+ } else {
+ char *n = realloc(list, len+1);
+ if (FAILEDTOALLOC(n))
+ result = RET_ERROR_OOM;
+ else {
+ list = n;
+ list[len] = '\0';
+ }
+ }
+ close(pipe_2[0]);
+
+ while (ar != -1 || tar != -1) {
+ pid=wait(&status);
+ if (pid < 0) {
+ if (errno != EINTR)
+ RET_UPDATE(result, RET_ERRNO(errno));
+ } else {
+ if (pid == ar) {
+ ar = -1;
+ if (!WIFEXITED(status) ||
+ WEXITSTATUS(status) != 0) {
+ fprintf(stderr,
+"Error from ar for '%s': %d\n", debfile, WEXITSTATUS(status));
+ result = RET_ERROR;
+ }
+ } else if (pid == tar) {
+ tar = -1;
+ if (!WIFEXITED(status) ||
+ WEXITSTATUS(status) != 0) {
+ fprintf(stderr,
+"Error from tar for data.tar.gz within '%s': %d\n",
+ debfile,
+ WEXITSTATUS(status));
+ result = RET_ERROR;
+ }
+ } else {
+ // WTH?
+ fprintf(stderr,
+"Who is %d, and why does this bother me?\n", pid);
+ }
+ }
+ }
+ if (RET_IS_OK(result))
+ return filelistcompressor_finish(&c, filelist);
+ else
+ filelistcompressor_cancel(&c);
+ return result;
+#endif
+}
diff --git a/filecntl.c b/filecntl.c
new file mode 100644
index 0000000..6cb9c9e
--- /dev/null
+++ b/filecntl.c
@@ -0,0 +1,89 @@
+/* written 2007 by Bernhard R. Link
+ * This file is in the public domain.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <config.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+
+#include "filecntl.h"
+
#ifndef HAVE_CLOSEFROM
/* Close all file descriptors >= lowfd.
 * Fallback for systems lacking a native closefrom(). Errors from
 * close() are deliberately ignored (most descriptors are not open). */
void closefrom(int lowfd) {
	long maxopen;
	int fd;

# ifdef F_CLOSEM
	/* some systems can do this with a single fcntl */
	if (fcntl(lowfd, F_CLOSEM, NULL) == 0)
		return;
# endif
	maxopen = sysconf(_SC_OPEN_MAX);
	if (maxopen > INT_MAX)
		maxopen = INT_MAX;
	if (maxopen < 0)
		maxopen = 1024;
	/* valid descriptors are 0 .. maxopen-1, so iterate with '<':
	 * the old '<= maxopen' closed one descriptor past the limit and,
	 * when maxopen was clamped to INT_MAX, made fd++ overflow (UB) */
	for (fd = lowfd ; fd < maxopen ; fd++)
		(void)close(fd);
}
#endif
+
/* Set the close-on-exec flag on fd; any errors are silently ignored. */
void markcloseonexec(int fd) {
	long flags = fcntl(fd, F_GETFD, 0);

	if (flags < 0)
		return;
	(void)fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
+
/* unlink() fullfilename; on failure print a diagnostic and return a
 * non-zero errno value, on success return 0. */
int deletefile(const char *fullfilename) {
	int e;

	if (unlink(fullfilename) == 0)
		return 0;
	e = errno;
	fprintf(stderr, "error %d unlinking %s: %s\n",
			e, fullfilename, strerror(e));
	/* make sure the caller always sees a non-zero failure code */
	return (e != 0)?e:EINVAL;
}
+
+bool isregularfile(const char *fullfilename) {
+ struct stat s;
+ int i;
+
+ assert(fullfilename != NULL);
+ i = stat(fullfilename, &s);
+ return i == 0 && S_ISREG(s.st_mode);
+}
+
+bool isdirectory(const char *fullfilename) {
+ struct stat s;
+ int i;
+
+ assert(fullfilename != NULL);
+ i = stat(fullfilename, &s);
+ return i == 0 && S_ISDIR(s.st_mode);
+}
+
+bool isanyfile(const char *fullfilename) {
+ struct stat s;
+ int i;
+
+ assert(fullfilename != NULL);
+ i = lstat(fullfilename, &s);
+ return i == 0;
+}
diff --git a/filecntl.h b/filecntl.h
new file mode 100644
index 0000000..5d8d6cc
--- /dev/null
+++ b/filecntl.h
@@ -0,0 +1,13 @@
#ifndef REPREPRO_FILECNTL_H
#define REPREPRO_FILECNTL_H

/* needed for the bool prototypes below; previously this header relied
 * on every includer providing stdbool.h first */
#include <stdbool.h>

#ifndef HAVE_CLOSEFROM
/* fallback for systems without a native closefrom() */
void closefrom(int);
#endif
/* set FD_CLOEXEC on a descriptor (errors ignored) */
void markcloseonexec(int);
/* unlink a file, printing a message on failure; 0 or an errno value */
int deletefile(const char *);
/* lstat-based: true if anything (file, dir, symlink, ...) exists there */
bool isanyfile(const char *);
/* stat-based: true if a regular file */
bool isregularfile(const char *);
/* stat-based: true if a directory */
bool isdirectory(const char *fullfilename);

#endif
diff --git a/filelist.c b/filelist.c
new file mode 100644
index 0000000..1cb7258
--- /dev/null
+++ b/filelist.c
@@ -0,0 +1,735 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "error.h"
+#include "database_p.h"
+#include "files.h"
+#include "chunks.h"
+#include "package.h"
+#include "debfile.h"
+#include "filelist.h"
+
/* one package, stored as "section/packagename" in the flexible array;
 * entries in the file trees below point into these strings */
struct filelist_package {
	struct filelist_package *next;
	char name[];
};

struct dirlist;
/* node of a balanced (AVL-style, see the rebalancing in findfile())
 * binary tree of the files within one directory; nextl/nextr are tree
 * links between siblings, not directory contents */
struct filelist {
	struct filelist *nextl;
	struct filelist *nextr;
	/* balance factor: decremented on left insert, incremented on right */
	int balance;
	char *name;
	/* number of entries used in packages[] */
	size_t count;
	/* flexible array of pointers into filelist_package::name */
	const char *packages[];
};
/* a directory: node of a balanced tree of sibling directories, itself
 * holding a tree of subdirectories and a tree of files */
struct dirlist {
	struct dirlist *nextl;
	struct dirlist *nextr;
	int balance;
	/*@dependant@*/ struct dirlist *parent;
	struct dirlist *subdirs;
	struct filelist *files;
	/* NOTE(review): not touched anywhere in this file's visible code;
	 * presumably used when emitting the list — confirm before removing */
	/*@dependant@*/struct filelist *lastfile;
	size_t len;
	/* directory name (not NUL-terminated; len bytes) */
	char name[];
};

/* everything collected for a Contents run: the directory tree plus the
 * owning package records (kept only so they can be freed) */
struct filelist_list {
	struct dirlist *root;
	struct filelist_package *packages;
};
+
+retvalue filelist_init(struct filelist_list **list) {
+ struct filelist_list *filelist;
+
+ filelist = zNEW(struct filelist_list);
+ if (FAILEDTOALLOC(filelist))
+ return RET_ERROR_OOM;
+ filelist->root = zNEW(struct dirlist);
+ if (FAILEDTOALLOC(filelist->root)) {
+ free(filelist);
+ return RET_ERROR_OOM;
+ }
+ *list = filelist;
+ return RET_OK;
+};
+static void files_free(/*@only@*/struct filelist *list) {
+ if (list == NULL)
+ return;
+ files_free(list->nextl);
+ files_free(list->nextr);
+ free(list->name);
+ free(list);
+}
+static void dirlist_free(/*@only@*/struct dirlist *list) {
+ if (list == NULL)
+ return;
+ files_free(list->files);
+ dirlist_free(list->subdirs);
+ dirlist_free(list->nextl);
+ dirlist_free(list->nextr);
+ free(list);
+}
+void filelist_free(struct filelist_list *list) {
+
+ if (list == NULL)
+ return;
+ dirlist_free(list->root);
+ while (list->packages != NULL) {
+ struct filelist_package *package = list->packages;
+ list->packages = package->next;
+ free(package);
+ }
+ free(list);
+};
+
+static retvalue filelist_newpackage(struct filelist_list *filelist, const char *name, const char *section, const struct filelist_package **pkg) {
+ struct filelist_package *p;
+ size_t name_len = strlen(name);
+ size_t section_len = strlen(section);
+
+ p = malloc(sizeof(struct filelist_package)+name_len+section_len+2);
+ if (FAILEDTOALLOC(p))
+ return RET_ERROR_OOM;
+ p->next = filelist->packages;
+ memcpy(p->name, section, section_len);
+ p->name[section_len] = '/';
+ memcpy(p->name+section_len+1, name, name_len+1);
+ filelist->packages = p;
+ *pkg = p;
+ return RET_OK;
+};
+
/* Record that packagename contains the file basefilename[0..namelen)
 * in directory parent. If the file node already exists its owner array
 * is grown; otherwise a node is inserted into the AVL-style file tree
 * and the tree is rebalanced. Returns false on allocation failure.
 * The stack holds the root-to-node path of parent links needed for the
 * bottom-up rebalancing (128 levels is far more than an AVL tree of any
 * realistic size can reach). */
static bool findfile(struct dirlist *parent, const char *packagename, const char *basefilename, size_t namelen) {
	struct filelist *file, *n, *last;
	struct filelist **stack[128];
	int stackpointer = 0;

	stack[stackpointer++] = &parent->files;
	file = parent->files;

	while (file != NULL) {
		int c = strncmp(basefilename, file->name, namelen);
		if (c == 0 && file->name[namelen] == '\0') {
			/* already known: append one more owner; realloc may
			 * move the node, so the parent link is updated too */
			n = realloc(file, sizeof(struct filelist)+
					(file->count+1)*sizeof(const char*));
			if (n == NULL)
				return false;
			n->packages[n->count++] = packagename;
			*(stack[--stackpointer]) = n;
			return true;
		} else if (c > 0) {
			stack[stackpointer++] = &file->nextr;
			file = file->nextr;
		} else {
			stack[stackpointer++] = &file->nextl;
			file = file->nextl;
		}
	}
	/* not found: create a leaf with a single owner */
	n = malloc(sizeof(struct filelist)+sizeof(const char*));
	if (FAILEDTOALLOC(n))
		return false;
	n->name = strndup(basefilename, namelen);
	n->nextl = NULL;
	n->nextr = NULL;
	n->balance = 0;
	n->count = 1;
	n->packages[0] = packagename;
	if (FAILEDTOALLOC(n->name)) {
		free(n);
		return false;
	}
	*(stack[--stackpointer]) = n;
	/* walk back up the recorded path, updating balance factors and
	 * rotating where a subtree got two levels out of balance */
	while (stackpointer > 0) {
		file = *(stack[--stackpointer]);
		if (file->nextl == n) {
			/* insertion happened in the left subtree */
			file->balance--;
			if (file->balance > -1)
				break;
			if (file->balance == -1) {
				n = file;
				continue;
			}
			if (n->balance == -1) {
				/* single right rotation */
				file->nextl = n->nextr;
				file->balance = 0;
				n->nextr = file;
				n->balance = 0;
				*(stack[stackpointer]) = n;
				break;
			} else {
				/* double (left-right) rotation */
				last = n->nextr;
				file->nextl = last->nextr;
				*(stack[stackpointer]) = last;
				last->nextr = file;
				n->nextr = last->nextl;
				last->nextl = n;
				if (last->balance == 0) {
					file->balance = 0;
					n->balance = 0;
				} else if (last->balance < 0) {
					file->balance = 1;
					n->balance = 0;
				} else {
					file->balance = 0;
					n->balance = -1;
				}
				last->balance = 0;
				break;
			}
		} else {
			/* insertion happened in the right subtree */
			file->balance++;
			if (file->balance < 1)
				break;
			if (file->balance == 1) {
				n = file;
				continue;
			}
			if (n->balance == 1) {
				/* single left rotation */
				file->nextr = n->nextl;
				file->balance = 0;
				n->nextl = file;
				n->balance = 0;
				*(stack[stackpointer]) = n;
				break;
			} else {
				/* double (right-left) rotation */
				last = n->nextl;
				file->nextr = last->nextl;
				*(stack[stackpointer]) = last;
				last->nextl = file;
				n->nextl = last->nextr;
				last->nextr = n;
				if (last->balance == 0) {
					file->balance = 0;
					n->balance = 0;
				} else if (last->balance > 0) {
					file->balance = -1;
					n->balance = 0;
				} else {
					file->balance = 0;
					n->balance = 1;
				}
				last->balance = 0;
				break;
			}
		}
	}
	return true;
}
+
typedef const unsigned char cuchar;

/* Look up (or create) the subdirectory name[0..namelen) of dir in its
 * AVL-style subdirs tree, rebalancing after an insertion; returns the
 * node, or NULL on allocation failure. Same stack/rotation scheme as
 * findfile() above, but directory names are length-prefixed rather than
 * NUL-terminated. */
static struct dirlist *finddir(struct dirlist *dir, cuchar *name, size_t namelen) {
	struct dirlist *d, *this, *parent, *h;
	struct dirlist **stack[128];
	int stackpointer = 0;

	stack[stackpointer++] = &dir->subdirs;
	d = dir->subdirs;

	while (d != NULL) {
		int c;

		/* compare only the shorter length; ties are broken by
		 * treating the shorter name as smaller */
		if (namelen < d->len) {
			c = memcmp(name, d->name, namelen);
			if (c <= 0) {
				stack[stackpointer++] = &d->nextl;
				d = d->nextl;
			} else {
				stack[stackpointer++] = &d->nextr;
				d = d->nextr;
			}
		} else {
			c = memcmp(name, d->name, d->len);
			if (c == 0 && d->len == namelen) {
				return d;
			} else if (c >= 0) {
				stack[stackpointer++] = &d->nextr;
				d = d->nextr;
			} else {
				stack[stackpointer++] = &d->nextl;
				d = d->nextl;
			}
		}
	}
	/* not found, create it and rebalance */
	d = malloc(sizeof(struct dirlist) + namelen);
	if (FAILEDTOALLOC(d))
		return d;
	d->subdirs = NULL;
	d->nextl = NULL;
	d->nextr = NULL;
	d->balance = 0;
	d->parent = dir;
	d->files = NULL;
	d->len = namelen;
	memcpy(d->name, name, namelen);
	*(stack[--stackpointer]) = d;
	this = d;
	/* bottom-up rebalancing along the recorded search path */
	while (stackpointer > 0) {
		parent = *(stack[--stackpointer]);
		if (parent->nextl == this) {
			parent->balance--;
			if (parent->balance > -1)
				break;
			if (parent->balance == -1) {
				this = parent;
				continue;
			}
			if (this->balance == -1) {
				/* single right rotation */
				parent->nextl = this->nextr;
				parent->balance = 0;
				this->nextr = parent;
				this->balance = 0;
				*(stack[stackpointer]) = this;
				break;
			} else {
				/* double (left-right) rotation */
				h = this->nextr;
				parent->nextl = h->nextr;
				*(stack[stackpointer]) = h;
				h->nextr = parent;
				this->nextr = h->nextl;
				h->nextl = this;
				if (h->balance == 0) {
					parent->balance = 0;
					this->balance = 0;
				} else if (h->balance < 0) {
					parent->balance = 1;
					this->balance = 0;
				} else {
					parent->balance = 0;
					this->balance = -1;
				}
				h->balance = 0;
				break;
			}
		} else {
			parent->balance++;
			if (parent->balance < 1)
				break;
			if (parent->balance == 1) {
				this = parent;
				continue;
			}
			if (this->balance == 1) {
				/* single left rotation */
				parent->nextr = this->nextl;
				parent->balance = 0;
				this->nextl = parent;
				this->balance = 0;
				*(stack[stackpointer]) = this;
				break;
			} else {
				/* double (right-left) rotation */
				h = this->nextl;
				parent->nextr = h->nextl;
				*(stack[stackpointer]) = h;
				h->nextl = parent;
				this->nextl = h->nextr;
				h->nextr = this;
				if (h->balance == 0) {
					parent->balance = 0;
					this->balance = 0;
				} else if (h->balance > 0) {
					parent->balance = -1;
					this->balance = 0;
				} else {
					parent->balance = 0;
					this->balance = 1;
				}
				h->balance = 0;
				break;
			}
		}
	}
	return d;
}
+
/* Replay a compressed file-list blob into the directory/file trees,
 * attributing every file to package.
 *
 * Wire format (as decoded below): a sequence of opcode bytes, terminated
 * by a single 0 byte at offset size-1:
 *   1  -> a file entry follows: a length (a run of 255-bytes summed up,
 *         plus one final non-zero byte) and that many name bytes,
 *   2  -> descend into a subdirectory (same length+name encoding),
 *   d>2 -> ascend d-2 directory levels (clamped at the root).
 * Returns RET_ERROR on malformed data, RET_ERROR_OOM, or RET_OK. */
static retvalue filelist_addfiles(struct filelist_list *list, const struct filelist_package *package, const char *filekey, const char *datastart, size_t size) {
	struct dirlist *curdir = list->root;
	const unsigned char *data = (const unsigned char *)datastart;

	while (*data != '\0') {
		int d;

		if ((size_t)(data - (const unsigned char *)datastart) >= size-1) {
			/* This might not catch everything, but we are only
			 * accessing it readonly */
			fprintf(stderr, "Corrupted file list data for %s\n",
					filekey);
			return RET_ERROR;
		}
		d = *(data++);
		if (d == 1) {
			/* file entry: decode length, register the name */
			size_t len = 0;
			while (*data == 255) {
				data++;
				len += 255;
			}
			if (*data == 0) {
				/* a length must end in a non-zero byte */
				fprintf(stderr,
					"Corrupted file list data for %s\n",
					filekey);
				return RET_ERROR;
			}
			len += *(data++);
			if (!findfile(curdir, package->name, (const char*)data, len))
				return RET_ERROR_OOM;
			data += len;
		} else if (d == 2) {
			/* enter (possibly creating) a subdirectory */
			size_t len = 0;
			while (*data == 255) {
				data++;
				len += 255;
			}
			if (*data == 0) {
				fprintf(stderr,
					"Corrupted file list data for %s\n",
					filekey);
				return RET_ERROR;
			}
			len += *(data++);
			curdir = finddir(curdir, data, len);
			if (FAILEDTOALLOC(curdir))
				return RET_ERROR_OOM;
			data += len;
		} else {
			/* go up d-2 levels, never past the root */
			d -= 2;
			while (d-- > 0 && curdir->parent != NULL)
				curdir = curdir->parent;
		}
	}
	/* the terminating 0 must sit exactly at the last byte */
	if ((size_t)(data - (const unsigned char *)datastart) != size-1) {
		fprintf(stderr,
"Corrupted file list data for %s (format suggest %llu, is %llu)\n",
				filekey,
				(unsigned long long)(data -
					(const unsigned char *)datastart),
				(unsigned long long)(size-1));
		return RET_ERROR;
	}
	return RET_OK;
}
+
+retvalue filelist_addpackage(struct filelist_list *list, struct package *pkg) {
+ const struct filelist_package *package;
+ char *debfilename, *contents = NULL;
+ retvalue r;
+ const char *c;
+ size_t len;
+ char *section, *filekey;
+
+ r = chunk_getvalue(pkg->control, "Section", &section);
+ /* Ignoring packages without section, as they should not exist anyway */
+ if (!RET_IS_OK(r))
+ return r;
+ r = chunk_getvalue(pkg->control, "Filename", &filekey);
+ /* dito with filekey */
+ if (!RET_IS_OK(r)) {
+ free(section);
+ return r;
+ }
+
+ r = filelist_newpackage(list, pkg->name, section, &package);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ free(filekey);
+ free(section);
+ return r;
+ }
+
+ r = table_gettemprecord(rdb_contents, filekey, &c, &len);
+ if (r == RET_NOTHING) {
+ if (verbose > 3)
+ printf("Reading filelist for %s\n", filekey);
+ debfilename = files_calcfullfilename(filekey);
+ if (FAILEDTOALLOC(debfilename)) {
+ free(filekey);
+ free(section);
+ return RET_ERROR_OOM;
+ }
+ r = getfilelist(&contents, &len, debfilename);
+ len--;
+ free(debfilename);
+ c = contents;
+ }
+ if (RET_IS_OK(r)) {
+ r = filelist_addfiles(list, package, filekey, c, len + 1);
+ if (contents != NULL)
+ r = table_adduniqsizedrecord(rdb_contents, filekey,
+ contents, len + 1, true, false);
+ }
+ free(contents);
+ free(filekey);
+ free(section);
+ return r;
+}
+
+retvalue fakefilelist(const char *filekey) {
+ return table_adduniqsizedrecord(rdb_contents, filekey,
+ "", 1, true, false);
+}
+
+static const char separator_chars[] = "\t ";
+
+static void filelist_writefiles(char *dir, size_t len,
+ struct filelist *files, struct filetorelease *file) {
+ unsigned int i;
+ bool first;
+
+ if (files == NULL)
+ return;
+ filelist_writefiles(dir, len, files->nextl, file);
+ (void)release_writedata(file, dir, len);
+ (void)release_writestring(file, files->name);
+ (void)release_writedata(file, separator_chars,
+ sizeof(separator_chars) - 1);
+ first = true;
+ for (i = 0 ; i < files->count ; i ++) {
+ if (!first)
+ (void)release_writestring(file, ",");
+ first = false;
+ (void)release_writestring(file, files->packages[i]);
+ }
+ (void)release_writestring(file, "\n");
+ filelist_writefiles(dir, len, files->nextr, file);
+}
+
/* Walk the binary tree of sibling directories (nextl/nextr) in order,
 * keeping the accumulated path prefix of the current directory in
 * *buffer_p (grown on demand, *size_p tracks its capacity).  For each
 * directory its files are written first, then its subdirectories are
 * handled recursively.  On allocation failure *buffer_p is freed and
 * set to NULL before returning RET_ERROR_OOM. */
static retvalue filelist_writedirs(char **buffer_p, size_t *size_p, size_t ofs, struct dirlist *dir, struct filetorelease *file) {

	if (dir->nextl != NULL) {
		retvalue r;
		r = filelist_writedirs(buffer_p, size_p, ofs, dir->nextl, file);
		if (RET_WAS_ERROR(r))
			return r;
	}
	{ size_t len = dir->len;
	register retvalue r;

		/* room for prefix + this name + '/' + terminator */
		if (ofs+len+2 >= *size_p) {
			char *n;

			*size_p += 1024*(1+(len/1024));
			n = realloc(*buffer_p, *size_p);
			if (FAILEDTOALLOC(n)) {
				free(*buffer_p);
				*buffer_p = NULL;
				return RET_ERROR_OOM;
			}
			*buffer_p = n;
		}
		/* append this directory's name and a slash to the prefix */
		memcpy((*buffer_p) + ofs, dir->name, len);
		(*buffer_p)[ofs + len] = '/';
		// TODO: output files and directories sorted together instead
		filelist_writefiles(*buffer_p, ofs+len+1, dir->files, file);
		if (dir->subdirs == NULL)
			r = RET_OK;
		else
			r = filelist_writedirs(buffer_p, size_p, ofs+len+1,
					dir->subdirs, file);
		if (dir->nextr == NULL)
			return r;
		if (RET_WAS_ERROR(r))
			return r;
	}
	/* tail recursion for the right sibling */
	return filelist_writedirs(buffer_p, size_p, ofs, dir->nextr, file);
}
+
+retvalue filelist_write(struct filelist_list *list, struct filetorelease *file) {
+ size_t size = 1024;
+ char *buffer = malloc(size);
+ retvalue r;
+
+ if (FAILEDTOALLOC(buffer))
+ return RET_ERROR_OOM;
+
+ buffer[0] = '\0';
+ filelist_writefiles(buffer, 0, list->root->files, file);
+ if (list->root->subdirs != NULL)
+ r = filelist_writedirs(&buffer, &size, 0,
+ list->root->subdirs, file);
+ else
+ r = RET_OK;
+ free(buffer);
+ return r;
+}
+
+/* helpers for filelist generators to get the preprocessed form */
+
+retvalue filelistcompressor_setup(/*@out@*/struct filelistcompressor *c) {
+ c->size = 2000; c->len = 0;
+ c->filelist = malloc(c->size);
+ if (FAILEDTOALLOC(c->filelist))
+ return RET_ERROR_OOM;
+ c->dirdepth = 0;
+ return RET_OK;
+}
+
+static inline bool filelistcompressor_space(struct filelistcompressor *c, size_t len) {
+ if (c->len + len + 2 >= c->size) {
+ char *n;
+
+ if (c->size > 1024*1024*1024) {
+ fprintf(stderr, "Ridiculously long file list!\n");
+ return false;
+ }
+ c->size = c->len + len + 2048;
+ n = realloc(c->filelist, c->size);
+ if (FAILEDTOALLOC(n))
+ return false;
+ c->filelist = n;
+ }
+ return true;
+}
+
/* Append one path to the compressed list.  The encoding shares the
 * directory stack with the previously added path: a byte > 2 closes
 * (byte - 2) directories, byte 2 opens a directory, byte 1 adds a file;
 * each name is prefixed with its length (runs of 255-bytes plus a final
 * byte in the decoder; the encoder below only ever emits a single byte
 * since components of 255+ chars are rejected).  Paths are expected in
 * an order where common prefixes are adjacent so they collapse well;
 * see filelist_addfiles for the decoder.
 * Returns RET_NOTHING for names that cannot be encoded (a component of
 * 255+ chars or nesting deeper than 253 levels). */
retvalue filelistcompressor_add(struct filelistcompressor *c, const char *name, size_t name_len) {
	unsigned int depth;
	const char *separator;

	/* check if it is already in the current dir or a subdir of that: */
	if (name_len > 0 && *name == '.') {
		name++; name_len--;
	}
	while (name_len > 0 && *name == '/') {
		name++; name_len--;
	}
	for (depth = 0; depth < c->dirdepth ; depth++) {
		/* offsets[depth] points at the length byte of the
		 * directory name opened at that depth */
		const unsigned char *u =(unsigned char *)c->filelist
			+ c->offsets[depth];
		size_t dir_len = 0;
		while (*u == 255) {
			dir_len += 255;
			u++;
		}
		dir_len += *(u++);
		if (dir_len >= name_len)
			break;
		if (memcmp(u, name, dir_len) != 0 || name[dir_len] != '/')
			break;
		/* this open directory is a prefix of name: strip it */
		name += dir_len + 1;
		name_len -= dir_len + 1;
	}
	if (depth < c->dirdepth) {
		/* close the directories that no longer match */
		if (!filelistcompressor_space(c, 1))
			return RET_ERROR_OOM;
		c->filelist[c->len++] = (unsigned char)2 +
			c->dirdepth - depth;
		c->dirdepth = depth;
	}
	while ((separator = memchr(name, '/', name_len)) != NULL) {
		size_t dirlen = separator - name;
		/* ignore files within directories with more than 255 chars */
		if (dirlen >= 255)
			return RET_NOTHING;
		/* ignore too deep paths */
		if (c->dirdepth > 252)
			return RET_NOTHING;
		/* add directory */
		if (!filelistcompressor_space(c, 2 + dirlen))
			return RET_ERROR_OOM;
		c->filelist[c->len++] = 2;
		c->offsets[c->dirdepth++] = c->len;
		c->filelist[c->len++] = dirlen;
		memcpy(c->filelist + c->len, name, dirlen);
		c->len += dirlen;
		name += dirlen+1;
		name_len -= dirlen+1;
		while (name_len > 0 && *name == '/') {
			name++; name_len--;
		}
	}
	if (name_len >= 255)
		return RET_NOTHING;
	/* all directories created, now only the file is left */
	if (!filelistcompressor_space(c, 2 + name_len))
		return RET_ERROR_OOM;
	c->filelist[c->len++] = 1;
	c->filelist[c->len++] = name_len;
	memcpy(c->filelist + c->len, name, name_len);
	c->len += name_len;
	return RET_OK;
}
+
+retvalue filelistcompressor_finish(struct filelistcompressor *c, /*@out@*/char **list, /*@out@*/size_t *size) {
+ char *l;
+
+ l = realloc(c->filelist, c->len+1);
+ if (FAILEDTOALLOC(l)) {
+ free(c->filelist);
+ return RET_ERROR_OOM;
+ }
+ l[c->len] = '\0';
+ *list = l;
+ *size = c->len+1;
+ return RET_OK;
+}
+
/* Discard a partially built compressed list (the error-path counterpart
 * of filelistcompressor_finish). */
void filelistcompressor_cancel(struct filelistcompressor *c) {
	free(c->filelist);
}
+
/* Convert every record of <oldtable> (old format: a sequence of
 * '\0'-terminated file names, ended by an empty name) into the
 * compressed format and store it in <newtable> under the same filekey.
 * Stops at the first error. */
retvalue filelists_translate(struct table *oldtable, struct table *newtable) {
	retvalue r;
	struct cursor *cursor;
	const char *filekey, *olddata;
	size_t olddata_len, newdata_size;
	char *newdata;

	r = table_newglobalcursor(oldtable, true, &cursor);
	if (!RET_IS_OK(r))
		return r;
	while (cursor_nexttempdata(oldtable, cursor, &filekey,
				&olddata, &olddata_len)) {
		const char *p;
		size_t l;
		struct filelistcompressor c;

		r = filelistcompressor_setup(&c);
		if (RET_WAS_ERROR(r))
			break;
		/* feed each '\0'-terminated name into the compressor */
		for (p = olddata ; (l = strlen(p)) != 0 ; p += l + 1) {
			r = filelistcompressor_add(&c, p, l);
			if (RET_WAS_ERROR(r))
				break;
		}
		if (RET_WAS_ERROR(r)) {
			filelistcompressor_cancel(&c);
			break;
		}
		/* finish frees the buffer itself on failure */
		r = filelistcompressor_finish(&c, &newdata, &newdata_size);
		if (!RET_IS_OK(r))
			break;
		r = table_adduniqsizedrecord(newtable, filekey,
				newdata, newdata_size, false, false);
		free(newdata);
		if (RET_WAS_ERROR(r))
			break;

	}
	if (RET_WAS_ERROR(r)) {
		(void)cursor_close(oldtable, cursor);
		return r;
	}
	r = cursor_close(oldtable, cursor);
	if (RET_WAS_ERROR(r))
		return r;
	return RET_OK;
}
diff --git a/filelist.h b/filelist.h
new file mode 100644
index 0000000..b1ab52a
--- /dev/null
+++ b/filelist.h
@@ -0,0 +1,33 @@
#ifndef REPREPRO_FILELIST_H
#define REPREPRO_FILELIST_H

#ifndef REPREPRO_RELEASE_H
#include "release.h"
#endif

struct filelist_list;
struct package;

/* create an empty in-memory file list collection */
retvalue filelist_init(struct filelist_list **list);

/* add the contents of one binary package (reads the .deb if not cached) */
retvalue filelist_addpackage(struct filelist_list *, struct package *);

/* write the collected lists to a Contents file */
retvalue filelist_write(struct filelist_list *list, struct filetorelease *file);

void filelist_free(/*@only@*/struct filelist_list *);

/* record an empty file list for a filekey */
retvalue fakefilelist(const char *filekey);
/* convert records from the plain to the compressed list format */
retvalue filelists_translate(struct table *, struct table *);

/* for use in routines reading the data: */
struct filelistcompressor {
	unsigned int offsets[256];	/* per open directory: offset of its length byte */
	size_t size, len;		/* buffer capacity / bytes used */
	unsigned int dirdepth;		/* number of currently open directories */
	char *filelist;			/* the encoded data being built */
};
retvalue filelistcompressor_setup(/*@out@*/struct filelistcompressor *);
retvalue filelistcompressor_add(struct filelistcompressor *, const char *, size_t);
retvalue filelistcompressor_finish(struct filelistcompressor *, /*@out@*/char **, /*@out@*/size_t *);
void filelistcompressor_cancel(struct filelistcompressor *);
#endif
diff --git a/files.c b/files.c
new file mode 100644
index 0000000..0f3b618
--- /dev/null
+++ b/files.c
@@ -0,0 +1,817 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include "error.h"
+#include "strlist.h"
+#include "filecntl.h"
+#include "names.h"
+#include "checksums.h"
+#include "dirs.h"
+#include "names.h"
+#include "files.h"
+#include "ignore.h"
+#include "filelist.h"
+#include "debfile.h"
+#include "pool.h"
+#include "database_p.h"
+
+static retvalue files_get_checksums(const char *filekey, /*@out@*/struct checksums **checksums_p) {
+ const char *checksums;
+ size_t checksumslen;
+ retvalue r;
+
+ r = table_gettemprecord(rdb_checksums, filekey,
+ &checksums, &checksumslen);
+ if (!RET_IS_OK(r))
+ return r;
+ return checksums_setall(checksums_p, checksums, checksumslen);
+}
+
+retvalue files_add_checksums(const char *filekey, const struct checksums *checksums) {
+ retvalue r;
+ const char *combined;
+ size_t combinedlen;
+
+ assert (rdb_checksums != NULL);
+ r = checksums_getcombined(checksums, &combined, &combinedlen);
+ if (!RET_IS_OK(r))
+ return r;
+ r = table_adduniqsizedrecord(rdb_checksums, filekey,
+ combined, combinedlen + 1, true, false);
+ if (!RET_IS_OK(r))
+ return r;
+ return pool_markadded(filekey);
+}
+
+static retvalue files_replace_checksums(const char *filekey, const struct checksums *checksums) {
+ retvalue r;
+ const char *combined;
+ size_t combinedlen;
+
+ assert (rdb_checksums != NULL);
+ r = checksums_getcombined(checksums, &combined, &combinedlen);
+ if (!RET_IS_OK(r))
+ return r;
+ return table_adduniqsizedrecord(rdb_checksums, filekey,
+ combined, combinedlen + 1, true, false);
+}
+
+/* remove file's md5sum from database */
+retvalue files_removesilent(const char *filekey) {
+ retvalue r;
+
+ if (rdb_contents != NULL)
+ (void)table_deleterecord(rdb_contents, filekey, true);
+ r = table_deleterecord(rdb_checksums, filekey, true);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Unable to forget unknown filekey '%s'.\n",
+ filekey);
+ return RET_ERROR_MISSING;
+ }
+ return r;
+}
+
+retvalue files_remove(const char *filekey) {
+ retvalue r;
+
+ r = files_removesilent(filekey);
+ if (RET_IS_OK(r))
+ return pool_markdeleted(filekey);
+ return r;
+}
+
/* hardlink file with known checksums and add it to database */
retvalue files_hardlinkandadd(const char *tempfile, const char *filekey, const struct checksums *checksums) {
	retvalue r;

	/* an additional check to make sure nothing tricks us into
	 * overwriting it by another file */
	r = files_canadd(filekey, checksums);
	if (!RET_IS_OK(r))
		return r;
	/* place the file into the pool below outdir */
	r = checksums_hardlink(global.outdir, filekey, tempfile, checksums);
	if (RET_WAS_ERROR(r))
		return r;

	return files_add_checksums(filekey, checksums);
}
+
+/* check if file is already there (RET_NOTHING) or could be added (RET_OK)
+ * or RET_ERROR_WRONG_MD5SUM if filekey already has different md5sum */
+retvalue files_canadd(const char *filekey, const struct checksums *checksums) {
+ retvalue r;
+ struct checksums *indatabase;
+ bool improves;
+
+ r = files_get_checksums(filekey, &indatabase);
+ if (r == RET_NOTHING)
+ return RET_OK;
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (!checksums_check(indatabase, checksums, &improves)) {
+ fprintf(stderr,
+"File \"%s\" is already registered with different checksums!\n",
+ filekey);
+ checksums_printdifferences(stderr, indatabase, checksums);
+ checksums_free(indatabase);
+ return RET_ERROR_WRONG_MD5;
+
+ }
+ // TODO: sometimes the caller might want to have additional
+ // checksums from the database already, think about ways to
+ // make them available...
+ checksums_free(indatabase);
+ return RET_NOTHING;
+}
+
+
/* check for file in the database and if not found there, if it can be detected */
retvalue files_expect(const char *filekey, const struct checksums *checksums, bool warnifadded) {
	retvalue r;
	char *filename;
	struct checksums *improvedchecksums = NULL;

	r = files_canadd(filekey, checksums);
	/* RET_NOTHING: already registered with matching checksums */
	if (r == RET_NOTHING)
		return RET_OK;
	if (RET_WAS_ERROR(r))
		return r;

	/* ready to add means missing, so have to look for the file itself: */
	filename = files_calcfullfilename(filekey);
	if (FAILEDTOALLOC(filename))
		return RET_ERROR_OOM;

	/* first check if a possible manually put (or left over from previous
	 * download attempts) file is there and is correct */
	r = checksums_test(filename, checksums, &improvedchecksums);
	if (r == RET_ERROR_WRONG_MD5) {
		fprintf(stderr,
"Deleting unexpected file '%s'!\n"
"(not in database and wrong in pool)\n ",
				filename);
		if (unlink(filename) == 0)
			r = RET_NOTHING;
		else {
			int e = errno;
			fprintf(stderr,
"Error %d deleting '%s': %s!\n", e, filename, strerror(e));
		}
	}
	free(filename);
	/* RET_NOTHING here: no usable file in the pool either */
	if (!RET_IS_OK(r))
		return r;

	if (warnifadded)
		fprintf(stderr,
"Warning: readded existing file '%s' mysteriously missing from the checksum database.\n",
				filekey);

	// TODO: some callers might want the updated checksum when
	// improves is true, how to get them there?

	/* add found file to database */
	if (improvedchecksums != NULL) {
		r = files_add_checksums(filekey, improvedchecksums);
		checksums_free(improvedchecksums);
	} else
		r = files_add_checksums(filekey, checksums);
	assert (r != RET_NOTHING);
	return r;
}
+
+/* check for several files in the database and in the pool if missing */
+retvalue files_expectfiles(const struct strlist *filekeys, struct checksums *checksumsarray[]) {
+ int i;
+ retvalue r;
+
+ for (i = 0 ; i < filekeys->count ; i++) {
+ const char *filekey = filekeys->values[i];
+ const struct checksums *checksums = checksumsarray[i];
+
+ r = files_expect(filekey, checksums, verbose >= 0);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING) {
+ /* File missing */
+ fprintf(stderr, "Missing file %s\n", filekey);
+ return RET_ERROR_MISSING;
+ }
+ }
+ return RET_OK;
+}
+
+static inline retvalue checkorimprove(const char *filekey, struct checksums **checksums_p) {
+ const struct checksums *checksums = *checksums_p;
+ struct checksums *indatabase;
+ bool improves;
+ retvalue r;
+
+ r = files_get_checksums(filekey, &indatabase);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Missing file %s\n", filekey);
+ return RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (!checksums_check(checksums, indatabase, &improves)) {
+ fprintf(stderr,
+"File \"%s\" is already registered with different checksums!\n",
+ filekey);
+ checksums_printdifferences(stderr, indatabase, checksums);
+ r = RET_ERROR_WRONG_MD5;
+ } else if (improves) {
+ r = checksums_combine(checksums_p, indatabase, NULL);
+ } else
+ r = RET_NOTHING;
+ checksums_free(indatabase);
+ return r;
+}
+
+
+/* check for several files in the database and update information,
+ * return RET_NOTHING if everything is OK and nothing needs improving */
+retvalue files_checkorimprove(const struct strlist *filekeys, struct checksums *checksumsarray[]) {
+ int i;
+ retvalue result, r;
+
+ result = RET_NOTHING;
+ for (i = 0 ; i < filekeys->count ; i++) {
+ r = checkorimprove(filekeys->values[i],
+ &checksumsarray[i]);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r))
+ result = RET_OK;
+ }
+ return result;
+}
+
+/* dump out all information */
+retvalue files_printmd5sums(void) {
+ retvalue result, r;
+ struct cursor *cursor;
+ const char *filekey, *checksum;
+
+ r = table_newglobalcursor(rdb_checksums, true, &cursor);
+ if (!RET_IS_OK(r))
+ return r;
+ result = RET_NOTHING;
+ while (cursor_nexttempdata(rdb_checksums, cursor, &filekey, &checksum, NULL)) {
+ result = RET_OK;
+ (void)fputs(filekey, stdout);
+ (void)putchar(' ');
+ while (*checksum == ':') {
+ while (*checksum != ' ' && *checksum != '\0')
+ checksum++;
+ if (*checksum == ' ')
+ checksum++;
+ }
+ (void)fputs(checksum, stdout);
+ (void)putchar('\n');
+ }
+ r = cursor_close(rdb_checksums, cursor);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+retvalue files_printchecksums(void) {
+ retvalue result, r;
+ struct cursor *cursor;
+ const char *filekey, *checksum;
+
+ r = table_newglobalcursor(rdb_checksums, true, &cursor);
+ if (!RET_IS_OK(r))
+ return r;
+ result = RET_NOTHING;
+ while (cursor_nexttempdata(rdb_checksums, cursor, &filekey, &checksum, NULL)) {
+ result = RET_OK;
+ (void)fputs(filekey, stdout);
+ (void)putchar(' ');
+ (void)fputs(checksum, stdout);
+ (void)putchar('\n');
+ if (interrupted()) {
+ result = RET_ERROR_INTERRUPTED;
+ break;
+ }
+ }
+ r = cursor_close(rdb_checksums, cursor);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+/* callback for each registered file */
+retvalue files_foreach(per_file_action action, void *privdata) {
+ retvalue result, r;
+ struct cursor *cursor;
+ const char *filekey, *checksum;
+
+ r = table_newglobalcursor(rdb_checksums, true, &cursor);
+ if (!RET_IS_OK(r))
+ return r;
+ result = RET_NOTHING;
+ while (cursor_nexttempdata(rdb_checksums, cursor, &filekey, &checksum, NULL)) {
+ if (interrupted()) {
+ RET_UPDATE(result, RET_ERROR_INTERRUPTED);
+ break;
+ }
+ r = action(privdata, filekey);
+ RET_UPDATE(result, r);
+ }
+ r = cursor_close(rdb_checksums, cursor);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+static retvalue checkpoolfile(const char *fullfilename, const struct checksums *expected, bool *improveable) {
+ struct checksums *actual;
+ retvalue r;
+ bool improves;
+
+ r = checksums_read(fullfilename, &actual);
+ if (RET_IS_OK(r)) {
+ if (!checksums_check(expected, actual, &improves)) {
+ fprintf(stderr, "WRONG CHECKSUMS of '%s':\n",
+ fullfilename);
+ checksums_printdifferences(stderr, expected, actual);
+ r = RET_ERROR_WRONG_MD5;
+ } else if (improves)
+ *improveable = true;
+ checksums_free(actual);
+ }
+ return r;
+}
+
/* Verify every registered file against the pool.  With fast==true only
 * checksums_cheaptest is run per file; otherwise files are fully
 * rehashed via checkpoolfile.  Per-file errors are merged into result
 * and the scan continues. */
retvalue files_checkpool(bool fast) {
	retvalue result, r;
	struct cursor *cursor;
	const char *filekey, *combined;
	size_t combinedlen;
	struct checksums *expected;
	char *fullfilename;
	bool improveable = false;

	result = RET_NOTHING;
	r = table_newglobalcursor(rdb_checksums, true, &cursor);
	if (!RET_IS_OK(r))
		return r;
	while (cursor_nexttempdata(rdb_checksums, cursor,
				&filekey, &combined, &combinedlen)) {
		r = checksums_setall(&expected, combined, combinedlen);
		if (RET_WAS_ERROR(r)) {
			/* unparsable record: note it, keep scanning */
			RET_UPDATE(result, r);
			continue;
		}
		fullfilename = files_calcfullfilename(filekey);
		if (FAILEDTOALLOC(fullfilename)) {
			result = RET_ERROR_OOM;
			checksums_free(expected);
			break;
		}
		if (fast)
			r = checksums_cheaptest(fullfilename, expected, true);
		else
			r = checkpoolfile(fullfilename, expected, &improveable);
		if (r == RET_NOTHING) {
			fprintf(stderr, "Missing file '%s'!\n", fullfilename);
			r = RET_ERROR_MISSING;
		}
		free(fullfilename);
		checksums_free(expected);
		RET_UPDATE(result, r);
	}
	r = cursor_close(rdb_checksums, cursor);
	RET_ENDUPDATE(result, r);
	if (improveable && verbose >= 0)
		printf(
"There were files with only some of the checksums this version of reprepro\n"
"can compute recorded. To add those run reprepro collectnewchecksums.\n");
	return result;
}
+
/* For every registered file whose record lacks some of the checksum
 * types this version can compute, rehash the pool file and store the
 * completed record.  Files whose record is already complete are
 * skipped without touching the pool. */
retvalue files_collectnewchecksums(void) {
	retvalue result, r;
	struct cursor *cursor;
	const char *filekey, *all;
	size_t alllen;
	struct checksums *expected;
	char *fullfilename;

	result = RET_NOTHING;
	r = table_newglobalcursor(rdb_checksums, true, &cursor);
	if (!RET_IS_OK(r))
		return r;
	while (cursor_nexttempdata(rdb_checksums, cursor,
				&filekey, &all, &alllen)) {
		r = checksums_setall(&expected, all, alllen);
		if (!RET_IS_OK(r)) {
			RET_UPDATE(result, r);
			continue;
		}
		if (checksums_iscomplete(expected)) {
			/* nothing missing for this file */
			checksums_free(expected);
			continue;
		}

		fullfilename = files_calcfullfilename(filekey);
		if (FAILEDTOALLOC(fullfilename)) {
			result = RET_ERROR_OOM;
			checksums_free(expected);
			break;
		}
		r = checksums_complete(&expected, fullfilename);
		if (r == RET_NOTHING) {
			fprintf(stderr, "Missing file '%s'!\n", fullfilename);
			r = RET_ERROR_MISSING;
		}
		if (r == RET_ERROR_WRONG_MD5) {
			fprintf(stderr,
"ERROR: Cannot collect missing checksums for '%s'\n"
"as the file in the pool does not match the already recorded checksums\n",
				filekey);
		}
		free(fullfilename);
		if (RET_IS_OK(r))
			r = files_replace_checksums(filekey, expected);
		checksums_free(expected);
		RET_UPDATE(result, r);
	}
	r = cursor_close(rdb_checksums, cursor);
	RET_ENDUPDATE(result, r);
	return result;
}
+
+retvalue files_detect(const char *filekey) {
+ struct checksums *checksums;
+ char *fullfilename;
+ retvalue r;
+
+ fullfilename = files_calcfullfilename(filekey);
+ if (FAILEDTOALLOC(fullfilename))
+ return RET_ERROR_OOM;
+ r = checksums_read(fullfilename, &checksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Error opening '%s'!\n", fullfilename);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r)) {
+ free(fullfilename);
+ return r;
+ }
+ free(fullfilename);
+ r = files_add_checksums(filekey, checksums);
+ checksums_free(checksums);
+ return r;
+}
+
+struct rfd { bool reread; };
+
+static retvalue regenerate_filelist(void *data, const char *filekey) {
+ bool reread = ((struct rfd*)data)->reread;
+ size_t l = strlen(filekey);
+ char *debfilename;
+ char *filelist;
+ size_t fls;
+ retvalue r;
+
+ if (l <= 4 || memcmp(filekey+l-4, ".deb", 4) != 0)
+ return RET_NOTHING;
+
+ if (!reread && !table_recordexists(rdb_contents, filekey))
+ return RET_NOTHING;
+
+ debfilename = files_calcfullfilename(filekey);
+ if (FAILEDTOALLOC(debfilename))
+ return RET_ERROR_OOM;
+
+ r = getfilelist(&filelist, &fls, debfilename);
+ free(debfilename);
+ if (RET_IS_OK(r)) {
+ if (verbose > 0)
+ (void)puts(filekey);
+ if (verbose > 6) {
+ const char *p = filelist;
+ while (*p != '\0') {
+ (void)putchar(' ');
+ (void)puts(p);
+ p += strlen(p)+1;
+ }
+ }
+ r = table_adduniqsizedrecord(rdb_contents,
+ filekey, filelist, fls, true, true);
+ free(filelist);
+ }
+ return r;
+}
+
+retvalue files_regenerate_filelist(bool reread) {
+ struct rfd d;
+
+ d.reread = reread;
+ return files_foreach(regenerate_filelist, &d);
+}
+
/* Include a yet unknown file into the pool.
 * If the filekey is already registered, the source file must carry the
 * same checksums; extra hash types it provides are merged into the
 * database and RET_NOTHING is returned (nothing copied).  Otherwise the
 * file is copied into the pool and registered (RET_OK).  On success and
 * when checksums_p != NULL, ownership of the checksums is handed to the
 * caller. */
retvalue files_preinclude(const char *sourcefilename, const char *filekey, struct checksums **checksums_p) {
	retvalue r;
	struct checksums *checksums, *realchecksums;
	bool improves;
	char *fullfilename;

	r = files_get_checksums(filekey, &checksums);
	if (RET_WAS_ERROR(r))
		return r;
	if (RET_IS_OK(r)) {
		/* already registered: verify the new file matches */
		r = checksums_read(sourcefilename, &realchecksums);
		if (r == RET_NOTHING)
			r = RET_ERROR_MISSING;
		if (RET_WAS_ERROR(r)) {
			checksums_free(checksums);
			return r;
		}
		if (!checksums_check(checksums, realchecksums, &improves)) {
			fprintf(stderr,
"ERROR: '%s' cannot be included as '%s'.\n"
"Already existing files can only be included again, if they are the same, but:\n",
				sourcefilename, filekey);
			checksums_printdifferences(stderr, checksums,
					realchecksums);
			checksums_free(checksums);
			checksums_free(realchecksums);
			return RET_ERROR_WRONG_MD5;
		}
		if (improves) {
			/* the file supplies hashes not yet recorded */
			r = checksums_combine(&checksums, realchecksums, NULL);
			if (RET_WAS_ERROR(r)) {
				checksums_free(realchecksums);
				checksums_free(checksums);
				return r;
			}
			r = files_replace_checksums(filekey, checksums);
			if (RET_WAS_ERROR(r)) {
				checksums_free(realchecksums);
				checksums_free(checksums);
				return r;
			}
		}
		checksums_free(realchecksums);
		// args, this breaks retvalue semantics!
		/* (RET_NOTHING with an out-parameter set: callers must
		 * treat NOTHING as "already present, checksums returned") */
		if (checksums_p != NULL)
			*checksums_p = checksums;
		else
			checksums_free(checksums);
		return RET_NOTHING;
	}
	assert (sourcefilename != NULL);
	fullfilename = files_calcfullfilename(filekey);
	if (FAILEDTOALLOC(fullfilename))
		return RET_ERROR_OOM;
	(void)dirs_make_parent(fullfilename);
	r = checksums_copyfile(fullfilename, sourcefilename, true, &checksums);
	if (r == RET_ERROR_EXIST) {
		// TODO: deal with already existing files!
		fprintf(stderr, "File '%s' does already exist!\n",
				fullfilename);
	}
	if (r == RET_NOTHING) {
		fprintf(stderr, "Could not open '%s'!\n", sourcefilename);
		r = RET_ERROR_MISSING;
	}
	if (RET_WAS_ERROR(r)) {
		free(fullfilename);
		return r;
	}
	free(fullfilename);

	r = files_add_checksums(filekey, checksums);
	if (RET_WAS_ERROR(r)) {
		checksums_free(checksums);
		return r;
	}
	if (checksums_p != NULL)
		*checksums_p = checksums;
	else
		checksums_free(checksums);
	return RET_OK;
}
+
/* The filekey is registered; make sure the pool file actually exists
 * and matches *checksums_p, recovering a lost pool file from
 * sourcedir/basefilename if necessary.  Hashes the pool file supplies
 * beyond *checksums_p are merged in and *improving set. */
static retvalue checkimproveorinclude(const char *sourcedir, const char *basefilename, const char *filekey, struct checksums **checksums_p, bool *improving) {
	retvalue r;
	struct checksums *checksums = NULL;
	bool improves, copied = false;
	char *fullfilename = files_calcfullfilename(filekey);

	if (FAILEDTOALLOC(fullfilename))
		return RET_ERROR_OOM;

	if (checksums_iscomplete(*checksums_p)) {
		/* nothing to gain from rehashing; only returns here
		 * unless the file is missing (RET_NOTHING) */
		r = checksums_cheaptest(fullfilename, *checksums_p, true);
		if (r != RET_NOTHING) {
			free(fullfilename);
			return r;
		}
	} else {
		r = checksums_read(fullfilename, &checksums);
		if (RET_WAS_ERROR(r)) {
			free(fullfilename);
			return r;
		}
	}
	if (r == RET_NOTHING) {
		/* pool file is missing: recreate it from the source */
		char *sourcefilename = calc_dirconcat(sourcedir, basefilename);

		if (FAILEDTOALLOC(sourcefilename)) {
			free(fullfilename);
			return RET_ERROR_OOM;
		}

		fprintf(stderr,
"WARNING: file %s was lost!\n"
"(i.e. found in the database, but not in the pool)\n"
"trying to compensate...\n",
				filekey);
		(void)dirs_make_parent(fullfilename);
		r = checksums_copyfile(fullfilename, sourcefilename, false,
				&checksums);
		if (r == RET_ERROR_EXIST) {
			fprintf(stderr,
"File '%s' seems to be missing and existing at the same time!\n"
"To confused to continue...\n",
				fullfilename);
		}
		if (r == RET_NOTHING) {
			fprintf(stderr, "Could not open '%s'!\n",
					sourcefilename);
			r = RET_ERROR_MISSING;
		}
		free(sourcefilename);
		if (RET_WAS_ERROR(r)) {
			free(fullfilename);
			return r;
		}
		copied = true;
	}

	assert (checksums != NULL);

	if (!checksums_check(*checksums_p, checksums, &improves)) {
		if (copied) {
			deletefile(fullfilename);
			fprintf(stderr,
"ERROR: Unexpected content of file '%s/%s'!\n", sourcedir, basefilename);
		} else
// TODO: if the database only listed some of the currently supported checksums,
// and the caller of checkincludefile supplied some (which none yet does), but
// not all (which needs at least three checksums, i.e. not applicaple before
// sha256 get added), then this might also be called if the file in the pool
// just has the same checksums as previously recorded (e.g. a md5sum collision)
// but the new file was listed with another secondary hash than the original.
// In that situation it might be a bit misleading...
			fprintf(stderr,
"ERROR: file %s is damaged!\n"
"(i.e. found in the database, but with different checksums in the pool)\n",
				filekey);
		checksums_printdifferences(stderr, *checksums_p, checksums);
		r = RET_ERROR_WRONG_MD5;
	}
	/* NOTE(review): if the check above failed, r is
	 * RET_ERROR_WRONG_MD5 here, but a successful checksums_combine
	 * below would overwrite it — this relies on checksums_check
	 * leaving improves false on mismatch; verify against checksums.c. */
	if (improves) {
		r = checksums_combine(checksums_p, checksums, NULL);
		if (RET_IS_OK(r))
			*improving = true;
	}
	checksums_free(checksums);
	free(fullfilename);
	return r;
}
+
/* Include sourcedir/basefilename into the pool as <filekey>, verifying
 * it against *checksums_p (usually only the hashes a .changes/.dsc
 * lists).  On return *checksums_p holds the combined checksums.
 * RET_NOTHING: filekey was already registered and everything matches. */
retvalue files_checkincludefile(const char *sourcedir, const char *basefilename, const char *filekey, struct checksums **checksums_p) {
	char *sourcefilename, *fullfilename;
	struct checksums *checksums;
	retvalue r;
	bool improves;

	assert (*checksums_p != NULL);

	r = files_get_checksums(filekey, &checksums);
	if (RET_WAS_ERROR(r))
		return r;
	if (RET_IS_OK(r)) {
		/* there are three sources now:
		 * - the checksums from the database (may have some we
		 *   do not even know about, and may miss some we can
		 *   generate)
		 * - the checksums provided (typically only md5sum,
		 *   as this comes from a .changes or .dsc)
		 * - the checksums of the file
		 *
		 * to make things more complicated, the file should only
		 * be read if needed, as this needs time.
		 * And it can happen the file got lost in the pool, then
		 * this is the best place to replace it.
		 */
		if (!checksums_check(checksums, *checksums_p, &improves)) {
			fprintf(stderr,
"ERROR: '%s/%s' cannot be included as '%s'.\n"
"Already existing files can only be included again, if they are the same, but:\n",
				sourcedir, basefilename, filekey);
			checksums_printdifferences(stderr, checksums,
					*checksums_p);
			checksums_free(checksums);
			return RET_ERROR_WRONG_MD5;
		}
		r = RET_NOTHING;
		if (improves)
			r = checksums_combine(&checksums, *checksums_p, NULL);
		if (!RET_WAS_ERROR(r))
			/* verify (or restore) the pool file itself */
			r = checkimproveorinclude(sourcedir,
				basefilename, filekey, &checksums, &improves);
		if (!RET_WAS_ERROR(r) && improves)
			r = files_replace_checksums(filekey, checksums);
		if (RET_IS_OK(r))
			r = RET_NOTHING;
		/* return the combined checksum */
		checksums_free(*checksums_p);
		*checksums_p = checksums;
		return r;
	}

	/* not yet registered: copy into the pool and verify */
	assert (sourcedir != NULL);
	sourcefilename = calc_dirconcat(sourcedir, basefilename);
	if (FAILEDTOALLOC(sourcefilename))
		return RET_ERROR_OOM;

	fullfilename = files_calcfullfilename(filekey);
	if (FAILEDTOALLOC(fullfilename)) {
		free(sourcefilename);
		return RET_ERROR_OOM;
	}

	(void)dirs_make_parent(fullfilename);
	r = checksums_copyfile(fullfilename, sourcefilename, true, &checksums);
	if (r == RET_NOTHING) {
		fprintf(stderr, "Could not open '%s'!\n", sourcefilename);
		r = RET_ERROR_MISSING;
	}
	if (RET_WAS_ERROR(r)) {
		free(fullfilename);
		free(sourcefilename);
		return r;
	}
	if (!checksums_check(*checksums_p, checksums, &improves)) {
		/* copy does not match the declared checksums: remove it */
		deletefile(fullfilename);
		fprintf(stderr, "ERROR: Unexpected content of file '%s'!\n",
				sourcefilename);
		checksums_printdifferences(stderr, *checksums_p, checksums);
		r = RET_ERROR_WRONG_MD5;
	}
	free(sourcefilename);
	free(fullfilename);
	if (RET_WAS_ERROR(r)) {
		return r;
	}
	if (improves) {
		r = checksums_combine(checksums_p, checksums, NULL);
		checksums_free(checksums);
		if (RET_WAS_ERROR(r))
			return r;
	} else
		checksums_free(checksums);

	return files_add_checksums(filekey, *checksums_p);
}
+
+off_t files_getsize(const char *filekey) {
+ retvalue r;
+ off_t s;
+ struct checksums *checksums;
+
+ r = files_get_checksums(filekey, &checksums);
+ if (!RET_IS_OK(r))
+ return -1;
+ s = checksums_getfilesize(checksums);
+ checksums_free(checksums);
+ return s;
+}
diff --git a/files.h b/files.h
new file mode 100644
index 0000000..2dceeb2
--- /dev/null
+++ b/files.h
@@ -0,0 +1,86 @@
+#ifndef REPREPRO_FILES_H
+#define REPREPRO_FILES_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_NAMES_H
+#include "names.h"
+#endif
+
+struct checksums;
+struct checksumsarray;
+
+/* Add file's md5sum to database */
+retvalue files_add_checksums(const char *, const struct checksums *);
+
+/* remove file's md5sum from database */
+retvalue files_remove(const char * /*filekey*/);
+/* same but do not call pool_markremoved */
+retvalue files_removesilent(const char * /*filekey*/);
+
+/* check for file in the database and if not found there in the pool */
+retvalue files_expect(const char *, const struct checksums *, bool warnifreadded);
+/* same for multiple files */
+retvalue files_expectfiles(const struct strlist *, struct checksums **);
+
+/* check for several files in the database and update information */
+retvalue files_checkorimprove(const struct strlist *, struct checksums **);
+
+/* what to do with files */
+/* file should already be there, just make sure it is in the database */
+#define D_INPLACE -1
+/* copy the file to the given location, return RET_NOTHING, if already in place */
+#define D_COPY 0
+/* move the file in place: */
+#define D_MOVE 1
+/* move needed and delete unneeded files: */
+#define D_DELETE 2
+
+/* Include a given file into the pool
+ * return RET_NOTHING, if a file with the same checksums is already there
+ * return RET_OK, if copied and added
+ * return RET_ERROR_MISSING, if there is no file to copy.
+ * return RET_ERROR_WRONG_MD5 if wrong md5sum.
+ * (the original file is not deleted in that case, even if delete is positive)
+ */
+retvalue files_preinclude(const char *sourcefilename, const char *filekey, /*@null@*//*@out@*/struct checksums **);
+retvalue files_checkincludefile(const char *directory, const char *sourcefilename, const char *filekey, struct checksums **);
+
+typedef retvalue per_file_action(void *data, const char *filekey);
+
+/* callback for each registered file */
+retvalue files_foreach(per_file_action, void *);
+
+/* check if all files are correct. (skip md5sum if fast is true) */
+retvalue files_checkpool(bool /*fast*/);
+/* calculate all missing hashes */
+retvalue files_collectnewchecksums(void);
+
+/* dump out all information */
+retvalue files_printmd5sums(void);
+retvalue files_printchecksums(void);
+
+/* look for the given filekey and add it into the filesdatabase */
+retvalue files_detect(const char *);
+
+retvalue files_regenerate_filelist(bool redo);
+
+/* hardlink file with known checksums and add it to database */
+retvalue files_hardlinkandadd(const char * /*tempfile*/, const char * /*filekey*/, const struct checksums *);
+
+/* RET_NOTHING: file is already there
+ * RET_OK : could be added
+ * RET_ERROR_WRONG_MD5: filekey is already there with different md5sum */
+retvalue files_canadd(const char *filekey, const struct checksums *);
+
+/* make a filekey to a fullfilename. return NULL if OutOfMemory */
+static inline char *files_calcfullfilename(const char *filekey) {
+	return calc_dirconcat(global.outdir, filekey);
+}
+off_t files_getsize(const char *);
+#endif
diff --git a/filterlist.c b/filterlist.c
new file mode 100644
index 0000000..d2f39eb
--- /dev/null
+++ b/filterlist.c
@@ -0,0 +1,599 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "configparser.h"
+#include "filterlist.h"
+
+/* One loaded filter-list file, shared between all users of the same
+ * file name via reference counting.  All loaded files are chained in
+ * the global 'listfiles' list below. */
+static struct filterlistfile {
+	/* number of struct filterlist slots pointing at this entry */
+	size_t reference_count;
+
+	/* the (unexpanded) name this file was requested under */
+	char *filename;
+	size_t filename_len;
+
+	/* entries sorted by package name */
+	/*@owned@*//*@null@*/
+	struct filterlistitem *root;
+	/* most recently found entry; used by find() as a search cache */
+	/*@dependent@*//*@null@*/
+	const struct filterlistitem *last;
+
+	/*@owned@*//*@null@*/
+	struct filterlistfile *next;
+} *listfiles = NULL;
+
+/* one line of a filter list: a package name, an optional exact
+ * version, and what to do with the package */
+struct filterlistitem {
+	/*@owned@*//*@null@*/
+	struct filterlistitem *next;
+	char *packagename;
+	/* NULL unless the "= version" form was used */
+	char *version;
+	enum filterlisttype what;
+};
+
+/* Free a whole chain of filter-list items including their strings. */
+static void filterlistitems_free(/*@null@*//*@only@*/struct filterlistitem *list) {
+	struct filterlistitem *cur = list;
+
+	while (cur != NULL) {
+		struct filterlistitem *gone = cur;
+
+		cur = cur->next;
+		free(gone->version);
+		free(gone->packagename);
+		free(gone);
+	}
+}
+
+/* Drop one reference to a loaded filter-list file; on the last
+ * reference unlink it from the global 'listfiles' chain and free it. */
+static void filterlistfile_unlock(struct filterlistfile *list) {
+	assert (list != NULL);
+
+	if (list->reference_count <= 1) {
+		struct filterlistfile **p = &listfiles;
+
+		/* a zero count would mean a double unlock; the assert
+		 * catches it in debug builds, the check below makes it
+		 * harmless otherwise */
+		assert (list->reference_count == 1);
+		if (list->reference_count == 0)
+			return;
+
+		/* find the pointer in the global chain pointing at us */
+		while (*p != NULL && *p != list)
+			p = &(*p)->next;
+		assert (p != NULL);
+		/* only free when actually found in the chain (defensive:
+		 * an entry not in the chain would otherwise be leaked,
+		 * not double-freed) */
+		if (*p == list) {
+			*p = list->next;
+			filterlistitems_free(list->root);
+			free(list->filename);
+			free(list);
+		}
+	} else
+		list->reference_count--;
+}
+
+/* Parse an already opened filter-list file into n->root.
+ * Each non-empty, non-comment line has the form
+ *   packagename action      (action: install, deinstall, purge, ...)
+ * or
+ *   packagename = version   (shorthand for installing that version)
+ * Entries are inserted sorted by package name; duplicate names are an
+ * error.  On error a partially built list can remain in n->root; the
+ * callers free it via filterlistitems_free. */
+static inline retvalue filterlistfile_parse(struct filterlistfile *n, const char *filename, FILE *f) {
+	char *lineend, *namestart, *nameend, *what, *version;
+	int cmp;
+	enum filterlisttype type;
+	struct filterlistitem *h;
+	char line[1001];
+	int lineno = 0;
+	/* insertion cursor, kept across lines since input is usually
+	 * already sorted */
+	struct filterlistitem **last = &n->root;
+
+	while (fgets(line, 1000, f) != NULL) {
+		lineno++;
+		lineend = strchr(line, '\n');
+		if (lineend == NULL) {
+			fprintf(stderr, "Overlong or unterminated line in '%s'!\n", filename);
+			return RET_ERROR;
+		}
+		/* strip trailing whitespace including the newline */
+		while (lineend >= line && xisspace(*lineend))
+			*(lineend--) = '\0';
+		/* Ignore line only containing whitespace */
+		if (line[0] == '\0')
+			continue;
+		/* Ignore lines starting with a comment sign */
+		if (line[0] == '#')
+			continue;
+		namestart = line;
+		while (*namestart != '\0' && xisspace(*namestart))
+			namestart++;
+		nameend=namestart;
+		while (*nameend != '\0' && !xisspace(*nameend))
+			nameend++;
+		what = nameend;
+		/* NUL-terminate the name and advance to the action word */
+		while (*what != '\0' && xisspace(*what))
+			*(what++)='\0';
+		if (*what == '\0') {
+			fprintf(stderr,
+"Malformed line in '%s': %d!\n", filename, lineno);
+			return RET_ERROR;
+		}
+		version = NULL;
+		if (strcmp(what, "install") == 0) {
+			type = flt_install;
+		} else if (strcmp(what, "deinstall") == 0) {
+			type = flt_deinstall;
+		} else if (strcmp(what, "purge") == 0) {
+			type = flt_purge;
+		} else if (strcmp(what, "hold") == 0) {
+			type = flt_hold;
+		} else if (strcmp(what, "supersede") == 0) {
+			type = flt_supersede;
+		} else if (strcmp(what, "upgradeonly") == 0) {
+			type = flt_upgradeonly;
+		} else if (strcmp(what, "warning") == 0) {
+			type = flt_warning;
+		} else if (strcmp(what, "error") == 0) {
+			type = flt_error;
+		} else if (what[0] == '=') {
+			/* "= version": install exactly this version */
+			what++;
+			while (*what != '\0' && xisspace(*what))
+				what++;
+			version = what;
+			if (*version == '\0') {
+				fprintf(stderr,
+"Malformed line %d in '%s': missing version after '='!\n",
+					lineno, filename);
+				return RET_ERROR;
+			}
+			while (*what != '\0' && !xisspace(*what))
+				what++;
+			while (*what != '\0' && xisspace(*what))
+				*(what++) = '\0';
+			if (*what != '\0') {
+				fprintf(stderr,
+"Malformed line %d in '%s': space in version!\n",
+					lineno, filename);
+				return RET_ERROR;
+			}
+			type = flt_install;
+		} else {
+			fprintf(stderr,
+"Unknown status in '%s':%d: '%s'!\n", filename, lineno, what);
+			return RET_ERROR;
+		}
+		/* restart the insertion scan from the root only when the
+		 * new name sorts before the previous insertion point */
+		if (*last == NULL || strcmp(namestart, (*last)->packagename) < 0)
+			last = &n->root;
+		cmp = -1;
+		while (*last != NULL &&
+				(cmp=strcmp(namestart, (*last)->packagename)) > 0)
+			last = &((*last)->next);
+		if (cmp == 0) {
+			fprintf(stderr,
+"Two lines describing '%s' in '%s'!\n", namestart, filename);
+			return RET_ERROR;
+		}
+		/* link the new entry in before filling it, so it is
+		 * reachable for cleanup even on the OOM paths below */
+		h = zNEW(struct filterlistitem);
+		if (FAILEDTOALLOC(h)) {
+			return RET_ERROR_OOM;
+		}
+		h->next = *last;
+		*last = h;
+		h->what = type;
+		h->packagename = strdup(namestart);
+		if (FAILEDTOALLOC(h->packagename)) {
+			return RET_ERROR_OOM;
+		}
+		if (version == NULL)
+			h->version = NULL;
+		else {
+			h->version = strdup(version);
+			if (FAILEDTOALLOC(h->version))
+				return RET_ERROR_OOM;
+		}
+	}
+	/* initialise the search cache with the last inserted item
+	 * (NULL only for an empty file, which find() never sees) */
+	n->last = *last;
+	return RET_OK;
+
+}
+
+/* Open filename and parse it as a filter list into n. */
+static inline retvalue filterlistfile_read(struct filterlistfile *n, const char *filename) {
+	retvalue result;
+	FILE *file = fopen(filename, "r");
+
+	if (file == NULL) {
+		fprintf(stderr, "Cannot open %s for reading: %s!\n",
+				filename, strerror(errno));
+		return RET_ERROR;
+	}
+	result = filterlistfile_parse(n, filename, file);
+
+	/* stream was opened read-only, so a close error carries no
+	 * information not already seen */
+	(void)fclose(file);
+	return result;
+}
+
+/* Get the filter-list file named by the first len bytes of filename,
+ * reusing an already loaded instance when possible, loading and
+ * caching it otherwise. */
+static inline retvalue filterlistfile_getl(const char *filename, size_t len, struct filterlistfile **list) {
+	struct filterlistfile *f;
+	char *expanded;
+	retvalue r;
+
+	/* already loaded? then just take another reference */
+	for (f = listfiles ; f != NULL ; f = f->next) {
+		if (f->filename_len != len)
+			continue;
+		if (strncmp(f->filename, filename, len) != 0)
+			continue;
+		f->reference_count++;
+		*list = f;
+		return RET_OK;
+	}
+
+	f = zNEW(struct filterlistfile);
+	if (FAILEDTOALLOC(f))
+		return RET_ERROR_OOM;
+	f->reference_count = 1;
+	f->filename = strndup(filename, len);
+	f->filename_len = len;
+	if (FAILEDTOALLOC(f->filename)) {
+		free(f);
+		return RET_ERROR_OOM;
+	}
+	expanded = configfile_expandname(f->filename, NULL);
+	if (FAILEDTOALLOC(expanded))
+		r = RET_ERROR_OOM;
+	else {
+		r = filterlistfile_read(f, expanded);
+		free(expanded);
+	}
+
+	if (RET_IS_OK(r)) {
+		/* publish in the global cache */
+		f->next = listfiles;
+		listfiles = f;
+		*list = f;
+	} else {
+		filterlistitems_free(f->root);
+		free(f->filename);
+		free(f);
+	}
+	return r;
+}
+
+/* Like filterlistfile_getl, but takes ownership of the (whole,
+ * NUL-terminated) filename string.
+ * Previously this duplicated the whole cache-search/load logic of
+ * filterlistfile_getl; delegating removes that duplication.  The only
+ * behavioural difference is one extra transient string copy, as _getl
+ * duplicates the name for the cache entry. */
+static inline retvalue filterlistfile_get(/*@only@*/char *filename, /*@out@*/struct filterlistfile **list) {
+	retvalue r;
+
+	r = filterlistfile_getl(filename, strlen(filename), list);
+	/* ownership contract: the passed name is consumed here */
+	free(filename);
+	return r;
+}
+
+/* Release all file references held by list and forget them. */
+void filterlist_release(struct filterlist *list) {
+	assert(list != NULL);
+
+	if (list->files == NULL) {
+		assert (list->count == 0);
+		return;
+	}
+	for (size_t i = 0 ; i < list->count ; i++)
+		filterlistfile_unlock(list->files[i]);
+	free(list->files);
+	list->files = NULL;
+}
+
+/* keyword -> outcome table; NULL-name terminated.  Used for parsing
+ * the default action of a FilterList config field and the ":outcome"
+ * suffix of command line filters. */
+static const struct constant filterlisttype_listtypes[] = {
+	{"install", (int)flt_install},
+	{"hold", (int)flt_hold},
+	{"supersede", (int)flt_supersede},
+	{"deinstall", (int)flt_deinstall},
+	{"purge", (int)flt_purge},
+	{"upgradeonly", (int)flt_upgradeonly},
+	{"warning", (int)flt_warning},
+	{"error", (int)flt_error},
+	{NULL, 0}
+};
+
+/* Parse a FilterList config field: first word is the default action,
+ * every following word names a filter-list file to load.  On success
+ * fills *list; on error all already-taken file references are
+ * released again. */
+retvalue filterlist_load(struct filterlist *list, struct configiterator *iter) {
+	enum filterlisttype defaulttype;
+	size_t count;
+	struct filterlistfile **files;
+	retvalue r;
+	char *filename;
+
+	r = config_getenum(iter, filterlisttype, listtypes, &defaulttype);
+	if (r == RET_NOTHING || r == RET_ERROR_UNKNOWNFIELD) {
+		fprintf(stderr,
+"Error parsing %s, line %u, column %u: Expected default action as first argument to FilterList: (one of install, purge, hold, ...)\n",
+			config_filename(iter),
+			config_markerline(iter),
+			config_markercolumn(iter));
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	count = 0;
+	files = NULL;
+	/* NOTE(review): any return other than RET_NOTHING enters the
+	 * loop body; this assumes config_getword never returns an error
+	 * without setting filename -- verify against configparser.c */
+	while ((r = config_getword(iter, &filename)) != RET_NOTHING) {
+		struct filterlistfile **n;
+
+		n = realloc(files, (count+1)*
+				sizeof(struct filterlistfile *));
+		if (FAILEDTOALLOC(n)) {
+			free(filename);
+			r = RET_ERROR_OOM;
+		} else {
+			n[count] = NULL;
+			files = n;
+			// TODO: make filename only
+			/* filterlistfile_get consumes filename */
+			r = filterlistfile_get(filename, &files[count]);
+			if (RET_IS_OK(r))
+				count++;
+		}
+		if (RET_WAS_ERROR(r)) {
+			/* roll back: release everything taken so far */
+			while (count > 0) {
+				count--;
+				filterlistfile_unlock(files[count]);
+			}
+			free(files);
+			return r;
+		}
+	}
+	list->count = count;
+	list->files = files;
+	list->defaulttype = defaulttype;
+	list->set = true;
+	return RET_OK;
+}
+
+/* Look up name in the sorted list, using list->last (the previous
+ * hit/stop position) as a cache so that mostly-sorted query sequences
+ * only advance the cursor instead of rescanning from the root.
+ * Returns true iff an entry exists; on a hit list->last points at it
+ * (for the subsequent version check in filterlist_find), on a miss it
+ * is left positioned for the next search.
+ * Callers must ensure list->root != NULL, so list->last is set. */
+static inline bool find(const char *name, /*@null@*/struct filterlistfile *list) {
+	int cmp;
+	/*@dependent@*/const struct filterlistitem *last = list->last;
+
+	assert (last != NULL);
+
+	/* fast path: the item right after the cached one */
+	if (last->next != NULL) {
+		cmp = strcmp(name, last->next->packagename);
+		if (cmp == 0) {
+			list->last = last->next;
+			return true;
+		}
+	}
+	/* name sorts at or before the cached position: check the cached
+	 * item itself, then possibly restart from the root */
+	if (last->next == NULL || cmp < 0) {
+		cmp = strcmp(name, last->packagename);
+		if (cmp == 0) {
+			return true;
+		} else if (cmp > 0)
+			/* between cached item and its successor: absent */
+			return false;
+		last = list->root;
+		cmp = strcmp(name, last->packagename);
+		if (cmp == 0) {
+			list->last = list->root;
+			return true;
+		} else if (cmp < 0)
+			/* sorts before the first entry: absent */
+			return false;
+	}
+	/* now we are after last */
+	while (last->next != NULL) {
+		cmp = strcmp(name, last->next->packagename);
+		if (cmp == 0) {
+			list->last = last->next;
+			return true;
+		}
+		if (cmp < 0) {
+			list->last = last;
+			return false;
+		}
+		last = last->next;
+	}
+	list->last = last;
+	return false;
+}
+
+/* Return the action for the given package name/version: the first
+ * matching entry of any listed file wins (version-restricted entries
+ * only match their exact version); otherwise the default action. */
+enum filterlisttype filterlist_find(const char *name, const char *version, const struct filterlist *list) {
+	size_t i;
+
+	for (i = 0 ; i < list->count ; i++) {
+		struct filterlistfile *f = list->files[i];
+
+		if (f->root == NULL || !find(name, f))
+			continue;
+		if (f->last->version == NULL
+				|| strcmp(f->last->version, version) == 0)
+			return f->last->what;
+	}
+	return list->defaulttype;
+}
+
+/* package filters assembled from command line arguments via
+ * filterlist_cmdline_add_file/_add_pkg; one for binary and one for
+ * source packages */
+struct filterlist cmdline_bin_filter = {
+	.count = 0,
+	.files = NULL,
+	/* as long as nothing added, this does not change anything.
+	 * Once something is added, that will be auto_hold */
+	.defaulttype = flt_unchanged,
+	.set = false,
+};
+struct filterlist cmdline_src_filter = {
+	.count = 0,
+	.files = NULL,
+	/* as long as nothing added, this does not change anything.
+	 * Once something is added, that will be auto_hold */
+	.defaulttype = flt_unchanged,
+	.set = false,
+};
+
+/* On first use, give the command line filter a private, unnamed
+ * filter-list file in slot 0 to collect per-package entries into. */
+static retvalue filterlist_cmdline_init(struct filterlist *l) {
+	if (l->count != 0)
+		return RET_OK;
+
+	/* two slots: one for the package entries, one spare for the
+	 * first file added via filterlist_cmdline_add_file */
+	l->files = nzNEW(2, struct filterlistfile *);
+	if (FAILEDTOALLOC(l->files))
+		return RET_ERROR_OOM;
+	l->files[0] = zNEW(struct filterlistfile);
+	if (FAILEDTOALLOC(l->files[0]))
+		return RET_ERROR_OOM;
+	l->files[0]->reference_count = 1;
+	l->count = 1;
+	return RET_OK;
+}
+
+/* Add a filter-list file given on the command line to the binary or
+ * source command line filter; "-" means standard input. */
+retvalue filterlist_cmdline_add_file(bool src, const char *filename) {
+	retvalue r;
+	struct filterlist *l = src ? &cmdline_src_filter : &cmdline_bin_filter;
+	char *name;
+
+	r = filterlist_cmdline_init(l);
+	if (RET_WAS_ERROR(r))
+		return r;
+	l->set = true;
+	l->defaulttype = flt_auto_hold;
+
+	if (strcmp(filename, "-") == 0)
+		filename = "/dev/stdin";
+	name = strdup(filename);
+	if (FAILEDTOALLOC(name))
+		return RET_ERROR_OOM;
+	if (l->count > 1) {
+		struct filterlistfile **n;
+
+		n = realloc(l->files, (l->count + 1) *
+				sizeof(struct filterlistfile *));
+		if (FAILEDTOALLOC(n)) {
+			free(name);
+			return RET_ERROR_OOM;
+		}
+		n[l->count++] = NULL;
+		l->files = n;
+	} else {
+		/* already allocated in _init */
+		assert (l->count == 1);
+		l->count++;
+	}
+
+	/* NOTE(review): if loading fails, count was already bumped and
+	 * the slot stays NULL; a later filterlist_release would pass
+	 * NULL to filterlistfile_unlock -- presumably the program exits
+	 * on this error before that happens; verify */
+	return filterlistfile_get(name, &l->files[l->count - 1]);
+}
+
+/* Add a single-package filter entry given on the command line.
+ * Accepted forms: "name" (install), "name=version" (install exactly
+ * that version), "name:outcome" (outcome from the keyword table).
+ * Entries are kept sorted in the filter's private slot-0 list. */
+retvalue filterlist_cmdline_add_pkg(bool src, const char *package) {
+	retvalue r;
+	enum filterlisttype what;
+	struct filterlist *l = src ? &cmdline_src_filter : &cmdline_bin_filter;
+	struct filterlistfile *f;
+	struct filterlistitem **p, *h;
+	char *name, *version;
+	const char *c;
+	int cmp;
+
+	r = filterlist_cmdline_init(l);
+	if (RET_WAS_ERROR(r))
+		return r;
+	l->set = true;
+	l->defaulttype = flt_auto_hold;
+
+	/* "name=version" selects one exact version to install */
+	c = strchr(package, '=');
+	if (c != NULL) {
+		what = flt_install;
+		name = strndup(package, c - package);
+		if (FAILEDTOALLOC(name))
+			return RET_ERROR_OOM;
+		version = strdup(c + 1);
+		if (FAILEDTOALLOC(version)) {
+			free(name);
+			return RET_ERROR_OOM;
+		}
+	} else {
+		version = NULL;
+		/* "name:outcome" selects an explicit filter outcome */
+		c = strchr(package, ':');
+		if (c == NULL) {
+			/* plain name: the whole argument is the name.
+			 * (The previous code computed c - package with
+			 * c == NULL here -- undefined behaviour -- and
+			 * then leaked that string by duplicating the
+			 * name a second time below.) */
+			what = flt_install;
+			name = strdup(package);
+			if (FAILEDTOALLOC(name))
+				return RET_ERROR_OOM;
+		} else {
+			const struct constant *t = filterlisttype_listtypes;
+			while (t->name != NULL) {
+				if (strcmp(c + 1, t->name) == 0) {
+					what = t->value;
+					break;
+				}
+				t++;
+			}
+			if (t->name == NULL) {
+				fprintf(stderr,
+"Error: unknown filter-outcome '%s' (expected 'install' or ...)\n",
+					c + 1);
+				return RET_ERROR;
+			}
+			name = strndup(package, c - package);
+			if (FAILEDTOALLOC(name))
+				return RET_ERROR_OOM;
+		}
+	}
+	/* insert sorted by package name, rejecting duplicates */
+	f = l->files[0];
+	assert (f != NULL);
+	p = &f->root;
+	cmp = -1;
+	while (*p != NULL && (cmp = strcmp(name, (*p)->packagename)) > 0)
+		p = &((*p)->next);
+	if (cmp == 0) {
+		fprintf(stderr,
+"Package in command line filter two times: '%s'\n",
+			name);
+		free(name);
+		free(version);
+		return RET_ERROR;
+	}
+	h = zNEW(struct filterlistitem);
+	if (FAILEDTOALLOC(h)) {
+		free(name);
+		free(version);
+		return RET_ERROR_OOM;
+	}
+	h->next = *p;
+	*p = h;
+	h->what = what;
+	h->packagename = name;
+	h->version = version;
+	/* keep the search cache valid */
+	f->last = h;
+	return RET_OK;
+}
diff --git a/filterlist.h b/filterlist.h
new file mode 100644
index 0000000..3120db4
--- /dev/null
+++ b/filterlist.h
@@ -0,0 +1,41 @@
+#ifndef REPREPRO_FILTERLIST_H
+#define REPREPRO_FILTERLIST_H
+
+enum filterlisttype {
+ /* must be 0, so it is the default, when there is no list */
+ flt_install = 0,
+ flt_unchanged, /* special value used by the cmdline lists */
+ flt_auto_hold, /* special value used by the cmdline lists */
+ flt_purge,
+ flt_warning,
+ flt_deinstall,
+ flt_hold,
+ flt_supersede,
+ flt_upgradeonly,
+ flt_error
+};
+
+struct filterlistfile;
+
+struct filterlist {
+ size_t count;
+ struct filterlistfile **files;
+
+ /* to be used when not found */
+ enum filterlisttype defaulttype;
+ /* true if this is loaded from config */
+ bool set;
+};
+
+struct configiterator;
+retvalue filterlist_load(/*@out@*/struct filterlist *, struct configiterator *);
+
+void filterlist_release(struct filterlist *list);
+
+enum filterlisttype filterlist_find(const char *name, const char *version, const struct filterlist *);
+
+extern struct filterlist cmdline_bin_filter, cmdline_src_filter;
+retvalue filterlist_cmdline_add_pkg(bool, const char *);
+retvalue filterlist_cmdline_add_file(bool, const char *);
+
+#endif
diff --git a/freespace.c b/freespace.c
new file mode 100644
index 0000000..aa67c37
--- /dev/null
+++ b/freespace.c
@@ -0,0 +1,243 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <unistd.h>
+#include <strings.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "error.h"
+#include "database.h"
+#include "checksums.h"
+#include "freespace.h"
+
+/* per-filesystem bookkeeping: how many blocks will be needed versus
+ * what is available */
+struct device {
+	/*@null@*/struct device *next;
+	/* stat(2)'s st_dev number identifying this device */
+	dev_t id;
+	/* some directory in this filesystem */
+	char *somepath;
+	/* size of one block on this device according to statvfs(2) */
+	unsigned long blocksize;
+	/* blocks available for us */
+	fsblkcnt_t available;
+	/* blocks already known to be needed on that device */
+	fsblkcnt_t needed;
+	/* calculated block to keep free */
+	fsblkcnt_t reserved;
+};
+
+/* all devices seen so far, plus the per-device default reservation
+ * (in bytes) applied when a new device is first registered */
+struct devices {
+	/*@null@*/struct device *root;
+	off_t reserved;
+};
+
+/* Free the whole device bookkeeping structure; NULL is allowed. */
+void space_free(struct devices *devices) {
+	if (devices == NULL)
+		return;
+
+	struct device *d = devices->root;
+	while (d != NULL) {
+		struct device *next = d->next;
+
+		free(d->somepath);
+		free(d);
+		d = next;
+	}
+	free(devices);
+}
+
+/* Return via *result the bookkeeping entry for the filesystem with
+ * stat(2) device number id, creating it from statvfs(2) data of
+ * dirname when not yet known. */
+static retvalue device_find_or_create(struct devices *devices, dev_t id, const char *dirname, /*@out@*/struct device **result) {
+	struct device *d;
+	struct statvfs s;
+	int ret;
+
+	d = devices->root;
+
+	while (d != NULL && d->id != id)
+		d = d->next;
+
+	if (d != NULL) {
+		*result = d;
+		return RET_OK;
+	}
+
+	ret = statvfs(dirname, &s);
+	if (ret != 0) {
+		int e = errno;
+		fprintf(stderr,
+"Error judging free space for the filesystem '%s' belongs to: %d=%s\n"
+"(Take a look at --spacecheck in the manpage on how to modify checking.)\n",
+			dirname, e, strerror(e));
+		return RET_ERRNO(e);
+	}
+
+	d = NEW(struct device);
+	if (FAILEDTOALLOC(d))
+		return RET_ERROR_OOM;
+	d->next = devices->root;
+	d->id = id;
+	d->somepath = strdup(dirname);
+	if (FAILEDTOALLOC(d->somepath)) {
+		free(d);
+		return RET_ERROR_OOM;
+	}
+	d->blocksize = s.f_bsize;
+	/* use bfree when being root? but why run as root? */
+	d->available = s.f_bavail;
+	d->needed = 0;
+	/* convert the configured byte reservation into blocks, rounding
+	 * up by one block (comment used to read "always keep at least
+	 * one megabyte spare" -- presumably the default reservation;
+	 * the actual amount comes from devices->reserved) */
+	d->reserved = devices->reserved/d->blocksize+1;
+	devices->root = d;
+	*result = d;
+	return RET_OK;
+}
+
+/* Initialise free-space checking.  With scm_NONE, *devices is set to
+ * NULL and all later space_* calls become no-ops.  With scm_FULL, the
+ * filesystem holding the database directory is registered and
+ * reservedfordb bytes are added to its safety margin;
+ * reservedforothers (bytes) applies to every device registered later. */
+retvalue space_prepare(struct devices **devices, enum spacecheckmode mode, off_t reservedfordb, off_t reservedforothers) {
+	struct devices *n;
+	struct device *d;
+	struct stat s;
+	int ret;
+	retvalue r;
+
+	if (mode == scm_NONE) {
+		*devices = NULL;
+		return RET_OK;
+	}
+	assert (mode == scm_FULL);
+	n = NEW(struct devices);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->root = NULL;
+	n->reserved = reservedforothers;
+
+	ret = stat(global.dbdir, &s);
+	if (ret != 0) {
+		int e = errno;
+		fprintf(stderr, "Error stat'ing %s: %d=%s\n",
+				global.dbdir, e, strerror(e));
+		/* n->root is still NULL, so plain free suffices */
+		free(n);
+		return RET_ERRNO(e);
+	}
+	r = device_find_or_create(n, s.st_dev, global.dbdir, &d);
+	if (RET_WAS_ERROR(r)) {
+		space_free(n);
+		return r;
+	}
+	/* extra margin on the database filesystem, rounded up to blocks */
+	d->reserved += reservedfordb/d->blocksize+1;
+	*devices = n;
+	return RET_OK;
+}
+
+/* Register that a file with the given checksums will be created at
+ * filename, accounting its size against the device the target
+ * directory lives on.  Returns RET_NOTHING when checking is disabled. */
+retvalue space_needed(struct devices *devices, const char *filename, const struct checksums *checksums) {
+	size_t l = strlen(filename);
+	char buffer[l+1];
+	struct stat s;
+	struct device *device;
+	int ret;
+	retvalue r;
+	fsblkcnt_t blocks;
+	off_t filesize;
+
+	if (devices == NULL)
+		return RET_NOTHING;
+
+	/* extract the directory part; the file itself need not exist yet */
+	while (l > 0 && filename[l-1] != '/')
+		l--;
+	assert (l > 0);
+	memcpy(buffer, filename, l);
+	buffer[l] = '\0';
+
+	ret = stat(buffer, &s);
+	if (ret != 0) {
+		int e = errno;
+		/* name the directory actually stat'ed, not the full file
+		 * name (the previous message printed the file name,
+		 * which was not the stat target) */
+		fprintf(stderr, "Error stat'ing %s: %d=%s\n", buffer,
+				e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	r = device_find_or_create(devices, s.st_dev, buffer, &device);
+	if (RET_WAS_ERROR(r))
+		return r;
+	filesize = checksums_getfilesize(checksums);
+	/* round the file size up to whole blocks; the extra +1 block is
+	 * presumably slack for metadata/directory growth */
+	blocks = (filesize + device->blocksize - 1) / device->blocksize;
+	device->needed += 1 + blocks;
+
+	return RET_OK;
+}
+
+/* Re-measure every touched filesystem and verify that the blocks
+ * registered via space_needed fit into the space now available (also
+ * honouring the per-device safety margin).  Returns RET_NOTHING when
+ * checking is disabled, RET_ERROR when any filesystem is too full. */
+retvalue space_check(struct devices *devices) {
+	struct device *device;
+	struct statvfs s;
+	int ret;
+	retvalue result = RET_OK;
+
+
+	if (devices == NULL)
+		return RET_NOTHING;
+
+	for (device = devices->root ; device != NULL ; device = device->next) {
+		/* recalculate free space, as created directories
+		 * and other stuff might have changed it */
+
+		ret = statvfs(device->somepath, &s);
+		if (ret != 0) {
+			int e = errno;
+			fprintf(stderr,
+"Error judging free space for the filesystem '%s' belongs to: %d=%s\n"
+"(As this worked before in this run, something must have changed strangely)\n",
+				device->somepath,
+				e, strerror(e));
+			return RET_ERRNO(e);
+		}
+		if (device->blocksize != s.f_bsize) {
+			fprintf(stderr,
+"The block size of the filesystem belonging to '%s' has changed.\n"
+"Either something was mounted or unmounted while reprepro was running,\n"
+"or some symlinks were changed. Aborting as utterly confused.\n",
+				device->somepath);
+			/* the message promises an abort, but the code
+			 * previously fell through and kept comparing
+			 * stale block counts -- actually abort now */
+			return RET_ERROR;
+		}
+		device->available = s.f_bavail;
+		if (device->needed >= device->available) {
+			fprintf(stderr,
+"NOT ENOUGH FREE SPACE on filesystem 0x%lx (the filesystem '%s' is on)\n"
+"available blocks %llu, needed blocks %llu, block size is %llu.\n",
+				(unsigned long)device->id, device->somepath,
+				(unsigned long long)device->available,
+				(unsigned long long)device->needed,
+				(unsigned long long)device->blocksize);
+			result = RET_ERROR;
+		} else if (device->reserved >= device->available ||
+			   device->needed >= device->available - device->reserved) {
+			fprintf(stderr,
+"NOT ENOUGH FREE SPACE on filesystem 0x%lx (the filesystem '%s' is on)\n"
+"available blocks %llu, needed blocks %llu (+%llu safety margin), block size is %llu.\n"
+"(Take a look at --spacecheck in the manpage for more information.)\n",
+				(unsigned long)device->id, device->somepath,
+				(unsigned long long)device->available,
+				(unsigned long long)device->needed,
+				(unsigned long long)device->reserved,
+				(unsigned long long)device->blocksize);
+			result = RET_ERROR;
+		}
+	}
+	return result;
+}
diff --git a/freespace.h b/freespace.h
new file mode 100644
index 0000000..ced5d70
--- /dev/null
+++ b/freespace.h
@@ -0,0 +1,20 @@
+#ifndef REPREPRO_FREESPACE_H
+#define REPREPRO_FREESPACE_H
+
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+/* opaque free-space bookkeeping, one entry per filesystem touched */
+struct devices;
+enum spacecheckmode { scm_NONE, /* scm_ASSUMESINGLEFS, */ scm_FULL };
+
+/* set up checking; with scm_NONE *devices becomes NULL (disabled) */
+retvalue space_prepare(/*@out@*/struct devices **, enum spacecheckmode, off_t /*reservedfordb*/, off_t /*reservedforothers*/);
+
+struct checksums;
+/* account the size of a file that will be created at the given path */
+retvalue space_needed(/*@null@*/struct devices *, const char * /*filename*/, const struct checksums *);
+
+/* verify everything accounted so far still fits */
+retvalue space_check(/*@null@*/struct devices *);
+
+void space_free(/*@only@*//*@null@*/struct devices *);
+
+#endif
diff --git a/globals.h b/globals.h
new file mode 100644
index 0000000..4af3670
--- /dev/null
+++ b/globals.h
@@ -0,0 +1,111 @@
+#ifndef REPREPRO_GLOBALS_H
+#define REPREPRO_GLOBALS_H
+
+#include <string.h>
+
+/* bool support: either a plain substitute (presumably for checker
+ * builds that choke on the real thing -- see the macro name) or
+ * <stdbool.h> when available */
+#ifdef AVOID_CHECKPROBLEMS
+# define bool _Bool
+# define true (1==1)
+# define false (0==42)
+/* avoid problems with __builtin_expect being long instead of boolean */
+# define __builtin_expect(a, b) (a)
+# define __builtin_constant_p(a) (__builtin_constant_p(a) != 0)
+#else
+# if HAVE_STDBOOL_H
+# include <stdbool.h>
+# else
+# if ! HAVE__BOOL
+typedef int _Bool;
+# endif
+# define true (1==1)
+# define false (0==42)
+# endif
+#endif
+
+/* <ctype.h> wrappers normalising the result to 0/1 */
+#define xisspace(c) (isspace(c)!=0)
+#define xisblank(c) (isblank(c)!=0)
+#define xisdigit(c) (isdigit(c)!=0)
+
+#define READONLY true
+#define READWRITE false
+
+#define ISSET(a, b) ((a & b) != 0)
+#define NOTSET(a, b) ((a & b) == 0)
+
+/* sometimes something is initialized though the value is never used, to
+ * work around some gcc uninitialized-use false-positives */
+#define SETBUTNOTUSED(a) a
+
+#ifdef SPLINT
+#define UNUSED(a) /*@unused@*/ a
+#define NORETURN
+#define likely(a) (a)
+#define unlikely(a) (a)
+#else
+#define likely(a) (!(__builtin_expect(!(a), false)))
+#define unlikely(a) __builtin_expect(a, false)
+#define NORETURN __attribute((noreturn))
+#ifndef NOUNUSEDATTRIBUTE
+#define UNUSED(a) a __attribute((unused))
+#else
+#define UNUSED(a) a
+#endif
+#endif
+
+#define ARRAYCOUNT(a) (sizeof(a)/sizeof(a[0]))
+
+/* precedence of configuration sources; higher wins */
+enum config_option_owner { CONFIG_OWNER_DEFAULT=0,
+	CONFIG_OWNER_FILE,
+	CONFIG_OWNER_ENVIRONMENT,
+	CONFIG_OWNER_CMDLINE};
+#ifndef _D_EXACT_NAMLEN
+#define _D_EXACT_NAMLEN(r) (strlen((r)->d_name))
+#endif
+/* for systems defining NULL to 0 instead of the nicer (void*)0 */
+#define ENDOFARGUMENTS ((char *)0)
+
+/* global information */
+extern int verbose;
+extern struct global_config {
+	const char *basedir;
+	const char *dbdir;
+	const char *outdir;
+	const char *distdir;
+	const char *confdir;
+	const char *methoddir;
+	const char *logdir;
+	const char *listdir;
+	const char *morguedir;
+	/* flags: */
+	bool keepdirectories;
+	bool keeptemporaries;
+	bool onlysmalldeletes;
+	/* verbosity of downloading statistics */
+	int showdownloadpercent;
+} global;
+
+enum compression { c_none, c_gzip, c_bzip2, c_lzma, c_xz, c_lunzip, c_zstd, c_COUNT };
+
+/* allocation helpers: NEW = malloc, zNEW = zeroed, n* = arrays */
+#define setzero(type, pointer) ({type *__var = pointer; memset(__var, 0, sizeof(type));})
+#define NEW(type) ((type *)malloc(sizeof(type)))
+#define nNEW(num, type) ((type *)malloc((num) * sizeof(type)))
+#define zNEW(type) ((type *)calloc(1, sizeof(type)))
+#define nzNEW(num, type) ((type *)calloc(num, sizeof(type)))
+#define arrayinsert(type, array, position, length) ({type *__var = array; memmove(__var + (position) + 1, __var + (position), sizeof(type) * ((length) - (position)));})
+
+// strcmp2 behaves like strcmp, but allows both strings to be NULL
+/* NOTE(review): plain (non-static) `inline` in a header requires
+ * exactly one translation unit to provide an external definition
+ * (C99 6.7.4); presumably some .c file does -- verify */
+inline int strcmp2(const char *s1, const char *s2) {
+	if (s1 == NULL || s2 == NULL) {
+		if (s1 == NULL && s2 == NULL) {
+			return 0;
+		} else if (s1 == NULL) {
+			return -1;
+		} else {
+			return 1;
+		}
+	} else {
+		return strcmp(s1, s2);
+	}
+}
+
+#endif
diff --git a/globmatch.c b/globmatch.c
new file mode 100644
index 0000000..75f52d9
--- /dev/null
+++ b/globmatch.c
@@ -0,0 +1,187 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <limits.h>
+#include <stdint.h>
+#include <string.h>
+#ifdef TEST_GLOBMATCH
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+#include "error.h"
+#include "globmatch.h"
+
#ifdef NOPARANOIA
#define Assert(a) /* */
#else
#define Assert(a) assert(a)
#endif

/* check if a string matches a pattern; the pattern may contain
   '*' (any sequence, including empty), '?' (any single character) and
   [...] character classes (with ranges and '!'/'^' negation).

   This algorithm should be in O( strlen(pattern) * strlen(string) ):
   possible[i] records whether the prefix of the string consumed so far
   can match the first i characters of the pattern; smallest_possible and
   largest_possible bound the indices that can still be true.
*/

bool globmatch(const char *string, const char *pattern) {
	size_t pattern_length = strlen(pattern);
	int i, l;
	int smallest_possible = 0, largest_possible = 0;
	const char *p;

	/* reject oversized patterns before truncating the length to int
	 * and before sizing the VLA below (previously this check ran only
	 * after both had already happened, which is undefined behaviour
	 * for patterns longer than INT_MAX) */
	if (pattern_length > (size_t)INT_MAX)
		return false;
	l = (int)pattern_length;

	bool possible[ l + 1 ];

	memset(possible, 0, sizeof(possible));
	/* the first character must match the first pattern character
	   or the first one after the first star */
	possible[smallest_possible] = true;
	while (pattern[largest_possible] == '*')
		largest_possible++;
	Assert (largest_possible <= l);
	possible[largest_possible] = true;

	for (p = string ; *p != '\0' ; p++) {
		Assert (largest_possible >= smallest_possible);
		for (i = largest_possible ; i >= smallest_possible ; i--) {
			if (!possible[i])
				continue;
			/* no character matches the end of the pattern: */
			if (pattern[i] == '\0') {
				Assert (i == l);
				possible[i] = false;
				do {
					if (largest_possible <=
							smallest_possible)
						return false;
					largest_possible--;
				} while (!possible[largest_possible]);
				i = largest_possible + 1;
				continue;
			}
			Assert (i < l);
			if (pattern[i] == '*') {
				int j = i + 1;

				while (pattern[j] == '*')
					j++;
				/* all the '*' match one character: */
				Assert (j <= l);
				possible[j] = true;
				if (j > largest_possible)
					largest_possible = j;
				/* or more than one */
				continue;
			}
			if (pattern[i] == '[') {
				int j = i+1;
				bool matches = false, negate = false;

				if (pattern[j] == '!' || pattern[j] == '^') {
					j++;
					negate = true;
				}
				if (pattern[j] == '\0')
					return false;
				do {
					/* a-b denotes a range, unless the '-'
					 * is the last class member */
					if (pattern[j+1] == '-' &&
					    pattern[j+2] != ']' &&
					    pattern[j+2] != '\0') {
						if (*p >= pattern[j] &&
						    *p <= pattern[j+2])
							matches = true;
						j += 3;
					} else {
						if (*p == pattern[j])
							matches = true;
						j++;
					}
					if (pattern[j] == '\0') {
						/* stray [ matches nothing */
						return false;
					}
				} while (pattern[j] != ']');
				j++;
				Assert (j <= l);
				if (negate)
					matches = !matches;
				if (matches) {
					possible[j] = true;
					/* if the next character is a star,
					   that might also match 0 characters */
					while (pattern[j] == '*')
						j++;
					Assert (j <= l);
					possible[j] = true;
					if (j > largest_possible)
						largest_possible = j;
				}
			} else if (pattern[i] == '?' || pattern[i] == *p) {
				int j = i + 1;
				possible[j] = true;
				/* if the next character is a star,
				   that might also match 0 characters */
				while (pattern[j] == '*')
					j++;
				Assert (j <= l);
				possible[j] = true;
				if (j > largest_possible)
					largest_possible = j;
			}
			possible[i] = false;
			if (i == smallest_possible) {
				smallest_possible++;
				while (!possible[smallest_possible]) {
					if (smallest_possible >=
							largest_possible)
						return false;
					smallest_possible++;
				}
				Assert (smallest_possible <= l);
			}
			if (i == largest_possible) {
				do {
					if (largest_possible <=
							smallest_possible)
						return false;
					largest_possible--;
				} while (!possible[largest_possible]);
				Assert (largest_possible >= 0);
			}
		}
	}
	/* end of string matches end of pattern,
	   if largest got < smallest, then this is also false */
	return possible[l];
}
+
#ifdef TEST_GLOBMATCH
/* tiny manual test driver, only built when TEST_GLOBMATCH is defined:
 * usage: <prog> <pattern> <string>; prints "true" or "false" */
int main(int argc, const char *argv[]) {
	if (argc != 3) {
		fputs("Wrong number of arguments!\n", stderr);
		exit(EXIT_FAILURE);
	}
	/* note the swap: globmatch takes (string, pattern) */
	if (globmatch(argv[2], argv[1])) {
		puts("true");
		return 0;
	} else {
		puts("false");
		return 0;
	}
}
#endif
diff --git a/globmatch.h b/globmatch.h
new file mode 100644
index 0000000..0ede2ec
--- /dev/null
+++ b/globmatch.h
@@ -0,0 +1,7 @@
#ifndef REPREPRO_GLOBMATCH_H
#define REPREPRO_GLOBMATCH_H

/* the prototype below uses bool; include it explicitly so this header
 * is self-contained instead of relying on includers to provide it */
#include <stdbool.h>

/* returns true if string matches pattern ('*', '?' and [...] supported) */
bool globmatch(const char * /*string*/, const char */*pattern*/);

#endif
+
diff --git a/guesscomponent.c b/guesscomponent.c
new file mode 100644
index 0000000..8a13fc7
--- /dev/null
+++ b/guesscomponent.c
@@ -0,0 +1,115 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <strings.h>
+#include <stdio.h>
+#include "error.h"
+#include "guesscomponent.h"
+
+/* Guess which component to use:
+ * - if the user gave one, use that one.
+ * - if the section is a componentname, use this one
+ * - if the section starts with a componentname/, use this one
+ * - if the section ends with a /componentname, use this one
+ * - if the section/ is the start of a componentname, use this one
+ * - use the first component in the list
+ */
+
+retvalue guess_component(const char *codename, const struct atomlist *components, const char *package, const char *section, component_t givencomponent, component_t *guess) {
+ int i;
+ size_t section_len;
+
+ if (verbose >= 15) {
+ fprintf(stderr, "trace: guess_component(codename=%s, components=[", codename);
+ (void)atomlist_fprint(stderr, at_component, components);
+ fprintf(stderr, "], package=%s, section=%s, givencomponent=%s) called.\n",
+ package, section, atoms_components[givencomponent]);
+ }
+
+ if (atom_defined(givencomponent)) {
+ if (!atomlist_in(components, givencomponent)) {
+ (void)fprintf(stderr,
+"Could not find '%s' in components of '%s': '",
+ atoms_components[givencomponent],
+ codename);
+ (void)atomlist_fprint(stderr,
+ at_component, components);
+ (void)fputs("'\n", stderr);
+ return RET_ERROR;
+ }
+ *guess = givencomponent;
+ return RET_OK;
+ }
+ if (section == NULL) {
+ fprintf(stderr,
+"Found no section for '%s', so I cannot guess the component to put it in!\n",
+ package);
+ return RET_ERROR;
+ }
+ if (components->count <= 0) {
+ fprintf(stderr,
+"I do not find any components in '%s', so there is no chance I cannot even take one by guessing!\n",
+ codename);
+ return RET_ERROR;
+ }
+ section_len = strlen(section);
+
+ for (i = 0 ; i < components->count ; i++) {
+ const char *component = atoms_components[components->atoms[i]];
+
+ if (strcmp(section, component) == 0) {
+ *guess = components->atoms[i];
+ return RET_OK;
+ }
+ }
+ for (i = 0 ; i < components->count ; i++) {
+ const char *component = atoms_components[components->atoms[i]];
+ size_t len = strlen(component);
+
+ if (len<section_len && section[len] == '/' &&
+ strncmp(section, component, len) == 0) {
+ *guess = components->atoms[i];
+ return RET_OK;
+ }
+ }
+ for (i = 0 ; i < components->count ; i++) {
+ const char *component = atoms_components[components->atoms[i]];
+ size_t len = strlen(component);
+
+ if (len<section_len && section[section_len-len-1] == '/' &&
+ strncmp(section+section_len-len, component, len)
+ == 0) {
+ *guess = components->atoms[i];
+ return RET_OK;
+ }
+ }
+ for (i = 0 ; i < components->count ; i++) {
+ const char *component = atoms_components[components->atoms[i]];
+
+ if (strncmp(section, component, section_len) == 0 &&
+ component[section_len] == '/') {
+ *guess = components->atoms[i];
+ return RET_OK;
+ }
+
+ }
+ *guess = components->atoms[0];
+ return RET_OK;
+}
diff --git a/guesscomponent.h b/guesscomponent.h
new file mode 100644
index 0000000..a91daa0
--- /dev/null
+++ b/guesscomponent.h
@@ -0,0 +1,14 @@
+#ifndef REPREPRO_GUESSCOMPONENT_H
+#define REPREPRO_GUESSCOMPONENT_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+retvalue guess_component(const char * /*codename*/, const struct atomlist * /*components*/, const char * /*package*/, const char * /*section*/, component_t, /*@out@*/component_t *);
+
+#endif
diff --git a/hooks.c b/hooks.c
new file mode 100644
index 0000000..6f89516
--- /dev/null
+++ b/hooks.c
@@ -0,0 +1,62 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+/* general helpers infrastructure for all hooks: */
+
+#include <config.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "error.h"
+#include "hooks.h"
+
+void sethookenvironment(const char *causing_file, const char *causing_rule, const char *suite_from, const char *exitcode) {
+ if (exitcode != NULL)
+ setenv("REPREPRO_EXIT_CODE", exitcode, true);
+ else
+ unsetenv("REPREPRO_EXIT_CODE");
+ if (causing_file != NULL)
+ setenv("REPREPRO_CAUSING_FILE", causing_file, true);
+ else
+ unsetenv("REPREPRO_CAUSING_FILE");
+ if (causing_rule != NULL)
+ setenv("REPREPRO_CAUSING_RULE", causing_rule, true);
+ else
+ unsetenv("REPREPRO_CAUSING_RULE");
+ if (suite_from != NULL)
+ setenv("REPREPRO_FROM", suite_from, true);
+ else
+ unsetenv("REPREPRO_FROM");
+ if (atom_defined(causingcommand))
+ setenv("REPREPRO_CAUSING_COMMAND",
+ atoms_commands[causingcommand],
+ true);
+ else
+ unsetenv("REPREPRO_CAUSING_COMMAND");
+ setenv("REPREPRO_BASE_DIR", global.basedir, true);
+ setenv("REPREPRO_OUT_DIR", global.outdir, true);
+ setenv("REPREPRO_CONF_DIR", global.confdir, true);
+ setenv("REPREPRO_CONFIG_DIR", global.confdir, true);
+ setenv("REPREPRO_DIST_DIR", global.distdir, true);
+ setenv("REPREPRO_LOG_DIR", global.logdir, true);
+}
+
/* global variables to denote current state, read by hooks/loggers
 * to describe what triggered the current action */
const char *causingfile = NULL; /* only valid while being called */
command_t causingcommand = atom_unknown; /* valid till end of program */
+
diff --git a/hooks.h b/hooks.h
new file mode 100644
index 0000000..dc3681f
--- /dev/null
+++ b/hooks.h
@@ -0,0 +1,16 @@
+#ifndef REPREPRO_HOOKS_H
+#define REPREPRO_HOOKS_H
+
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+/* the command currently processed (may not changed till all loggers are run) */
+extern command_t causingcommand;
+/* file causing the current actions (may change so may need to be saved for queued actions)*/
+extern /*@null@*/ const char *causingfile;
+
+/* for other hooks */
+void sethookenvironment(/*@null@*/const char *, /*@null@*/const char *, /*@null@*/const char *, /*@null@*/const char *);
+
+#endif
diff --git a/ignore.c b/ignore.c
new file mode 100644
index 0000000..bff54b3
--- /dev/null
+++ b/ignore.c
@@ -0,0 +1,101 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "ignore.h"
+
/* how often each ignorable problem type was hit */
int ignored[IGN_COUNT];
/* whether each ignorable problem type is currently being ignored */
bool ignore[IGN_COUNT];
/* who set each flag last; a higher owner cannot be overridden by a lower */
static enum config_option_owner owner_ignore[IGN_COUNT];

/* the type names, generated from the VALID_IGNORES x-macro in ignore.h */
static const char * const ignores[] = {
#define IGN(what) #what ,
	VALID_IGNORES
#undef IGN
};
+
+bool print_ignore_type_message(bool i, enum ignore what) {
+ ignored[what]++;
+ if (ignore[what])
+ fprintf(stderr, "%s as --ignore=%s given.\n",
+ i ? "Ignoring" : "Not rejecting",
+ ignores[what]);
+ else
+ fprintf(stderr, "To ignore use --ignore=%s.\n",
+ ignores[what]);
+ return ignore[what];
+}
+
/* set a single ignore flag named by the first -len- bytes of -given-
 * (not necessarily NUL-terminated there) to -newvalue-, unless a more
 * authoritative owner already set it; an unknown name is itself an
 * ignorable error (--ignore=ignore) */
static retvalue set(const char *given, size_t len, bool newvalue, enum config_option_owner newowner) {
	int i;

	//TODO: allow multiple values separated by some sign here...

	for (i = 0 ; i < IGN_COUNT ; i++) {
		if (strncmp(given, ignores[i], len) == 0 &&
		    ignores[i][len] == '\0') {
			/* only overwrite when the new setter has at least
			 * the precedence of the previous one */
			if (owner_ignore[i] <= newowner) {
				ignore[i] = newvalue;
				owner_ignore[i] = newowner;
			}
			break;
		}
	}
	if (i == IGN_COUNT) {
		/* str may be NULL on allocation failure; the message then
		 * falls back to the full remaining option string */
		char *str = strndup(given, len);
		if (IGNORING(ignore,
"Unknown --ignore value: '%s'!\n", (str!=NULL)?str:given)) {
			free(str);
			return RET_NOTHING;
		} else {
			free(str);
			return RET_ERROR;
		}
	} else
		return RET_OK;
}
+
+retvalue set_ignore(const char *given, bool newvalue, enum config_option_owner newowner) {
+ const char *g, *p;
+ retvalue r;
+
+ assert (given != NULL);
+
+ g = given;
+
+ while (true) {
+ p = g;
+ while (*p != '\0' && *p != ',')
+ p++;
+ if (p == g) {
+ fprintf(stderr,
+"Empty ignore option in --ignore='%s'!\n",
+ given);
+ return RET_ERROR_MISSING;
+ }
+ r = set(g, p - g, newvalue, newowner);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (*p == '\0')
+ return RET_OK;
+ g = p+1;
+ }
+}
diff --git a/ignore.h b/ignore.h
new file mode 100644
index 0000000..d2c0c5d
--- /dev/null
+++ b/ignore.h
@@ -0,0 +1,67 @@
+#ifndef REPREPRO_FORCE_H
+#define REPREPRO_FORCE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#endif
+
/* all problem types that can be ignored with --ignore=<name>; this
 * x-macro list generates both the enum below and the name table and
 * counters in ignore.c */
#define VALID_IGNORES \
	IGN(ignore) \
	IGN(forbiddenchar) \
	IGN(8bit) \
	IGN(emptyfilenamepart) \
	IGN(spaceonlyline) \
	IGN(malformedchunk) \
	IGN(unknownfield) \
	IGN(wrongdistribution) \
	IGN(missingfield) \
	IGN(brokenold) \
	IGN(brokenversioncmp) \
	IGN(extension) \
	IGN(unusedarch) \
	IGN(surprisingarch) \
	IGN(surprisingbinary) \
	IGN(wrongsourceversion) \
	IGN(wrongversion) \
	IGN(dscinbinnmu) \
	IGN(brokensignatures) \
	IGN(uploaders) \
	IGN(undefinedtarget) \
	IGN(undefinedtracking) \
	IGN(unusedoption) \
	IGN(flatandnonflat) \
	IGN(expiredkey) \
	IGN(revokedkey) \
	IGN(expiredsignature) \
	IGN(wrongarchitecture) \
	IGN(oldfile) \
	IGN(longkeyid) \
	IGN(missingfile) \
	IGN(conflictingarchall)


enum ignore {
#define IGN(what) IGN_ ## what,
	VALID_IGNORES
#undef IGN

	IGN_COUNT
};

/* defined in ignore.c: per-type hit counters and current flags */
extern int ignored[IGN_COUNT];
extern bool ignore[IGN_COUNT];

/* Having that as function avoids those strings to be duplicated everywhere */
bool print_ignore_type_message(bool, enum ignore);

/* print a message, record the hit, and evaluate to true when the
 * problem type -what- is currently being ignored */
#define IGNORING__(ignoring, what, ...) ({ \
	fprintf(stderr, ## __VA_ARGS__); \
	print_ignore_type_message(ignoring, IGN_ ## what ); \
})
#define IGNORING(what, ...) IGNORING__(true, what, __VA_ARGS__)
#define IGNORING_(what, ...) IGNORING__(false, what, __VA_ARGS__)
/* test whether a type may be ignored */
#define IGNORABLE(what) ignore[IGN_ ## what]

retvalue set_ignore(const char *, bool, enum config_option_owner);
+
+#endif
diff --git a/incoming.c b/incoming.c
new file mode 100644
index 0000000..b1ee8a0
--- /dev/null
+++ b/incoming.c
@@ -0,0 +1,2643 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2008,2009,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <strings.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <dirent.h>
+#include <time.h>
+#include <sys/stat.h>
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "filecntl.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "names.h"
+#include "checksums.h"
+#include "chunks.h"
+#include "target.h"
+#include "signature.h"
+#include "binaries.h"
+#include "sources.h"
+#include "dpkgversions.h"
+#include "uploaderslist.h"
+#include "guesscomponent.h"
+#include "log.h"
+#include "override.h"
+#include "tracking.h"
+#include "incoming.h"
+#include "files.h"
+#include "configparser.h"
+#include "byhandhook.h"
+#include "changes.h"
+
/* checks that may be relaxed per incoming rule via 'Permit:' */
enum permitflags {
	/* do not error out on unused files */
	pmf_unused_files = 0,
	/* do not error out if there already is a newer package */
	pmf_oldpackagenewer,
	/* do not error out if there are unadvertised binary files */
	pmf_unlistedbinaries,
	pmf_COUNT /* must be last */
};
/* deletion policies selectable per incoming rule via 'Cleanup:' */
enum cleanupflags {
	/* delete everything referenced by a .changes file
	 * when it is not accepted */
	cuf_on_deny = 0,
	/* check owner when deleting on_deny */
	cuf_on_deny_check_owner,
	/* delete everything referenced by a .changes on errors
	 * after accepting that .changes file*/
	cuf_on_error,
	/* delete unused files after successfully
	 * processing the used ones */
	cuf_unused_files,
	/* same but restricted to .buildinfo files */
	cuf_unused_buildinfo_files,
	cuf_COUNT /* must be last */
};
/* behaviour switches selectable per incoming rule via 'Options:' */
enum optionsflags {
	/* only put _all.deb comes with those of some architecture,
	 * only put in those architectures */
	iof_limit_arch_all = 0,
	/* allow .changes file to specify multiple distributions */
	iof_multiple_distributions,
	iof_COUNT /* must be last */
};

/* one parsed rule from conf/incoming describing an incoming queue */
struct incoming {
	/* by incoming_parse: */
	char *name;
	char *directory;
	char *morguedir;
	char *tempdir;
	char *logdir;
	struct strlist allow;
	struct distribution **allow_into; /* parallel to allow */
	struct distribution *default_into;
	/* by incoming_prepare: */
	struct strlist files; /* directory listing of the queue */
	bool *processed; /* per files entry */
	bool *delete; /* per files entry */
	bool permit[pmf_COUNT];
	bool cleanup[cuf_COUNT];
	bool options[iof_COUNT];
	/* only to ease parsing: */
	const char *filename; /* only valid while parsing! */
	size_t lineno;
};
/* basename of the file at offset -ofs- in the queue's directory listing */
#define BASENAME(i, ofs) (i)->files.values[ofs]
/* the changes file is always the first one listed */
#define changesfile(c) (c->files)
+
+static void incoming_free(/*@only@*/ struct incoming *i) {
+ if (i == NULL)
+ return;
+ free(i->name);
+ free(i->morguedir);
+ free(i->tempdir);
+ free(i->logdir);
+ free(i->directory);
+ strlist_done(&i->allow);
+ free(i->allow_into);
+ strlist_done(&i->files);
+ free(i->processed);
+ free(i->delete);
+ free(i);
+}
+
/* scan the incoming directory into i->files and allocate the parallel
 * processed/delete flag arrays; also creates the temp directory */
static retvalue incoming_prepare(struct incoming *i) {
	DIR *dir;
	struct dirent *ent;
	retvalue r;
	int ret;

	/* TODO: decide whether to clean this directory first ... */
	r = dirs_make_recursive(i->tempdir);
	if (RET_WAS_ERROR(r))
		return r;
	dir = opendir(i->directory);
	if (dir == NULL) {
		int e = errno;
		fprintf(stderr, "Cannot scan '%s': %s\n",
				i->directory, strerror(e));
		return RET_ERRNO(e);
	}
	while ((ent = readdir(dir)) != NULL) {
		/* skip hidden files, which also skips '.' and '..' */
		if (ent->d_name[0] == '.')
			continue;
		/* this should be impossible to hit.
		 * but given utf-8 encoding filesystems and
		 * overlong slashes, better check than be sorry */
		if (strchr(ent->d_name, '/') != NULL)
			continue;
		r = strlist_add_dup(&i->files, ent->d_name) ;
		if (RET_WAS_ERROR(r)) {
			(void)closedir(dir);
			return r;
		}
	}
	ret = closedir(dir);
	if (ret != 0) {
		int e = errno;
		fprintf(stderr, "Error scanning '%s': %s\n",
				i->directory, strerror(e));
		return RET_ERRNO(e);
	}
	/* one flag per listed file; calloc'd, so all start out false */
	i->processed = nzNEW(i->files.count, bool);
	if (FAILEDTOALLOC(i->processed))
		return RET_ERROR_OOM;
	i->delete = nzNEW(i->files.count, bool);
	if (FAILEDTOALLOC(i->delete))
		return RET_ERROR_OOM;
	return RET_OK;
}
+
/* closure threaded through the configfile parser while reading conf/incoming */
struct read_incoming_data {
	/*@temp@*/const char *name; /* the rule name we are looking for */
	/*@temp@*/struct distribution *distributions; /* to resolve names */
	struct incoming *i; /* result: the matching rule, once parsed */
};
+
+static retvalue translate(struct distribution *distributions, struct strlist *names, struct distribution ***r) {
+ struct distribution **d;
+ int j;
+
+ d = nzNEW(names->count, struct distribution *);
+ if (FAILEDTOALLOC(d))
+ return RET_ERROR_OOM;
+ for (j = 0 ; j < names->count ; j++) {
+ d[j] = distribution_find(distributions, names->values[j]);
+ if (d[j] == NULL) {
+ free(d);
+ return RET_ERROR;
+ }
+ }
+ *r = d;
+ return RET_OK;
+}
+
/* configparser hook: allocate a zeroed incoming rule before its
 * fields are filled in by the CFSETPROCs below */
CFstartparse(incoming) {
	CFstartparseVAR(incoming, result_p);
	struct incoming *i;

	i = zNEW(struct incoming);
	if (FAILEDTOALLOC(i))
		return RET_ERROR_OOM;
	*result_p = i;
	return RET_OK;
}
+
/* configparser hook: called after one complete rule was read; keeps it
 * only when it is the rule asked for (d->name), makes its directories
 * absolute and sanity-checks the combination of fields */
CFfinishparse(incoming) {
	CFfinishparseVARS(incoming, i, last, d);

	/* not the rule we were asked for (or aborted): discard silently */
	if (!complete || strcmp(i->name, d->name) != 0) {
		incoming_free(i);
		return RET_NOTHING;
	}
	if (d->i != NULL) {
		fprintf(stderr,
"Multiple definitions of '%s': first started at line %u of %s, second at line %u of %s!\n",
			d->name,
			(unsigned int)d->i->lineno, d->i->filename,
			config_firstline(iter), config_filename(iter));
		incoming_free(i);
		incoming_free(d->i);
		d->i = NULL;
		return RET_ERROR;
	}
	/* relative directories are interpreted below the base dir: */
	if (i->logdir != NULL && i->logdir[0] != '/') {
		char *n = calc_dirconcat(global.basedir, i->logdir);
		if (FAILEDTOALLOC(n)) {
			incoming_free(i);
			return RET_ERROR_OOM;
		}
		free(i->logdir);
		i->logdir = n;
	}
	if (i->morguedir != NULL && i->morguedir[0] != '/') {
		char *n = calc_dirconcat(global.basedir, i->morguedir);
		if (FAILEDTOALLOC(n)) {
			incoming_free(i);
			return RET_ERROR_OOM;
		}
		free(i->morguedir);
		i->morguedir = n;
	}
	if (i->tempdir[0] != '/') {
		char *n = calc_dirconcat(global.basedir, i->tempdir);
		if (FAILEDTOALLOC(n)) {
			incoming_free(i);
			return RET_ERROR_OOM;
		}
		free(i->tempdir);
		i->tempdir = n;
	}
	if (i->directory[0] != '/') {
		char *n = calc_dirconcat(global.basedir, i->directory);
		if (FAILEDTOALLOC(n)) {
			incoming_free(i);
			return RET_ERROR_OOM;
		}
		free(i->directory);
		i->directory = n;
	}
	/* without Allow or Default nothing could ever be accepted: */
	if (i->default_into == NULL && i->allow.count == 0) {
		fprintf(stderr,
"There is neither an 'Allow' nor a 'Default' definition in rule '%s'\n"
"(starting at line %u, ending at line %u of %s)!\n"
"Aborting as nothing would be let in.\n",
			d->name,
			config_firstline(iter), config_line(iter),
			config_filename(iter));
		incoming_free(i);
		return RET_ERROR;
	}
	/* a morgue dir is only ever used by a Cleanup action: */
	if (i->morguedir != NULL && !i->cleanup[cuf_on_deny]
	    && !i->cleanup[cuf_on_error]
	    && !i->cleanup[cuf_unused_buildinfo_files]
	    && !i->cleanup[cuf_unused_files]) {
		fprintf(stderr,
"Warning: There is a 'MorgueDir' but no 'Cleanup' to act on in rule '%s'\n"
"(starting at line %u, ending at line %u of %s)!\n",
			d->name,
			config_firstline(iter), config_line(iter),
			config_filename(iter));
	}

	d->i = i;
	i->filename = config_filename(iter);
	i->lineno = config_firstline(iter);
	/* only suppresses the last-unused warning: */
	*last = i;
	return RET_OK;
}
+
/* parse the 'Default:' field: the distribution uploads go into when
 * no 'Allow' entry matches */
CFSETPROC(incoming, default) {
	CFSETPROCVARS(incoming, i, d);
	char *default_into;
	retvalue r;

	r = config_getonlyword(iter, headername, NULL, &default_into);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	/* an unknown distribution name makes this rule an error
	 * (distribution_find returns NULL then) */
	i->default_into = distribution_find(d->distributions, default_into);
	free(default_into);
	return (i->default_into == NULL)?RET_ERROR:RET_OK;
}
+
/* parse the 'Allow:' field; config_getsplitwords fills two parallel
 * lists: i->allow (the names allowed in a .changes file) and the
 * distribution names they map to, which are then resolved to
 * i->allow_into (presumably 'name' or 'name>target' entries — see
 * config_getsplitwords for the exact syntax) */
CFSETPROC(incoming, allow) {
	CFSETPROCVARS(incoming, i, d);
	struct strlist allow_into;
	retvalue r;

	r = config_getsplitwords(iter, headername, &i->allow, &allow_into);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	assert (i->allow.count == allow_into.count);
	r = translate(d->distributions, &allow_into, &i->allow_into);
	strlist_done(&allow_into);
	if (RET_WAS_ERROR(r))
		return r;
	return RET_OK;
}
+
+CFSETPROC(incoming, permit) {
+ CFSETPROCVARS(incoming, i, d);
+ static const struct constant permitconstants[] = {
+ { "unused_files", pmf_unused_files},
+ { "older_version", pmf_oldpackagenewer},
+ { "unlisted_binaries", pmf_unlistedbinaries},
+ /* not yet implemented:
+ { "downgrade", pmf_downgrade},
+ */
+ { NULL, -1}
+ };
+
+ if (IGNORABLE(unknownfield))
+ return config_getflags(iter, headername, permitconstants,
+ i->permit, true, "");
+ else if (i->name == NULL)
+ return config_getflags(iter, headername, permitconstants,
+ i->permit, false,
+"\n(try put Name: before Permit: to ignore if it is from the wrong rule");
+ else if (strcmp(i->name, d->name) != 0)
+ return config_getflags(iter, headername, permitconstants,
+ i->permit, true,
+" (but not within the rule we are interested in.)");
+ else
+ return config_getflags(iter, headername, permitconstants,
+ i->permit, false,
+" (use --ignore=unknownfield to ignore this)\n");
+
+}
+
+CFSETPROC(incoming, cleanup) {
+ CFSETPROCVARS(incoming, i, d);
+ static const struct constant cleanupconstants[] = {
+ { "unused_files", cuf_unused_files},
+ { "unused_buildinfo_files", cuf_unused_buildinfo_files},
+ { "on_deny", cuf_on_deny},
+ /* not yet implemented
+ { "on_deny_check_owner", cuf_on_deny_check_owner},
+ */
+ { "on_error", cuf_on_error},
+ { NULL, -1}
+ };
+
+ if (IGNORABLE(unknownfield))
+ return config_getflags(iter, headername, cleanupconstants,
+ i->cleanup, true, "");
+ else if (i->name == NULL)
+ return config_getflags(iter, headername, cleanupconstants,
+ i->cleanup, false,
+"\n(try put Name: before Cleanup: to ignore if it is from the wrong rule");
+ else if (strcmp(i->name, d->name) != 0)
+ return config_getflags(iter, headername, cleanupconstants,
+ i->cleanup, true,
+" (but not within the rule we are interested in.)");
+ else
+ return config_getflags(iter, headername, cleanupconstants,
+ i->cleanup, false,
+" (use --ignore=unknownfield to ignore this)\n");
+}
+
+CFSETPROC(incoming, options) {
+ CFSETPROCVARS(incoming, i, d);
+ static const struct constant optionsconstants[] = {
+ { "limit_arch_all", iof_limit_arch_all},
+ { "multiple_distributions", iof_multiple_distributions},
+ { NULL, -1}
+ };
+
+ if (IGNORABLE(unknownfield))
+ return config_getflags(iter, headername, optionsconstants,
+ i->options, true, "");
+ else if (i->name == NULL)
+ return config_getflags(iter, headername, optionsconstants,
+ i->options, false,
+"\n(try put Name: before Options: to ignore if it is from the wrong rule");
+ else if (strcmp(i->name, d->name) != 0)
+ return config_getflags(iter, headername, optionsconstants,
+ i->options, true,
+" (but not within the rule we are interested in.)");
+ else
+ return config_getflags(iter, headername, optionsconstants,
+ i->options, false,
+" (use --ignore=unknownfield to ignore this)\n");
+}
+
/* simple field setters generated by configparser macros */
CFvalueSETPROC(incoming, name)
CFdirSETPROC(incoming, logdir)
CFdirSETPROC(incoming, tempdir)
CFdirSETPROC(incoming, morguedir)
CFdirSETPROC(incoming, directory)
/* 'Multiple:' maps a truth value onto options[iof_multiple_distributions] */
CFtruthSETPROC2(incoming, multiple, options[iof_multiple_distributions])

/* fields recognized in conf/incoming; CFr presumably marks required
 * fields — verify against configparser.h */
static const struct configfield incomingconfigfields[] = {
	CFr("Name", incoming, name),
	CFr("TempDir", incoming, tempdir),
	CFr("IncomingDir", incoming, directory),
	CF("MorgueDir", incoming, morguedir),
	CF("Default", incoming, default),
	CF("Allow", incoming, allow),
	CF("Multiple", incoming, multiple),
	CF("Options", incoming, options),
	CF("Cleanup", incoming, cleanup),
	CF("Permit", incoming, permit),
	CF("Logdir", incoming, logdir)
};
+
/* load the incoming rule named -name- from conf/incoming and prepare
 * its temp directory and file listing; on success *result owns it */
static retvalue incoming_init(struct distribution *distributions, const char *name, /*@out@*/struct incoming **result) {
	retvalue r;
	struct read_incoming_data imports;

	imports.name = name;
	imports.distributions = distributions;
	imports.i = NULL;

	r = configfile_parse("incoming", IGNORABLE(unknownfield),
			startparseincoming, finishparseincoming,
			"incoming rule",
			incomingconfigfields, ARRAYCOUNT(incomingconfigfields),
			&imports);
	if (RET_WAS_ERROR(r))
		return r;
	/* parsing succeeded but no rule with that name existed: */
	if (imports.i == NULL) {
		fprintf(stderr,
"No definition for '%s' found in '%s/incoming'!\n",
				name, global.confdir);
		return RET_ERROR_MISSING;
	}

	r = incoming_prepare(imports.i);
	if (RET_WAS_ERROR(r)) {
		incoming_free(imports.i);
		return r;
	}
	*result = imports.i;
	return r;
}
+
/* one .changes upload being processed: its parsed control data, the
 * files it references, and per-distribution lists of what to install */
struct candidate {
	/* from candidate_read */
	int ofs; /* index of the .changes file in incoming->files */
	char *control;
	struct signatures *signatures;
	/* from candidate_parse */
	char *source, *sourceversion, *changesversion;
	struct strlist distributions,
		       architectures,
		       binaries;
	bool isbinNMU;
	/* singly-linked list of all files the .changes references: */
	struct candidate_file {
		/* set by _addfileline */
		struct candidate_file *next;
		int ofs; /* to basename in struct incoming->files */
		filetype type;
		/* all NULL if it is the .changes itself,
		 * otherwise the data from the .changes for this file: */
		char *section;
		char *priority;
		architecture_t architecture;
		char *name;
		/* like above, but updated once files are copied */
		struct checksums *checksums;
		/* set later */
		bool used;
		char *tempfilename;
		/* distribution-unspecific contents of the packages */
		/* - only for FE_BINARY types: */
		struct deb_headers deb;
		/* - only for fe_DSC types */
		struct dsc_headers dsc;
		/* only valid while parsing */
		struct hashes h;
	} *files;
	/* what goes into each target distribution: */
	struct candidate_perdistribution {
		struct candidate_perdistribution *next;
		struct distribution *into;
		bool skip;
		struct candidate_package {
			/* a package is something installing files, including
			 * the pseudo-package for the .changes file, if that is
			 * to be included */
			struct candidate_package *next;
			const struct candidate_file *master;
			component_t component;
			packagetype_t packagetype;
			struct strlist filekeys;
			/* a list of pointers to the files belonging to those
			 * filekeys, NULL if it does not need linking/copying */
			const struct candidate_file **files;
			/* only for FE_PACKAGE: */
			char *control;
			/* only for fe_DSC */
			char *directory;
			/* true if skipped because already there or newer */
			bool skip;
		} *packages;
		struct byhandfile {
			struct byhandfile *next;
			const struct candidate_file *file;
			const struct byhandhook *hook;
		} *byhandhookstocall;
	} *perdistribution;
	/* the logsubdir, and the list of files to put there,
	 * otherwise both NULL */
	char *logsubdir;
	int logcount;
	const struct candidate_file **logfiles;
};
+
+/* Free one entry of a candidate's file list (ownership annotated only).
+ * Releases all metadata parsed from the .changes line and, when the file
+ * was copied into the incoming temp dir, unlinks and frees that copy. */
+static void candidate_file_free(/*@only@*/struct candidate_file *f) {
+	checksums_free(f->checksums);
+	free(f->section);
+	free(f->priority);
+	free(f->name);
+	/* deb/dsc headers are only populated for the matching file types */
+	if (FE_BINARY(f->type))
+		binaries_debdone(&f->deb);
+	if (f->type == fe_DSC)
+		sources_done(&f->dsc);
+	if (f->tempfilename != NULL) {
+		/* best-effort removal of the temporary copy; errors ignored */
+		(void)unlink(f->tempfilename);
+		free(f->tempfilename);
+		f->tempfilename = NULL;
+	}
+	free(f);
+}
+
+/* Free one (pseudo-)package entry; p->master is a borrowed pointer into
+ * the candidate's file list and is not freed here. */
+static void candidate_package_free(/*@only@*/struct candidate_package *p) {
+	free(p->control);
+	free(p->directory);
+	strlist_done(&p->filekeys);
+	/* only the pointer array is owned; the pointed-to files are not */
+	free(p->files);
+	free(p);
+}
+
+/* Free a whole candidate: control chunk, signatures, parsed header
+ * fields, all per-distribution package/byhand lists, the file list and
+ * the log bookkeeping. Safe to call with NULL. */
+static void candidate_free(/*@only@*/struct candidate *c) {
+	if (c == NULL)
+		return;
+	free(c->control);
+	signatures_free(c->signatures);
+	free(c->source);
+	free(c->sourceversion);
+	free(c->changesversion);
+	strlist_done(&c->distributions);
+	strlist_done(&c->architectures);
+	strlist_done(&c->binaries);
+	/* pop-and-free each singly linked list head until empty */
+	while (c->perdistribution != NULL) {
+		struct candidate_perdistribution *d = c->perdistribution;
+		c->perdistribution = d->next;
+
+		while (d->packages != NULL) {
+			struct candidate_package *p = d->packages;
+			d->packages = p->next;
+			candidate_package_free(p);
+		}
+		while (d->byhandhookstocall != NULL) {
+			struct byhandfile *h = d->byhandhookstocall;
+			d->byhandhookstocall = h->next;
+			/* h->file and h->hook are borrowed, only the node is freed */
+			free(h);
+		}
+		free(d);
+	}
+	while (c->files != NULL) {
+		struct candidate_file *f = c->files;
+		c->files = f->next;
+		candidate_file_free(f);
+	}
+	free(c->logsubdir);
+	/* array of borrowed pointers; the files themselves were freed above */
+	free(c->logfiles);
+	free(c);
+}
+
+/* Append a per-distribution record for 'distribution' to candidate c.
+ * Returns RET_NOTHING if the distribution is already listed, RET_OK on
+ * success, RET_ERROR_OOM on allocation failure. */
+static retvalue candidate_newdistribution(struct candidate *c, struct distribution *distribution) {
+	struct candidate_perdistribution *n, **pp = &c->perdistribution;
+
+	/* walk to the tail, bailing out early on a duplicate */
+	while (*pp != NULL) {
+		if ((*pp)->into == distribution)
+			return RET_NOTHING;
+		pp = &(*pp)->next;
+	}
+	n = zNEW(struct candidate_perdistribution);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->into = distribution;
+	*pp = n;
+	return RET_OK;
+}
+
+/* Allocate a new package record tied to 'master' (the file it stems
+ * from) and append it to the distribution's package list. Component and
+ * packagetype start out undefined (atom_unknown) and are filled in by
+ * the prepare_* functions. Returns NULL on allocation failure. */
+static struct candidate_package *candidate_newpackage(struct candidate_perdistribution *fordistribution, const struct candidate_file *master) {
+	struct candidate_package *n, **pp = &fordistribution->packages;
+
+	while (*pp != NULL)
+		pp = &(*pp)->next;
+	n = zNEW(struct candidate_package);
+	if (FAILEDTOALLOC(n))
+		return NULL;
+	n->component = atom_unknown;
+	n->packagetype = atom_unknown;
+	n->master = master;
+	*pp = n;
+	return n;
+}
+
+static retvalue candidate_usefile(const struct incoming *i, const struct candidate *c, struct candidate_file *file);
+
+/* Create a candidate for the .changes file at index 'ofs' in i->files:
+ * the first entry of the candidate's file list is the .changes file
+ * itself, which is copied to the temp dir and its (possibly signed)
+ * control chunk read. '*broken' is set by signature_readsignedchunk if
+ * the signature could not be verified cleanly. */
+static retvalue candidate_read(struct incoming *i, int ofs, struct candidate **result, bool *broken) {
+	struct candidate *n;
+	retvalue r;
+
+	n = zNEW(struct candidate);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->ofs = ofs;
+	/* first file of any .changes file is the file itself */
+	n->files = zNEW(struct candidate_file);
+	if (FAILEDTOALLOC(n->files)) {
+		free(n);
+		return RET_ERROR_OOM;
+	}
+	n->files->ofs = n->ofs;
+	n->files->type = fe_CHANGES;
+	/* copy the .changes file into the temp dir before parsing it */
+	r = candidate_usefile(i, n, n->files);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		candidate_free(n);
+		return r;
+	}
+	assert (n->files->tempfilename != NULL);
+	r = signature_readsignedchunk(n->files->tempfilename, BASENAME(i, ofs),
+			&n->control, &n->signatures, broken);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		candidate_free(n);
+		return r;
+	}
+	*result = n;
+	return RET_OK;
+}
+
+/* Parse one line of the .changes 'Files:' field into a new
+ * candidate_file and append it to c->files. The named file must already
+ * exist in the incoming directory (i->files), otherwise RET_ERROR_MISSING. */
+static retvalue candidate_addfileline(struct incoming *i, struct candidate *c, const char *fileline) {
+	struct candidate_file **p, *n;
+	char *basefilename;
+	retvalue r;
+
+	n = zNEW(struct candidate_file);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+
+	r = changes_parsefileline(fileline, &n->type, &basefilename,
+			&n->h.hashes[cs_md5sum], &n->h.hashes[cs_length],
+			&n->section, &n->priority, &n->architecture,
+			&n->name);
+	if (RET_WAS_ERROR(r)) {
+		/* NOTE(review): plain free assumes changes_parsefileline
+		 * allocates nothing on error — confirm, else fields leak */
+		free(n);
+		return r;
+	}
+	n->ofs = strlist_ofs(&i->files, basefilename);
+	if (n->ofs < 0) {
+		fprintf(stderr,
+"In '%s': file '%s' not found in the incoming dir!\n",
+			i->files.values[c->ofs], basefilename);
+		free(basefilename);
+		candidate_file_free(n);
+		return RET_ERROR_MISSING;
+	}
+	free(basefilename);
+
+	/* append at the tail to keep .changes listing order */
+	p = &c->files;
+	while (*p != NULL)
+		p = &(*p)->next;
+	*p = n;
+	return RET_OK;
+}
+
+/* Merge the hashes from a 'Checksums-Sha1:'/'Checksums-Sha256:' style
+ * field (type 'cs') into the files already collected from the 'Files:'
+ * (md5) field. Files listed only here are warned about and skipped; a
+ * size disagreement with the md5 line is a hard error. */
+static retvalue candidate_addhashes(struct incoming *i, struct candidate *c, enum checksumtype cs, const struct strlist *lines) {
+	int j;
+
+	for (j = 0 ; j < lines->count ; j++) {
+		const char *fileline = lines->values[j];
+		struct candidate_file *f;
+		const char *basefilename;
+		struct hash_data hash, size;
+		retvalue r;
+
+		r = hashline_parse(BASENAME(i, c->ofs), fileline, cs,
+				&basefilename, &hash, &size);
+		if (!RET_IS_OK(r))
+			return r;
+		/* find the entry created from the md5sum 'Files:' field */
+		f = c->files;
+		while (f != NULL && strcmp(BASENAME(i, f->ofs), basefilename) != 0)
+			f = f->next;
+		if (f == NULL) {
+			fprintf(stderr,
+"Warning: Ignoring file '%s' listed in '%s' but not in '%s' of '%s'!\n",
+				basefilename, changes_checksum_names[cs],
+				changes_checksum_names[cs_md5sum],
+				BASENAME(i, c->ofs));
+			continue;
+		}
+		/* sizes from all checksum fields must agree */
+		if (f->h.hashes[cs_length].len != size.len ||
+				memcmp(f->h.hashes[cs_length].start,
+					size.start, size.len) != 0) {
+			fprintf(stderr,
+"Error: Different size of '%s' listed in '%s' and '%s' of '%s'!\n",
+				basefilename, changes_checksum_names[cs],
+				changes_checksum_names[cs_md5sum],
+				BASENAME(i, c->ofs));
+			return RET_ERROR;
+		}
+		f->h.hashes[cs] = hash;
+	}
+	return RET_OK;
+}
+
+/* Turn the raw hash data gathered while parsing into proper checksums
+ * structs. The first entry (the .changes file itself) is skipped, as
+ * its checksums were already computed when it was copied. */
+static retvalue candidate_finalizechecksums(struct candidate *c) {
+	struct candidate_file *f;
+	retvalue r;
+
+	/* store collected hashes as checksums structs,
+	 * starting after .changes file: */
+	for (f = c->files->next ; f != NULL ; f = f->next) {
+		r = checksums_initialize(&f->checksums, f->h.hashes);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+/* Parse the control chunk of the .changes file into the candidate:
+ * Source (+version), Architecture, Binary, Version, Distribution, the
+ * 'Files:' list and any extended checksum fields. Also decides whether
+ * this upload looks like a binNMU (source and changes version differ). */
+static retvalue candidate_parse(struct incoming *i, struct candidate *c) {
+	retvalue r;
+	struct strlist filelines[cs_hashCOUNT];
+	enum checksumtype cs;
+	int j;
+/* R: propagate hard errors. E: additionally turn RET_NOTHING (missing
+ * field) into an error with a message naming the .changes file. */
+#define R if (RET_WAS_ERROR(r)) return r;
+#define E(err, ...) { \
+		if (r == RET_NOTHING) { \
+			fprintf(stderr, "In '%s': " err "\n", \
+					BASENAME(i, c->ofs), ## __VA_ARGS__); \
+			r = RET_ERROR; \
+		} \
+		if (RET_WAS_ERROR(r)) return r; \
+	}
+	r = chunk_getnameandversion(c->control, "Source", &c->source,
+			&c->sourceversion);
+	E("Missing 'Source' field!");
+	r = propersourcename(c->source);
+	/* NOTE(review): "Malforce" in these messages looks like a typo for
+	 * "Malformed"; left untouched as it is user-visible runtime text */
+	E("Malforce Source name!");
+	if (c->sourceversion != NULL) {
+		r = properversion(c->sourceversion);
+		E("Malforce Source Version number!");
+	}
+	r = chunk_getwordlist(c->control, "Architecture", &c->architectures);
+	E("Missing 'Architecture' field!");
+	/* 'Binary' may legitimately be absent (e.g. source-only uploads) */
+	r = chunk_getwordlist(c->control, "Binary", &c->binaries);
+	if (r == RET_NOTHING)
+		strlist_init(&c->binaries);
+	else if (RET_WAS_ERROR(r))
+		return r;
+	r = chunk_getvalue(c->control, "Version", &c->changesversion);
+	E("Missing 'Version' field!");
+	r = properversion(c->changesversion);
+	E("Malforce Version number!");
+	// TODO: logic to detect binNMUs to warn against sources?
+	if (c->sourceversion == NULL) {
+		/* no version in Source header: same as the upload version */
+		c->sourceversion = strdup(c->changesversion);
+		if (FAILEDTOALLOC(c->sourceversion))
+			return RET_ERROR_OOM;
+		c->isbinNMU = false;
+	} else {
+		int cmp;
+
+		r = dpkgversions_cmp(c->sourceversion, c->changesversion, &cmp);
+		R;
+		c->isbinNMU = cmp != 0;
+	}
+	r = chunk_getwordlist(c->control, "Distribution", &c->distributions);
+	E("Missing 'Distribution' field!");
+	/* md5 'Files:' field defines the canonical file list... */
+	r = chunk_getextralinelist(c->control,
+			changes_checksum_names[cs_md5sum],
+			&filelines[cs_md5sum]);
+	E("Missing '%s' field!", changes_checksum_names[cs_md5sum]);
+	for (j = 0 ; j < filelines[cs_md5sum].count ; j++) {
+		r = candidate_addfileline(i, c, filelines[cs_md5sum].values[j]);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&filelines[cs_md5sum]);
+			return r;
+		}
+	}
+	/* ...while the extended checksum fields only add hashes to it */
+	for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) {
+		r = chunk_getextralinelist(c->control,
+				changes_checksum_names[cs], &filelines[cs]);
+
+		if (RET_IS_OK(r))
+			r = candidate_addhashes(i, c, cs, &filelines[cs]);
+		else
+			strlist_init(&filelines[cs]);
+
+		if (RET_WAS_ERROR(r)) {
+			/* free all filelines initialized so far */
+			while (cs-- > cs_md5sum)
+				strlist_done(&filelines[cs]);
+			return r;
+		}
+	}
+	r = candidate_finalizechecksums(c);
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++)
+		strlist_done(&filelines[cs]);
+	R;
+	/* at least one file besides the .changes itself is required */
+	if (c->files == NULL || c->files->next == NULL) {
+		fprintf(stderr, "In '%s': Empty 'Files' section!\n",
+				BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Sanity checks run before distribution-specific processing: source
+ * name/version must be proper, every package file must have a known
+ * architecture, and that architecture must be listed in the .changes
+ * Architecture header. */
+static retvalue candidate_earlychecks(struct incoming *i, struct candidate *c) {
+	struct candidate_file *file;
+	retvalue r;
+
+	// TODO: allow being more permissive,
+	// that will need some more checks later, though
+	r = propersourcename(c->source);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = properversion(c->sourceversion);
+	if (RET_WAS_ERROR(r))
+		return r;
+	for (file = c->files ; file != NULL ; file = file->next) {
+		/* .changes/byhand/log files carry no architecture */
+		if (file->type != fe_CHANGES && file->type != fe_BYHAND &&
+				file->type != fe_LOG &&
+				!atom_defined(file->architecture)) {
+			fprintf(stderr,
+"'%s' contains '%s' not matching an valid architecture in any distribution known!\n",
+					BASENAME(i, c->ofs),
+					BASENAME(i, file->ofs));
+			return RET_ERROR;
+		}
+		if (!FE_PACKAGE(file->type))
+			continue;
+		assert (atom_defined(file->architecture));
+		if (strlist_in(&c->architectures,
+					atoms_architectures[file->architecture]))
+			continue;
+		fprintf(stderr,
+"'%s' is not listed in the Architecture header of '%s' but file '%s' looks like it!\n",
+				atoms_architectures[file->architecture],
+				BASENAME(i, c->ofs), BASENAME(i, file->ofs));
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Is used before any other candidate fields are set */
+/* Copy 'file' from the incoming dir into i->tempdir (idempotent) and
+ * record its checksums. If checksums were already expected (from the
+ * .changes), the copied data must match; extra hash types computed
+ * while copying are merged in. Filenames with 8-bit characters are
+ * rejected. */
+static retvalue candidate_usefile(const struct incoming *i, const struct candidate *c, struct candidate_file *file) {
+	const char *basefilename;
+	char *origfile, *tempfilename;
+	struct checksums *readchecksums;
+	retvalue r;
+	bool improves;
+	const char *p;
+
+	/* already copied earlier: nothing to do */
+	if (file->used && file->tempfilename != NULL)
+		return RET_OK;
+	assert(file->tempfilename == NULL);
+	basefilename = BASENAME(i, file->ofs);
+	for (p = basefilename; *p != '\0' ; p++) {
+		if ((0x80 & *(const unsigned char *)p) != 0) {
+			fprintf(stderr,
+"Invalid filename '%s' listed in '%s': contains 8-bit characters\n",
+					basefilename, BASENAME(i, c->ofs));
+			return RET_ERROR;
+		}
+	}
+	tempfilename = calc_dirconcat(i->tempdir, basefilename);
+	if (FAILEDTOALLOC(tempfilename))
+		return RET_ERROR_OOM;
+	origfile = calc_dirconcat(i->directory, basefilename);
+	if (FAILEDTOALLOC(origfile)) {
+		free(tempfilename);
+		return RET_ERROR_OOM;
+	}
+	/* copy and compute all hash types in one pass */
+	r = checksums_copyfile(tempfilename, origfile, true, &readchecksums);
+	free(origfile);
+	if (RET_WAS_ERROR(r)) {
+		free(tempfilename);
+		return r;
+	}
+	if (file->checksums == NULL) {
+		/* nothing expected (e.g. the .changes itself): just adopt */
+		file->checksums = readchecksums;
+		file->tempfilename = tempfilename;
+		file->used = true;
+		return RET_OK;
+	}
+	if (!checksums_check(file->checksums, readchecksums, &improves)) {
+		fprintf(stderr,
+"ERROR: File '%s' does not match expectations:\n",
+				basefilename);
+		checksums_printdifferences(stderr,
+				file->checksums, readchecksums);
+		checksums_free(readchecksums);
+		/* remove the bad temp copy again */
+		deletefile(tempfilename);
+		free(tempfilename);
+		return RET_ERROR_WRONG_MD5;
+	}
+	if (improves) {
+		/* the copy yielded hash types not yet known: merge them */
+		r = checksums_combine(&file->checksums, readchecksums, NULL);
+		if (RET_WAS_ERROR(r)) {
+			checksums_free(readchecksums);
+			deletefile(tempfilename);
+			free(tempfilename);
+			return r;
+		}
+	}
+	checksums_free(readchecksums);
+	file->tempfilename = tempfilename;
+	file->used = true;
+	return RET_OK;
+}
+
+/* Determine section, priority and target component for 'file' going
+ * into distribution 'into'. Override data wins over the values from the
+ * .changes line; a '$Component' override forces the component, which is
+ * otherwise guessed from the section. '-' or missing section/priority
+ * is an error. section_p/priority_p point into oinfo or file and must
+ * not be freed by the caller. */
+static inline retvalue getsectionprioritycomponent(const struct incoming *i, const struct candidate *c, const struct distribution *into, const struct candidate_file *file, const char *name, const struct overridedata *oinfo, /*@out@*/const char **section_p, /*@out@*/const char **priority_p, /*@out@*/component_t *component) {
+	retvalue r;
+	const char *section, *priority, *forcecomponent;
+	component_t fc;
+
+	section = override_get(oinfo, SECTION_FIELDNAME);
+	if (section == NULL) {
+		// TODO: warn about disparities here?
+		section = file->section;
+	}
+	if (section == NULL || strcmp(section, "-") == 0) {
+		fprintf(stderr, "No section found for '%s' ('%s' in '%s')!\n",
+				name,
+				BASENAME(i, file->ofs), BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+	priority = override_get(oinfo, PRIORITY_FIELDNAME);
+	if (priority == NULL) {
+		// TODO: warn about disparities here?
+		priority = file->priority;
+	}
+	if (priority == NULL || strcmp(priority, "-") == 0) {
+		fprintf(stderr, "No priority found for '%s' ('%s' in '%s')!\n",
+				name,
+				BASENAME(i, file->ofs), BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+
+	forcecomponent = override_get(oinfo, "$Component");
+	if (forcecomponent != NULL) {
+		fc = component_find(forcecomponent);
+		if (!atom_defined(fc)) {
+			fprintf(stderr,
+"Unknown component '%s' (in $Component in override file for '%s'\n",
+				forcecomponent, name);
+			return RET_ERROR;
+		}
+		/* guess_component will check if that is valid for this
+		 * distribution */
+	} else
+		fc = atom_unknown;
+	r = guess_component(into->codename, &into->components,
+			BASENAME(i, file->ofs), section,
+			fc, component);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	*section_p = section;
+	*priority_p = priority;
+	return RET_OK;
+}
+
+/* Read the control data of a binary (.deb/.ddeb/.udeb) file and verify
+ * it is consistent with the .changes: filename vs package name,
+ * architecture, source name and source version must all match. A
+ * package named in neither the Binary header nor permitted via
+ * pmf_unlistedbinaries is rejected ("-dbgsym" suffixes are stripped
+ * before that check). */
+static retvalue candidate_read_deb(struct incoming *i, struct candidate *c, struct candidate_file *file) {
+	retvalue r;
+	size_t l;
+	char *base;
+	const char *packagenametocheck;
+
+	r = binaries_readdeb(&file->deb, file->tempfilename);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (strcmp(file->name, file->deb.name) != 0) {
+		// TODO: add permissive thing to ignore this
+		fprintf(stderr,
+"Name part of filename ('%s') and name within the file ('%s') do not match for '%s' in '%s'!\n",
+			file->name, file->deb.name,
+			BASENAME(i, file->ofs), BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+	if (file->architecture != file->deb.architecture) {
+		// TODO: add permissive thing to ignore this in some cases
+		// but do not forget to look into into->architectures then
+		fprintf(stderr,
+"Architecture '%s' of '%s' does not match '%s' specified in '%s'!\n",
+				atoms_architectures[file->deb.architecture],
+				BASENAME(i, file->ofs),
+				atoms_architectures[file->architecture],
+				BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+	if (strcmp(c->source, file->deb.source) != 0) {
+		// TODO: add permissive thing to ignore this
+		// (beware if tracking is active)
+		fprintf(stderr,
+"Source header '%s' of '%s' and source name '%s' within the file '%s' do not match!\n",
+			c->source, BASENAME(i, c->ofs),
+			file->deb.source, BASENAME(i, file->ofs));
+		return RET_ERROR;
+	}
+	if (strcmp(c->sourceversion, file->deb.sourceversion) != 0) {
+		// TODO: add permissive thing to ignore this
+		// (beware if tracking is active)
+		fprintf(stderr,
+"Source version '%s' of '%s' and source version '%s' within the file '%s' do not match!\n",
+			c->sourceversion, BASENAME(i, c->ofs),
+			file->deb.sourceversion, BASENAME(i, file->ofs));
+		return RET_ERROR;
+	}
+
+	/* dbgsym packages are checked against their base package name.
+	 * Note: sizeof("dbgsym") == 7 == strlen("-dbgsym"), so
+	 * l - sizeof("dbgsym") is the offset where "-dbgsym" starts;
+	 * confusingly written but correct. */
+	packagenametocheck = file->deb.name;
+	l = strlen(file->deb.name);
+	if (l > sizeof("-dbgsym")-1 &&
+	    strcmp(file->deb.name + l - (sizeof("dbgsym")), "-dbgsym") == 0) {
+		base = strndup(file->deb.name, l - (sizeof("dbgsym")));
+		if (FAILEDTOALLOC(base))
+			return RET_ERROR_OOM;
+		packagenametocheck = base;
+	} else {
+		base = NULL;
+	}
+
+	if (! strlist_in(&c->binaries, packagenametocheck)
+	    && !i->permit[pmf_unlistedbinaries]) {
+		fprintf(stderr,
+"Name '%s' of binary '%s' is not listed in Binaries header of '%s'!\n"
+"(use Permit: unlisted_binaries in conf/incoming to ignore this error)\n",
+				packagenametocheck, BASENAME(i, file->ofs),
+				BASENAME(i, c->ofs));
+		free(base);
+		return RET_ERROR;
+	}
+	free(base);
+	r = properpackagename(file->deb.name);
+	if (RET_IS_OK(r))
+		r = propersourcename(file->deb.source);
+	if (RET_IS_OK(r))
+		r = properversion(file->deb.version);
+	if (RET_WAS_ERROR(r))
+		return r;
+	return RET_OK;
+}
+
+/* Parse a .dsc file and add the .dsc file itself (under its calculated
+ * pool basename) to its own file list with the checksums known from the
+ * .changes. */
+static retvalue candidate_read_dsc(struct incoming *i, struct candidate_file *file) {
+	retvalue r;
+	bool broken = false;
+	char *p;
+
+	r = sources_readdsc(&file->dsc, file->tempfilename,
+			BASENAME(i, file->ofs), &broken);
+	if (RET_WAS_ERROR(r))
+		return r;
+	p = calc_source_basename(file->dsc.name,
+			file->dsc.version);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	/* NOTE(review): assumes checksumsarray_include takes ownership
+	 * of p — confirm, otherwise p leaks here */
+	r = checksumsarray_include(&file->dsc.files, p, file->checksums);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	// TODO: take a look at "broken"...
+	return RET_OK;
+}
+
+/* Copy each package file (deb/ddeb/udeb/dsc) into the temp dir and
+ * parse its metadata; non-package files (.changes, byhand, logs,
+ * buildinfo) are skipped here. */
+static retvalue candidate_read_files(struct incoming *i, struct candidate *c) {
+	struct candidate_file *file;
+	retvalue r;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+
+		if (!FE_PACKAGE(file->type))
+			continue;
+		r = candidate_usefile(i, c, file);
+		if (RET_WAS_ERROR(r))
+			return r;
+		assert(file->tempfilename != NULL);
+
+		if (FE_BINARY(file->type))
+			r = candidate_read_deb(i, c, file);
+		else if (file->type == fe_DSC)
+			r = candidate_read_dsc(i, file);
+		else {
+			/* FE_PACKAGE should imply one of the above */
+			r = RET_ERROR;
+			assert (FE_BINARY(file->type) || file->type == fe_DSC);
+		}
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+/* Collect all .buildinfo files into one pseudo-package placed in the
+ * component of the first real package of this distribution. Returns
+ * RET_NOTHING when there are no buildinfo files. On error the partially
+ * built package stays linked into per->packages and is cleaned up by
+ * candidate_free. */
+static retvalue candidate_preparebuildinfos(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per) {
+	retvalue r;
+	struct candidate_package *package;
+	struct candidate_file *firstbuildinfo = NULL, *file;
+	component_t component = component_strange;
+	int count = 0;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		if (file->type == fe_BUILDINFO) {
+			count++;
+			if (firstbuildinfo == NULL)
+				firstbuildinfo = file;
+		}
+	}
+	if (count == 0)
+		return RET_NOTHING;
+
+	/* search for a component to use */
+	for (package = per->packages ; package != NULL ;
+	     package = package->next) {
+		if (atom_defined(package->component)) {
+			component = package->component;
+			break;
+		}
+	}
+	if (!atom_defined(component)) {
+		/* How can this happen? */
+		fprintf(stderr,
+"Found no component to put %s into. (Why is there a buildinfo processed without an corresponding package?)\n", firstbuildinfo->name);
+		return RET_ERROR;
+	}
+
+	/* pseudo package containing buildinfo files */
+	package = candidate_newpackage(per, firstbuildinfo);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+	r = strlist_init_n(count, &package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	package->files = nzNEW(count, const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		char *filekey;
+
+		if (file->type != fe_BUILDINFO)
+			continue;
+
+		r = candidate_usefile(i, c, file);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		// TODO: add same checks on the basename contents?
+
+		filekey = calc_filekey(component, c->source, BASENAME(i, file->ofs));
+		if (FAILEDTOALLOC(filekey))
+			return RET_ERROR_OOM;
+
+		/* RET_NOTHING means already in the pool: no copy needed,
+		 * so files[] stays NULL for that slot */
+		r = files_canadd(filekey, file->checksums);
+		if (RET_WAS_ERROR(r))
+			return r;
+		if (RET_IS_OK(r))
+			package->files[package->filekeys.count] = file;
+		/* list was pre-sized with strlist_init_n, so adding
+		 * cannot fail here */
+		r = strlist_add(&package->filekeys, filekey);
+		assert (r == RET_OK);
+	}
+	assert (package->filekeys.count == count);
+	return RET_OK;
+}
+
+
+
+/* Create the pseudo-package that stores the .changes file itself in the
+ * pool, under the component of the first real package. NOTE(review):
+ * unlike candidate_preparelogs there is no fallback if no package has a
+ * defined component — component may stay component_strange; confirm
+ * callers guarantee at least one real package exists. */
+static retvalue candidate_preparechangesfile(const struct candidate *c, struct candidate_perdistribution *per) {
+	retvalue r;
+	char *basefilename, *filekey;
+	struct candidate_package *package;
+	struct candidate_file *file;
+	component_t component = component_strange;
+	assert (c->files != NULL && c->files->ofs == c->ofs);
+
+	/* search for a component to use */
+	for (package = per->packages ; package != NULL ;
+	     package = package->next) {
+		if (atom_defined(package->component)) {
+			component = package->component;
+			break;
+		}
+	}
+	file = changesfile(c);
+
+	/* make sure the file is already copied */
+	assert (file->used);
+	assert (file->checksums != NULL);
+
+	/* pseudo package containing the .changes file */
+	package = candidate_newpackage(per, c->files);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+
+	/* the pool name is normalized, not the uploaded basename */
+	basefilename = calc_changes_basename(c->source, c->changesversion,
+			&c->architectures);
+	if (FAILEDTOALLOC(basefilename))
+		return RET_ERROR_OOM;
+
+	filekey = calc_filekey(component, c->source, basefilename);
+	free(basefilename);
+	if (FAILEDTOALLOC(filekey))
+		return RET_ERROR_OOM;
+
+	/* filekey ownership moves into the strlist */
+	r = strlist_init_singleton(filekey, &package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (package->filekeys.count == 1);
+	filekey = package->filekeys.values[0];
+	package->files = zNEW(const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+	r = files_canadd(filekey, file->checksums);
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* RET_NOTHING: already in pool, no copy necessary */
+	if (RET_IS_OK(r))
+		package->files[0] = file;
+	return RET_OK;
+}
+
+/* Prepare a binary package (.deb/.ddeb/.udeb) for inclusion into
+ * per->into: determine packagetype, override data, section/priority/
+ * component, the single filekey, and build the final control chunk.
+ * ddebs/udebs must go into a component listed in DDebComponents/
+ * UDebComponents respectively. */
+static retvalue prepare_deb(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, const struct candidate_file *file) {
+	const char *section, *priority;
+	const char *filekey;
+	const struct overridedata *oinfo;
+	struct candidate_package *package;
+	const struct distribution *into = per->into;
+	retvalue r;
+
+	assert (FE_BINARY(file->type));
+	assert (file->tempfilename != NULL);
+	assert (file->deb.name != NULL);
+
+	package = candidate_newpackage(per, file);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+	assert (file == package->master);
+	if (file->type == fe_DEB)
+		package->packagetype = pt_deb;
+	else if (file->type == fe_DDEB)
+		package->packagetype = pt_ddeb;
+	else
+		package->packagetype = pt_udeb;
+
+	/* we use the deb overrides for ddebs too - ddebs aren't
+	 * meant to have overrides so this is probably fine */
+	oinfo = override_search(file->type==fe_UDEB?into->overrides.udeb:
+	                                            into->overrides.deb,
+	                        file->name);
+
+	r = getsectionprioritycomponent(i, c, into, file,
+			file->name, oinfo,
+			&section, &priority, &package->component);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	if (file->type == fe_DDEB &&
+	    !atomlist_in(&into->ddebcomponents, package->component)) {
+		fprintf(stderr,
+"Cannot put file '%s' of '%s' into component '%s',\n"
+"as it is not listed in DDebComponents of '%s'!\n",
+				BASENAME(i, file->ofs), BASENAME(i, c->ofs),
+				atoms_components[package->component],
+				into->codename);
+		return RET_ERROR;
+	}
+	if (file->type == fe_UDEB &&
+	    !atomlist_in(&into->udebcomponents, package->component)) {
+		fprintf(stderr,
+"Cannot put file '%s' of '%s' into component '%s',\n"
+"as it is not listed in UDebComponents of '%s'!\n",
+				BASENAME(i, file->ofs), BASENAME(i, c->ofs),
+				atoms_components[package->component],
+				into->codename);
+		return RET_ERROR;
+	}
+	/* a binary package has exactly one file, hence one filekey */
+	r = binaries_calcfilekeys(package->component, &file->deb,
+			package->packagetype, &package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (package->filekeys.count == 1);
+	filekey = package->filekeys.values[0];
+	package->files = zNEW(const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+	r = files_canadd(filekey, file->checksums);
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* RET_NOTHING: already in the pool, no copy needed */
+	if (RET_IS_OK(r))
+		package->files[0] = file;
+	r = binaries_complete(&file->deb, filekey, file->checksums, oinfo,
+			section, priority, &package->control);
+	if (RET_WAS_ERROR(r))
+		return r;
+	return RET_OK;
+}
+
+/* Resolve one file referenced by a .dsc (filekey/basefilename with the
+ * checksums from the .dsc in *checksums_p): find it among the uploaded
+ * files (by name, or failing that by matching checksums), cross-check
+ * the checksums against the .changes, and decide whether it must be
+ * copied into the pool (*foundfile_p set) or is already there (file
+ * just marked used). *checksums_p may be extended with hash types only
+ * known from the upload. package_ofs names the .dsc for messages. */
+static retvalue prepare_source_file(const struct incoming *i, const struct candidate *c, const char *filekey, const char *basefilename, struct checksums **checksums_p, int package_ofs, /*@out@*/const struct candidate_file **foundfile_p){
+	struct candidate_file *f;
+	const struct checksums * const checksums = *checksums_p;
+	retvalue r;
+	bool improves;
+
+	/* look for an uploaded file with this exact basename */
+	f = c->files;
+	while (f != NULL && (f->checksums == NULL ||
+				strcmp(BASENAME(i, f->ofs), basefilename) != 0))
+		f = f->next;
+
+	if (f == NULL) {
+		/* RET_NOTHING here means already in the pool: done */
+		r = files_canadd(filekey, checksums);
+		if (!RET_IS_OK(r))
+			return r;
+		/* no file by this name and also no file with these
+		 * characteristics in the pool, look for differently-named
+		 * file with the same characteristics: */
+
+		f = c->files;
+		while (f != NULL && (f->checksums == NULL ||
+		                     !checksums_check(f->checksums,
+		                                      checksums, NULL)))
+			f = f->next;
+
+		if (f == NULL) {
+			fprintf(stderr,
+"file '%s' is needed for '%s', not yet registered in the pool and not found in '%s'\n",
+					basefilename, BASENAME(i, package_ofs),
+					BASENAME(i, c->ofs));
+			return RET_ERROR;
+		}
+		/* otherwise proceed with the found file: */
+	}
+
+	if (!checksums_check(f->checksums, checksums, &improves)) {
+		fprintf(stderr,
+"file '%s' has conflicting checksums listed in '%s' and '%s'!\n",
+				basefilename,
+				BASENAME(i, c->ofs),
+				BASENAME(i, package_ofs));
+		return RET_ERROR;
+	}
+	if (improves) {
+		/* put additional checksums from the .dsc to the information
+		 * found in .changes, so that a file matching those in .changes
+		 * but not in .dsc is detected */
+		r = checksums_combine(&f->checksums, checksums, NULL);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	r = files_canadd(filekey, f->checksums);
+	if (r == RET_NOTHING) {
+		/* already in the pool, mark as used (in the sense
+		 * of "only not needed because it is already there") */
+		f->used = true;
+
+	} else if (RET_IS_OK(r)) {
+		/* don't have this file in the pool, make sure it is ready
+		 * here */
+
+		r = candidate_usefile(i, c, f);
+		if (RET_WAS_ERROR(r))
+			return r;
+		// TODO: update checksums to now received checksums?
+		*foundfile_p = f;
+	}
+	if (!RET_WAS_ERROR(r) && !checksums_iscomplete(checksums)) {
+		/* update checksums so the source index can show them */
+		r = checksums_combine(checksums_p, f->checksums, NULL);
+	}
+	return r;
+}
+
+/* Prepare a source package (.dsc) for inclusion: reject binNMU-looking
+ * uploads, cross-check name/version against filename and .changes,
+ * verify every .asc file signs an actually listed file, resolve
+ * override/section/priority/component, compute the pool directory and
+ * filekeys (index 0 is the .dsc itself, the rest are resolved via
+ * prepare_source_file) and build the final control chunk. */
+static retvalue prepare_dsc(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, const struct candidate_file *file) {
+	const char *section, *priority;
+	const struct overridedata *oinfo;
+	struct candidate_package *package;
+	const struct distribution *into = per->into;
+	retvalue r;
+	int j;
+
+	assert (file->type == fe_DSC);
+	assert (file->tempfilename != NULL);
+	assert (file->dsc.name != NULL);
+
+	package = candidate_newpackage(per, file);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+	assert (file == package->master);
+	package->packagetype = pt_dsc;
+
+	if (c->isbinNMU) {
+		// TODO: add permissive thing to ignore this
+		fprintf(stderr,
+"Source package ('%s') in '%s', which look like a binNMU (as '%s' and '%s' differ)!\n",
+			BASENAME(i, file->ofs), BASENAME(i, c->ofs),
+			c->sourceversion, c->changesversion);
+		return RET_ERROR;
+	}
+
+	if (strcmp(file->name, file->dsc.name) != 0) {
+		// TODO: add permissive thing to ignore this
+		fprintf(stderr,
+"Name part of filename ('%s') and name within the file ('%s') do not match for '%s' in '%s'!\n",
+			file->name, file->dsc.name,
+			BASENAME(i, file->ofs), BASENAME(i, c->ofs));
+		return RET_ERROR;
+	}
+	if (strcmp(c->source, file->dsc.name) != 0) {
+		// TODO: add permissive thing to ignore this
+		// (beware if tracking is active)
+		fprintf(stderr,
+"Source header '%s' of '%s' and name '%s' within the file '%s' do not match!\n",
+			c->source, BASENAME(i, c->ofs),
+			file->dsc.name, BASENAME(i, file->ofs));
+		return RET_ERROR;
+	}
+	if (strcmp(c->sourceversion, file->dsc.version) != 0) {
+		// TODO: add permissive thing to ignore this
+		// (beware if tracking is active)
+		fprintf(stderr,
+"Source version '%s' of '%s' and version '%s' within the file '%s' do not match!\n",
+			c->sourceversion, BASENAME(i, c->ofs),
+			file->dsc.version, BASENAME(i, file->ofs));
+		return RET_ERROR;
+	}
+	r = propersourcename(file->dsc.name);
+	if (RET_IS_OK(r))
+		r = properversion(file->dsc.version);
+	if (RET_IS_OK(r))
+		r = properfilenames(&file->dsc.files.names);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* check if signatures match files signed: */
+	for (j = 0 ; j < file->dsc.files.names.count ; j++) {
+		int jj;
+		const char *afn = file->dsc.files.names.values[j];
+		size_t al = strlen(afn);
+		bool found = false;
+
+		/* only files ending in ".asc" are signature files */
+		if (al <= 4 || memcmp(afn + al - 4, ".asc", 4) != 0)
+			continue;
+
+		/* "foo.asc" must be accompanied by a listed "foo" */
+		for (jj = 0 ; jj < file->dsc.files.names.count ; jj++) {
+			const char *fn = file->dsc.files.names.values[jj];
+			size_t l = strlen(fn);
+
+			if (l + 4 != al)
+				continue;
+			if (memcmp(afn, fn, l) != 0)
+				continue;
+			found = true;
+			break;
+		}
+		if (!found) {
+			fprintf(stderr,
+"Signature file without file to be signed: '%s'!\n", afn);
+			return RET_ERROR;
+		}
+	}
+
+	oinfo = override_search(into->overrides.dsc, file->dsc.name);
+
+	r = getsectionprioritycomponent(i, c, into, file,
+			file->dsc.name, oinfo,
+			&section, &priority, &package->component);
+	if (RET_WAS_ERROR(r))
+		return r;
+	package->directory = calc_sourcedir(package->component,
+			file->dsc.name);
+	if (FAILEDTOALLOC(package->directory))
+		return RET_ERROR_OOM;
+	r = calc_dirconcats(package->directory, &file->dsc.files.names,
+			&package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	package->files = nzNEW(package->filekeys.count,
+			const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+	/* index 0 is the .dsc file itself */
+	r = files_canadd(package->filekeys.values[0],
+			file->checksums);
+	if (RET_IS_OK(r))
+		package->files[0] = file;
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* remaining entries are the files referenced by the .dsc */
+	for (j = 1 ; j < package->filekeys.count ; j++) {
+		r = prepare_source_file(i, c,
+				package->filekeys.values[j],
+				file->dsc.files.names.values[j],
+				&file->dsc.files.checksums[j],
+				file->ofs, &package->files[j]);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	r = sources_complete(&file->dsc, package->directory, oinfo,
+			section, priority, &package->control);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	return RET_OK;
+}
+
+/* Collect all byhand files into one pseudo-package, stored under a
+ * byhand directory derived from component/source/version. Returns
+ * RET_NOTHING when there are none. NOTE(review): unlike
+ * candidate_preparelogs there is no fallback when no package has a
+ * defined component (component stays component_strange) — confirm
+ * calc_byhanddir copes with that. */
+static retvalue candidate_preparetrackbyhands(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per) {
+	retvalue r;
+	char *byhanddir;
+	struct candidate_package *package;
+	struct candidate_file *firstbyhand = NULL, *file;
+	component_t component = component_strange;
+	int count = 0;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		if (file->type == fe_BYHAND) {
+			count++;
+			if (firstbyhand == NULL)
+				firstbyhand = file;
+		}
+	}
+	if (count == 0)
+		return RET_NOTHING;
+
+	/* search for a component to use */
+	for (package = per->packages ; package != NULL ;
+	     package = package->next) {
+		if (atom_defined(package->component)) {
+			component = package->component;
+			break;
+		}
+	}
+
+	/* pseudo package containing byhand files */
+	package = candidate_newpackage(per, firstbyhand);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+	r = strlist_init_n(count, &package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	package->files = nzNEW(count, const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+
+	byhanddir = calc_byhanddir(component, c->source, c->changesversion);
+	if (FAILEDTOALLOC(byhanddir))
+		return RET_ERROR_OOM;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		char *filekey;
+
+		if (file->type != fe_BYHAND)
+			continue;
+
+		r = candidate_usefile(i, c, file);
+		if (RET_WAS_ERROR(r)) {
+			free(byhanddir);
+			return r;
+		}
+
+		filekey = calc_dirconcat(byhanddir, BASENAME(i, file->ofs));
+		if (FAILEDTOALLOC(filekey)) {
+			free(byhanddir);
+			return RET_ERROR_OOM;
+		}
+
+		r = files_canadd(filekey, file->checksums);
+		if (RET_WAS_ERROR(r)) {
+			free(byhanddir);
+			return r;
+		}
+		/* RET_NOTHING: already in pool, slot stays NULL */
+		if (RET_IS_OK(r))
+			package->files[package->filekeys.count] = file;
+		/* pre-sized list (strlist_init_n): add cannot fail */
+		r = strlist_add(&package->filekeys, filekey);
+		assert (r == RET_OK);
+	}
+	free(byhanddir);
+	assert (package->filekeys.count == count);
+	return RET_OK;
+}
+
+/* Collect all log files into one pseudo-package. The component is taken
+ * from the first real package, falling back to the distribution's first
+ * component if none is defined. Returns RET_NOTHING when there are no
+ * log files. */
+static retvalue candidate_preparelogs(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per) {
+	retvalue r;
+	struct candidate_package *package;
+	struct candidate_file *firstlog = NULL, *file;
+	component_t component = component_strange;
+	int count = 0;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		if (file->type == fe_LOG) {
+			count++;
+			if (firstlog == NULL)
+				firstlog = file;
+		}
+	}
+	if (count == 0)
+		return RET_NOTHING;
+
+	/* search for a component to use */
+	for (package = per->packages ; package != NULL ;
+	     package = package->next) {
+		if (atom_defined(package->component)) {
+			component = package->component;
+			break;
+		}
+	}
+	/* if there somehow were no packages to get an component from,
+	   put in the main one of this distribution. */
+	if (!atom_defined(component)) {
+		assert (per->into->components.count > 0);
+		component = per->into->components.atoms[0];
+	}
+
+	/* pseudo package containing log files */
+	package = candidate_newpackage(per, firstlog);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+	r = strlist_init_n(count, &package->filekeys);
+	if (RET_WAS_ERROR(r))
+		return r;
+	package->files = nzNEW(count, const struct candidate_file *);
+	if (FAILEDTOALLOC(package->files))
+		return RET_ERROR_OOM;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		char *filekey;
+
+		if (file->type != fe_LOG)
+			continue;
+
+		r = candidate_usefile(i, c, file);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		// TODO: add same checks on the basename contents?
+
+		filekey = calc_filekey(component, c->source, BASENAME(i, file->ofs));
+		if (FAILEDTOALLOC(filekey))
+			return RET_ERROR_OOM;
+
+		r = files_canadd(filekey, file->checksums);
+		if (RET_WAS_ERROR(r))
+			return r;
+		/* RET_NOTHING: already in pool, slot stays NULL */
+		if (RET_IS_OK(r))
+			package->files[package->filekeys.count] = file;
+		/* pre-sized list (strlist_init_n): add cannot fail */
+		r = strlist_add(&package->filekeys, filekey);
+		assert (r == RET_OK);
+	}
+	assert (package->filekeys.count == count);
+	return RET_OK;
+}
+
+/* Queue every byhand hook of the target distribution matching this
+ * byhand file; the queued hooks are run later (candidate_add_byhands).
+ * Returns RET_OK if at least one hook matched, RET_NOTHING otherwise. */
+static retvalue prepare_hookedbyhand(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, struct candidate_file *file) {
+	const struct distribution *d = per->into;
+	const struct byhandhook *h = NULL;
+	struct byhandfile **b_p, *b;
+	retvalue result = RET_NOTHING;
+	retvalue r;
+
+	/* advance to the end of the already queued hook calls */
+	b_p = &per->byhandhookstocall;
+	while (*b_p != NULL)
+		b_p = &(*b_p)->next;
+
+	/* h carries iterator state: each call yields the next matching hook */
+	while (byhandhooks_matched(d->byhandhooks, &h,
+				file->section, file->priority,
+				BASENAME(i, file->ofs))) {
+		/* make sure the file is unpacked/available for the hook */
+		r = candidate_usefile(i, c, file);
+		if (RET_WAS_ERROR(r))
+			return r;
+		b = zNEW(struct byhandfile);
+		if (FAILEDTOALLOC(b))
+			return RET_ERROR_OOM;
+		b->file = file;
+		b->hook = h;
+		*b_p = b;
+		b_p = &b->next;
+		result = RET_OK;
+	}
+	return result;
+}
+
+/* Prepare everything of candidate c destined for one distribution:
+ * regular packages (.deb/.udeb/.ddeb/.dsc), byhand hooks, and -- if
+ * tracking is enabled -- the pseudo packages for byhand, log,
+ * buildinfo and .changes files. */
+static retvalue prepare_for_distribution(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *d) {
+	struct candidate_file *file;
+	retvalue r;
+
+	d->into->lookedat = true;
+
+	for (file = c->files ; file != NULL ; file = file->next) {
+		switch (file->type) {
+			case fe_UDEB:
+			case fe_DEB:
+			case fe_DDEB:
+				r = prepare_deb(i, c, d, file);
+				break;
+			case fe_DSC:
+				r = prepare_dsc(i, c, d, file);
+				break;
+			case fe_BYHAND:
+				r = prepare_hookedbyhand(i, c, d, file);
+				break;
+			default:
+				/* other file types (log, buildinfo, changes)
+				 * are handled by the tracking code below */
+				r = RET_NOTHING;
+				break;
+		}
+		if (RET_WAS_ERROR(r)) {
+			return r;
+		}
+	}
+	if (d->into->tracking != dt_NONE) {
+		if (d->into->trackingoptions.includebyhand) {
+			r = candidate_preparetrackbyhands(i, c, d);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		if (d->into->trackingoptions.includelogs) {
+			r = candidate_preparelogs(i, c, d);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		if (d->into->trackingoptions.includebuildinfos) {
+			r = candidate_preparebuildinfos(i, c, d);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		if (d->into->trackingoptions.includechanges) {
+			r = candidate_preparechangesfile(c, d);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+	}
+	//... check if something would be done ...
+	return RET_OK;
+}
+
+/* Hardlink (or copy) the prepared files of all non-skipped packages
+ * into the pool under their computed filekeys. */
+static retvalue candidate_addfiles(struct candidate *c) {
+	int j;
+	struct candidate_perdistribution *d;
+	struct candidate_package *p;
+	retvalue r;
+
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		for (p = d->packages ; p != NULL ; p = p->next) {
+			if (p->skip)
+				continue;
+			for (j = 0 ; j < p->filekeys.count ; j++) {
+				const struct candidate_file *f = p->files[j];
+				/* files[j] is only set where files_canadd
+				 * said the file still needs to be added */
+				if (f == NULL)
+					continue;
+				assert(f->tempfilename != NULL);
+				r = files_hardlinkandadd(f->tempfilename,
+						p->filekeys.values[j],
+						f->checksums);
+				if (RET_WAS_ERROR(r))
+					return r;
+			}
+		}
+	}
+	return RET_OK;
+}
+
+/* Add a source package to the source target of its component in the
+ * given distribution; trackingdata may be NULL when tracking is off.
+ * Updates into->status with the result. */
+static retvalue add_dsc(struct distribution *into, struct trackingdata *trackingdata, struct candidate_package *p) {
+	retvalue r;
+	struct target *t = distribution_getpart(into,
+			p->component, architecture_source, pt_dsc);
+
+	assert (logger_isprepared(into->logger));
+
+	/* finally put it into the source distribution */
+	r = target_initpackagesdb(t, READWRITE);
+	if (!RET_WAS_ERROR(r)) {
+		retvalue r2;
+		if (interrupted())
+			r = RET_ERROR_INTERRUPTED;
+		else
+			r = target_addpackage(t, into->logger,
+					p->master->dsc.name,
+					p->master->dsc.version,
+					p->control,
+					&p->filekeys,
+					false, trackingdata,
+					architecture_source,
+					NULL, NULL);
+		/* the database is closed even on error/interrupt */
+		r2 = target_closepackagesdb(t);
+		RET_ENDUPDATE(r, r2);
+	}
+	RET_UPDATE(into->status, r);
+	return r;
+}
+
+/* Dry-run counterpart of add_dsc: check (read-only) whether the source
+ * package could be added, without modifying anything. */
+static retvalue checkadd_dsc(
+		struct distribution *into,
+		const struct incoming *i,
+		bool tracking, struct candidate_package *p) {
+	retvalue r;
+	struct target *t = distribution_getpart(into,
+			p->component, architecture_source, pt_dsc);
+
+	/* check for possible errors putting it into the source distribution */
+	r = target_initpackagesdb(t, READONLY);
+	if (!RET_WAS_ERROR(r)) {
+		retvalue r2;
+		if (interrupted())
+			r = RET_ERROR_INTERRUPTED;
+		else
+			r = target_checkaddpackage(t,
+					p->master->dsc.name,
+					p->master->dsc.version,
+					tracking,
+					i->permit[pmf_oldpackagenewer]);
+		r2 = target_closepackagesdb(t);
+		RET_ENDUPDATE(r, r2);
+	}
+	return r;
+}
+
+/* Actually add all (non-skipped) packages of the candidate into one
+ * distribution, including the tracking pseudo packages
+ * (.changes/byhand/buildinfo/log). On success *changesfilekey_p is set
+ * to the pool filekey of the .changes file, if it got one. */
+static retvalue candidate_add_into(const struct incoming *i, const struct candidate *c, const struct candidate_perdistribution *d, const char **changesfilekey_p) {
+	retvalue r;
+	struct candidate_package *p;
+	struct trackingdata trackingdata;
+	struct distribution *into = d->into;
+	trackingdb tracks;
+	struct atomlist binary_architectures;
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	into->lookedat = true;
+	if (into->logger != NULL) {
+		r = logger_prepare(d->into->logger);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+
+	tracks = NULL;
+	if (into->tracking != dt_NONE) {
+		r = tracking_initialize(&tracks, into, false);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	if (tracks != NULL) {
+		r = trackingdata_summon(tracks, c->source, c->sourceversion,
+				&trackingdata);
+		if (RET_WAS_ERROR(r)) {
+			(void)tracking_done(tracks, into);
+			return r;
+		}
+		if (into->trackingoptions.needsources) {
+			// TODO, but better before we start adding...
+		}
+	}
+
+	/* collect the concrete binary architectures of this upload; used
+	 * to limit where architecture 'all' packages go (limit_arch_all) */
+	atomlist_init(&binary_architectures);
+	for (p = d->packages ; p != NULL ; p = p->next) {
+		if (FE_BINARY(p->master->type)) {
+			architecture_t a = p->master->architecture;
+
+			if (a != architecture_all)
+				atomlist_add_uniq(&binary_architectures, a);
+		}
+	}
+
+	r = RET_OK;
+	for (p = d->packages ; p != NULL ; p = p->next) {
+		if (p->skip) {
+			if (verbose >= 0)
+				printf(
+"Not putting '%s' in '%s' as already in there with equal or newer version.\n",
+					BASENAME(i, p->master->ofs),
+					into->codename);
+			continue;
+		}
+		if (p->master->type == fe_DSC) {
+			r = add_dsc(into, (tracks==NULL)?NULL:&trackingdata,
+					p);
+		} else if (FE_BINARY(p->master->type)) {
+			architecture_t a = p->master->architecture;
+			const struct atomlist *as, architectures = {&a, 1, 1};
+
+			/* 'all' packages go to the upload's binary
+			 * architectures when limit_arch_all is set,
+			 * otherwise to every architecture */
+			if (i->options[iof_limit_arch_all] &&
+					a == architecture_all &&
+					binary_architectures.count > 0)
+				as = &binary_architectures;
+			else
+				as = &architectures;
+			r = binaries_adddeb(&p->master->deb,
+					as, p->packagetype, into,
+					(tracks==NULL)?NULL:&trackingdata,
+					p->component, &p->filekeys,
+					p->control);
+		} else if (p->master->type == fe_CHANGES) {
+			/* finally add the .changes to tracking, if requested */
+			assert (p->master->name == NULL);
+			assert (tracks != NULL);
+
+			r = trackedpackage_adddupfilekeys(trackingdata.tracks,
+					trackingdata.pkg,
+					ft_CHANGES, &p->filekeys, false);
+			if (p->filekeys.count > 0)
+				*changesfilekey_p = p->filekeys.values[0];
+		} else if (p->master->type == fe_BYHAND) {
+			assert (tracks != NULL);
+
+			r = trackedpackage_adddupfilekeys(trackingdata.tracks,
+					trackingdata.pkg,
+					ft_XTRA_DATA, &p->filekeys, false);
+		} else if (p->master->type == fe_BUILDINFO) {
+			assert (tracks != NULL);
+
+			r = trackedpackage_adddupfilekeys(trackingdata.tracks,
+					trackingdata.pkg,
+					ft_BUILDINFO, &p->filekeys, false);
+		} else if (p->master->type == fe_LOG) {
+			assert (tracks != NULL);
+
+			r = trackedpackage_adddupfilekeys(trackingdata.tracks,
+					trackingdata.pkg,
+					ft_LOG, &p->filekeys, false);
+		} else
+			r = RET_ERROR_INTERNAL;
+
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	atomlist_done(&binary_architectures);
+
+	/* tracking data is written back even after an error above */
+	if (tracks != NULL) {
+		retvalue r2;
+		r2 = trackingdata_finish(tracks, &trackingdata);
+		RET_UPDATE(r, r2);
+		r2 = tracking_done(tracks, into);
+		RET_ENDUPDATE(r, r2);
+	}
+	return r;
+}
+
+/* Dry-run pass over all packages of one distribution: mark packages
+ * that would be no-ops (already there in same/newer version) as skip.
+ * Returns RET_OK if anything is left to do, RET_NOTHING otherwise. */
+static inline retvalue candidate_checkadd_into(const struct incoming *i, const struct candidate_perdistribution *d) {
+	retvalue r;
+	struct candidate_package *p;
+	struct distribution *into = d->into;
+	bool somethingtodo = false;
+
+	for (p = d->packages ; p != NULL ; p = p->next) {
+		if (p->master->type == fe_DSC) {
+			r = checkadd_dsc(into, i, into->tracking != dt_NONE,
+					p);
+		} else if (FE_BINARY(p->master->type)) {
+			r = binaries_checkadddeb(&p->master->deb,
+					p->master->architecture,
+					p->packagetype,
+					into, into->tracking != dt_NONE,
+					p->component,
+					i->permit[pmf_oldpackagenewer]);
+		} else if (p->master->type == fe_CHANGES
+				|| p->master->type == fe_BYHAND
+				|| p->master->type == fe_BUILDINFO
+				|| p->master->type == fe_LOG) {
+			/* pseudo packages never block the upload */
+			continue;
+		} else
+			r = RET_ERROR_INTERNAL;
+
+		if (RET_WAS_ERROR(r))
+			return r;
+		if (r == RET_NOTHING)
+			p->skip = true;
+		else
+			somethingtodo = true;
+	}
+	if (somethingtodo)
+		return RET_OK;
+	else
+		return RET_NOTHING;
+}
+
+/* Walk the upload conditions from the uploaders list and verify them
+ * against the candidate (codename, source name, sections, binaries,
+ * architectures, byhand sections) until a final verdict is reached.
+ * uploaders_nextcondition eventually yields uc_ACCEPTED/uc_REJECTED. */
+static inline bool isallowed(UNUSED(struct incoming *i), struct candidate *c, struct distribution *into, struct upload_conditions *conditions) {
+	const struct candidate_file *file;
+
+	do switch (uploaders_nextcondition(conditions)) {
+		case uc_ACCEPTED:
+			return true;
+		case uc_REJECTED:
+			return false;
+		case uc_CODENAME:
+			(void)uploaders_verifystring(conditions, into->codename);
+			break;
+		case uc_SOURCENAME:
+			assert (c->source != NULL);
+			(void)uploaders_verifystring(conditions, c->source);
+			break;
+		case uc_SECTIONS:
+			/* every package file's section must verify;
+			 * a missing section is matched as "-" */
+			for (file = c->files ; file != NULL ;
+					file = file->next) {
+				if (!FE_PACKAGE(file->type))
+					continue;
+				if (!uploaders_verifystring(conditions,
+						(file->section == NULL)
+						?"-":file->section))
+					break;
+			}
+			break;
+		case uc_BINARIES:
+			for (file = c->files ; file != NULL ;
+					file = file->next) {
+				if (!FE_BINARY(file->type))
+					continue;
+				if (!uploaders_verifystring(conditions,
+						file->name))
+					break;
+			}
+			break;
+		case uc_ARCHITECTURES:
+			for (file = c->files ; file != NULL ;
+					file = file->next) {
+				if (!FE_PACKAGE(file->type))
+					continue;
+				if (!uploaders_verifyatom(conditions,
+						file->architecture))
+					break;
+			}
+			break;
+		case uc_BYHAND:
+			for (file = c->files ; file != NULL ;
+					file = file->next) {
+				if (file->type != fe_BYHAND)
+					continue;
+				if (!uploaders_verifystring(conditions,
+						file->section))
+					break;
+			}
+			break;
+	} while (true);
+}
+
+/* Check whether the candidate's signatures permit uploading into this
+ * distribution. Returns RET_OK if allowed (or no uploader rules exist),
+ * RET_NOTHING if rejected, error otherwise. */
+static retvalue candidate_checkpermissions(struct incoming *i, struct candidate *c, struct distribution *into) {
+	retvalue r;
+	struct upload_conditions *conditions;
+	bool allowed;
+
+	/* no rules means allowed */
+	if (into->uploaders == NULL)
+		return RET_OK;
+
+	r = distribution_loaduploaders(into);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert(into->uploaderslist != NULL);
+
+	r = uploaders_permissions(into->uploaderslist, c->signatures,
+			&conditions);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+	allowed = isallowed(i, c, into, conditions);
+	free(conditions);
+	if (allowed)
+		return RET_OK;
+	else
+		/* reject */
+		return RET_NOTHING;
+}
+
+/* Verify that every architecture listed in the .changes file is
+ * supported by every target distribution; for 'all', verify that at
+ * least one binary architecture exists (either in the upload itself
+ * or in each target distribution). */
+static retvalue check_architecture_availability(const struct incoming *i, const struct candidate *c) {
+	struct candidate_perdistribution *d;
+	bool check_all_availability = false;
+	bool have_all_available = false;
+	int j;
+
+	// TODO: switch to instead ensure every architecture can be put into
+	// one distribution at least would be nice. If implementing this do not
+	// forget to check later to only put files in when the distribution can
+	// cope with that.
+
+	for (j = 0 ; j < c->architectures.count ; j++) {
+		const char *architecture = c->architectures.values[j];
+		if (strcmp(architecture, "all") == 0) {
+			check_all_availability = true;
+			continue;
+		}
+		for (d = c->perdistribution ; d != NULL ; d = d->next) {
+			if (atomlist_in(&d->into->architectures, architecture_find(architecture)))
+				continue;
+			fprintf(stderr,
+"'%s' lists architecture '%s' not found in distribution '%s'!\n",
+					BASENAME(i, c->ofs), architecture,
+					d->into->codename);
+			return RET_ERROR;
+		}
+		if (strcmp(architecture, "source") != 0)
+			have_all_available = true;
+	}
+	if (check_all_availability && ! have_all_available) {
+		/* 'all' needs some binary architecture in each target;
+		 * a distribution with only 'source' cannot take it */
+		for (d = c->perdistribution ; d != NULL ; d = d->next) {
+			if (d->into->architectures.count > 1)
+				continue;
+			if (d->into->architectures.count > 0 &&
+				d->into->architectures.atoms[0] != architecture_source)
+				continue;
+			fprintf(stderr,
+"'%s' lists architecture 'all' but no binary architecture found in distribution '%s'!\n",
+				BASENAME(i, c->ofs), d->into->codename);
+			return RET_ERROR;
+		}
+	}
+	return RET_OK;
+}
+
+/* Create a new unique directory below logdir, named after the .changes
+ * basename with the trailing "changes" replaced by a 7-digit counter.
+ * On success *subdir_p receives the caller-owned directory name. */
+static retvalue create_uniq_logsubdir(const char *logdir, const char *name, const char *version, const struct strlist *architectures, /*@out@*/char **subdir_p) {
+	char *dir, *p;
+	size_t l;
+	retvalue r;
+
+	r = dirs_make_recursive(logdir);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	p = calc_changes_basename(name, version, architectures);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	dir = calc_dirconcat(logdir, p);
+	free(p);
+	if (FAILEDTOALLOC(dir))
+		return RET_ERROR_OOM;
+	l = strlen(dir);
+	assert (l > 8 && strcmp(dir + l - 8 , ".changes") == 0);
+	/* replace "changes" with the counter "0000000" */
+	memset(dir + l - 7, '0', 7);
+	r = dirs_create(dir);
+	/* RET_NOTHING means the directory already exists: increment the
+	 * decimal counter in the name and retry */
+	while (r == RET_NOTHING) {
+		p = dir + l - 1;
+		while (*p == '9') {
+			*p = '0';
+			p--;
+		}
+		if (*p < '0' || *p > '8') {
+			fprintf(stderr,
+"Failed to create a new directory of the form '%s'\n"
+"it looks like all 10000000 such directories are already there...\n",
+					dir);
+			/* fixed: dir was leaked on this error path */
+			free(dir);
+			return RET_ERROR;
+		}
+		(*p)++;
+		r = dirs_create(dir);
+	}
+	/* fixed: a real error from dirs_create used to fall through and
+	 * be reported as success with a directory that does not exist */
+	if (RET_WAS_ERROR(r)) {
+		free(dir);
+		return r;
+	}
+	*subdir_p = dir;
+	return RET_OK;
+}
+
+/* Create the per-candidate log directory and collect (and claim via
+ * candidate_usefile) all files destined for it: the .changes file
+ * itself, .log and .buildinfo files, and unused byhand files. */
+static retvalue candidate_prepare_logdir(struct incoming *i, struct candidate *c) {
+	int count, j;
+	struct candidate_file *file;
+	retvalue r;
+
+	r = create_uniq_logsubdir(i->logdir,
+			c->source, c->changesversion,
+			&c->architectures,
+			&c->logsubdir);
+	/* fixed: pass the real error through instead of always claiming
+	 * out-of-memory (and drop the assert contradicting this path) */
+	if (RET_WAS_ERROR(r))
+		return r;
+	count = 0;
+	for (file = c->files ; file != NULL ; file = file->next) {
+		if (file->ofs == c->ofs || file->type == fe_LOG
+				|| file->type == fe_BUILDINFO
+				|| (file->type == fe_BYHAND && !file->used))
+			count++;
+	}
+	c->logcount = count;
+	c->logfiles = nzNEW(count, const struct candidate_file *);
+	if (FAILEDTOALLOC(c->logfiles))
+		return RET_ERROR_OOM;
+	j = 0;
+	for (file = c->files ; file != NULL ; file = file->next) {
+		/* must match the counting condition above exactly */
+		if (file->ofs == c->ofs || file->type == fe_LOG
+				|| file->type == fe_BUILDINFO
+				|| (file->type == fe_BYHAND && !file->used)) {
+			r = candidate_usefile(i, c, file);
+			if (RET_WAS_ERROR(r))
+				return r;
+			c->logfiles[j++] = file;
+		}
+	}
+	assert (count == j);
+	return RET_OK;
+}
+
+/* Hardlink all collected log files into the candidate's log directory
+ * (prepared by candidate_prepare_logdir). */
+static retvalue candidate_finish_logdir(struct incoming *i, struct candidate *c) {
+	int j;
+
+	for (j = 0 ; j < c->logcount ; j++) {
+		retvalue r;
+		const struct candidate_file *f = c->logfiles[j];
+
+		r = checksums_hardlink(c->logsubdir,
+				BASENAME(i, f->ofs), f->tempfilename,
+				f->checksums);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+/* Run all byhand hooks queued for this distribution (collected by
+ * prepare_hookedbyhand) on their respective files. */
+static retvalue candidate_add_byhands(struct incoming *i, UNUSED(struct candidate *c), struct candidate_perdistribution *d) {
+	struct byhandfile *b;
+	retvalue r;
+
+	for (b = d->byhandhookstocall ; b != NULL ; b = b->next){
+		const struct candidate_file *f = b->file;
+
+		r = byhandhook_call(b->hook, d->into->codename,
+				f->section, f->priority, BASENAME(i, f->ofs),
+				f->tempfilename);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+/* the actual adding of packages,
+ * everything that can be tested earlier should be already tested now */
+static retvalue candidate_really_add(struct incoming *i, struct candidate *c) {
+	struct candidate_perdistribution *d;
+	retvalue r;
+
+	/* byhand hooks run first, before anything enters the pool */
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		if (d->byhandhookstocall == NULL)
+			continue;
+		r = candidate_add_byhands(i, c, d);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+
+	/* make hardlinks/copies of the files */
+	r = candidate_addfiles(c);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	if (i->logdir != NULL) {
+		r = candidate_finish_logdir(i, c);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+	r = RET_OK;
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		struct distribution *into = d->into;
+		const char *changesfilekey = NULL;
+
+		/* if there are regular packages to add,
+		 * add them and call the log.
+		 * If all packages were skipped but a byhandhook run,
+		 * still advertise the .changes file to loggers */
+		if (!d->skip) {
+			r = candidate_add_into(i, c, d,
+					&changesfilekey);
+			if (RET_WAS_ERROR(r))
+				return r;
+		} else if (d->byhandhookstocall == NULL)
+			continue;
+		logger_logchanges(into->logger, into->codename,
+				c->source, c->changesversion,
+				changesfile(c)->tempfilename, changesfilekey);
+	}
+	return RET_OK;
+}
+
+/* Process one candidate upload: verify architectures, load overrides,
+ * read and prepare all files for every target distribution, run the
+ * dry-run checks, then really add everything and mark the incoming
+ * files as processed/deletable. */
+static retvalue candidate_add(struct incoming *i, struct candidate *c) {
+	struct candidate_perdistribution *d;
+	struct candidate_file *file;
+	retvalue r;
+	bool somethingtodo;
+	char *origfilename;
+	assert (c->perdistribution != NULL);
+
+	/* check if every distribution this is to be added to supports
+	 * all architectures we have files for */
+	r = check_architecture_availability(i, c);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		r = distribution_loadalloverrides(d->into);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+
+	// TODO: once uploaderlist allows one to look for package names or
+	// existing override entries or such things, check package names here
+	// enable checking for content name with outer name
+
+	/* when we get here, the package is allowed in, now we have to
+	 * read the parts and check all stuff we only know now */
+
+	r = candidate_read_files(i, c);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* now the distribution specific part starts: */
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		r = prepare_for_distribution(i, c, d);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	if (i->logdir != NULL) {
+		r = candidate_prepare_logdir(i, c);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+	}
+	for (file = c->files ; file != NULL ; file = file->next) {
+		/* silently ignore unused buildinfo files: */
+		if (file->type == fe_BUILDINFO)
+			continue;
+		/* otherwise complain unless unused_files is given */
+		if (!file->used && !i->permit[pmf_unused_files]) {
+			// TODO: find some way to mail such errors...
+			fprintf(stderr,
+"Error: '%s' contains unused file '%s'!\n"
+"(Do Permit: unused_files to conf/incoming to ignore and\n"
+" additionally Cleanup: unused_files to delete them)\n",
+				BASENAME(i, c->ofs), BASENAME(i, file->ofs));
+			if (file->type == fe_LOG || file->type == fe_BYHAND)
+				fprintf(stderr,
+"Alternatively, you can also add a LogDir: for '%s' into conf/incoming\n"
+"then files like that will be stored there.\n",
+					i->name);
+			return RET_ERROR;
+		}
+	}
+
+	/* additional test run to see if anything could go wrong,
+	 * or if there are already newer versions */
+	somethingtodo = false;
+	for (d = c->perdistribution ; d != NULL ; d = d->next) {
+		r = candidate_checkadd_into(i, d);
+		if (RET_WAS_ERROR(r))
+			return r;
+		if (r == RET_NOTHING) {
+			d->skip = true;
+			/* byhand hooks still count as work to do */
+			if (d->byhandhookstocall != NULL)
+				somethingtodo = true;
+		} else
+			somethingtodo = true;
+	}
+	if (! somethingtodo) {
+		if (verbose >= 0) {
+			printf(
+"Skipping %s because all packages are skipped!\n",
+					BASENAME(i, c->ofs));
+		}
+		for (file = c->files ; file != NULL ; file = file->next) {
+			if (file->used || i->cleanup[cuf_unused_files] ||
+					(file->type == fe_BUILDINFO &&
+					i->cleanup[cuf_unused_buildinfo_files]))
+				i->delete[file->ofs] = true;
+		}
+		return RET_NOTHING;
+	}
+
+	// TODO: make sure not two different files are supposed to be installed
+	// as the same filekey.
+
+	/* the actual adding of packages, make sure what can be checked was
+	 * checked by now */
+
+	/* causingfile is a global used by error reporting while adding */
+	origfilename = calc_dirconcat(i->directory,
+			BASENAME(i, changesfile(c)->ofs));
+	causingfile = origfilename;
+
+	r = candidate_really_add(i, c);
+
+	causingfile = NULL;
+	free(origfilename);
+
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* mark files as done */
+	for (file = c->files ; file != NULL ; file = file->next) {
+		if (file->used)
+			i->processed[file->ofs] = true;
+		if (file->used || i->cleanup[cuf_unused_files] ||
+				(file->type == fe_BUILDINFO &&
+				i->cleanup[cuf_unused_buildinfo_files])) {
+			i->delete[file->ofs] = true;
+		}
+	}
+	return r;
+}
+
+/* Process a single .changes file (at index ofs in i->files): parse it,
+ * determine the target distributions (honouring allow rules and the
+ * default), check permissions and add it. Marks files for deletion
+ * according to the cleanup options on deny/error. */
+static retvalue process_changes(struct incoming *i, int ofs) {
+	struct candidate *c;
+	retvalue r;
+	int j, k;
+	bool broken = false, tried = false;
+
+	r = candidate_read(i, ofs, &c, &broken);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (RET_IS_OK(r));
+	r = candidate_parse(i, c);
+	if (RET_WAS_ERROR(r)) {
+		candidate_free(c);
+		return r;
+	}
+	r = candidate_earlychecks(i, c);
+	if (RET_WAS_ERROR(r)) {
+		if (i->cleanup[cuf_on_error]) {
+			struct candidate_file *file;
+
+			i->delete[c->ofs] = true;
+			for (file = c->files ; file != NULL ;
+			                       file = file->next) {
+				i->delete[file->ofs] = true;
+			}
+		}
+		candidate_free(c);
+		return r;
+	}
+	/* match the distributions named in the .changes file against the
+	 * allow list; the first permitting distribution wins per name */
+	for (k = 0 ; k < c->distributions.count ; k++) {
+		const char *name = c->distributions.values[k];
+
+		for (j = 0 ; j < i->allow.count ; j++) {
+			// TODO: implement "*"
+			if (strcmp(name, i->allow.values[j]) == 0) {
+				tried = true;
+				r = candidate_checkpermissions(i, c,
+						i->allow_into[j]);
+				if (r == RET_NOTHING)
+					continue;
+				if (RET_IS_OK(r))
+					r = candidate_newdistribution(c,
+							i->allow_into[j]);
+				if (RET_WAS_ERROR(r)) {
+					candidate_free(c);
+					return r;
+				} else
+					break;
+			}
+		}
+		if (c->perdistribution != NULL &&
+				!i->options[iof_multiple_distributions])
+			break;
+	}
+	/* fall back to the configured default distribution, if any */
+	if (c->perdistribution == NULL && i->default_into != NULL) {
+		tried = true;
+		r = candidate_checkpermissions(i, c, i->default_into);
+		if (RET_WAS_ERROR(r)) {
+			candidate_free(c);
+			return r;
+		}
+		if (RET_IS_OK(r)) {
+			r = candidate_newdistribution(c, i->default_into);
+		}
+	}
+	if (c->perdistribution == NULL) {
+		fprintf(stderr, tried?"No distribution accepting '%s' (i.e. none of the candidate distributions allowed inclusion)!\n":
+				      "No distribution found for '%s'!\n",
+				i->files.values[ofs]);
+		if (i->cleanup[cuf_on_deny]) {
+			struct candidate_file *file;
+
+			i->delete[c->ofs] = true;
+			for (file = c->files ; file != NULL ;
+			                       file = file->next) {
+				// TODO: implement same-owner check
+				if (!i->cleanup[cuf_on_deny_check_owner])
+					i->delete[file->ofs] = true;
+			}
+		}
+		r = RET_ERROR_INCOMING_DENY;
+	} else {
+		if (broken) {
+			fprintf(stderr,
+"'%s' is signed with only invalid signatures.\n"
+"If this was not corruption but willfull modification,\n"
+"remove the signatures and try again.\n",
+				i->files.values[ofs]);
+			r = RET_ERROR;
+		} else
+			r = candidate_add(i, c);
+		if (RET_WAS_ERROR(r) && i->cleanup[cuf_on_error]) {
+			struct candidate_file *file;
+
+			i->delete[c->ofs] = true;
+			for (file = c->files ; file != NULL ;
+			                       file = file->next) {
+				i->delete[file->ofs] = true;
+			}
+		}
+	}
+	logger_wait();
+	candidate_free(c);
+	return r;
+}
+
+/* Create a new unique subdirectory of basedir named "YYYY-MM-DD-<n>"
+ * (used for the morgue). Returns the malloc'd name or NULL on error
+ * (error already printed). */
+static inline /*@null@*/char *create_uniq_subdir(const char *basedir) {
+	char date[16], *dir;
+	unsigned long number = 0;
+	retvalue r;
+	time_t curtime;
+	struct tm *tm;
+	int e;
+
+	r = dirs_make_recursive(basedir);
+	if (RET_WAS_ERROR(r))
+		return NULL;
+
+	if (time(&curtime) == (time_t)-1)
+		tm = NULL;
+	else
+		tm = gmtime(&curtime);
+	/* "%Y-%m-%d" always produces exactly 10 characters */
+	if (tm == NULL || strftime(date, 16, "%Y-%m-%d", tm) != 10)
+		strcpy(date, "timeerror");
+
+	for (number = 0 ; number < 10000 ; number ++) {
+		dir = mprintf("%s/%s-%lu", basedir, date, number);
+		if (FAILEDTOALLOC(dir))
+			return NULL;
+		if (mkdir(dir, 0777) == 0)
+			return dir;
+		e = errno;
+		if (e != EEXIST) {
+			fprintf(stderr,
+"Error %d creating directory '%s': %s\n",
+				e, dir, strerror(e));
+			free(dir);
+			return NULL;
+		}
+		/* exists already: try the next number */
+		free(dir);
+	}
+	fprintf(stderr, "Could not create unique subdir in '%s'!\n", basedir);
+	return NULL;
+}
+
+/* tempdir should ideally be on the same partition like the pooldir */
+/* Process an incoming queue: run every .changes file found (or only
+ * changesfilename if non-NULL), then move unprocessed files to the
+ * morgue (if configured) and delete files marked for deletion. */
+retvalue process_incoming(struct distribution *distributions, const char *name, const char *changesfilename) {
+	struct incoming *i;
+	retvalue result, r;
+	int j;
+	char *morguedir;
+
+	result = RET_NOTHING;
+
+	r = incoming_init(distributions, name, &i);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	for (j = 0 ; j < i->files.count ; j ++) {
+		const char *basefilename = i->files.values[j];
+		size_t l = strlen(basefilename);
+#define C_SUFFIX ".changes"
+		const size_t c_len = strlen(C_SUFFIX);
+		/* only look at files ending in ".changes" */
+		if (l <= c_len ||
+		    memcmp(basefilename + (l - c_len), C_SUFFIX, c_len) != 0)
+			continue;
+		if (changesfilename != NULL && strcmp(basefilename, changesfilename) != 0)
+			continue;
+		/* a .changes file, check it */
+		r = process_changes(i, j);
+		RET_UPDATE(result, r);
+	}
+
+	logger_wait();
+	if (i->morguedir == NULL)
+		morguedir = NULL;
+	else {
+		morguedir = create_uniq_subdir(i->morguedir);
+	}
+	for (j = 0 ; j < i->files.count ; j ++) {
+		char *fullfilename;
+
+		if (!i->delete[j])
+			continue;
+
+		fullfilename = calc_dirconcat(i->directory, i->files.values[j]);
+		if (FAILEDTOALLOC(fullfilename)) {
+			result = RET_ERROR_OOM;
+			continue;
+		}
+		/* files marked for deletion but not processed go to the
+		 * morgue instead of being deleted, when one exists */
+		if (morguedir != NULL && !i->processed[j]) {
+			char *newname = calc_dirconcat(morguedir,
+					i->files.values[j]);
+			if (newname != NULL &&
+			    rename(fullfilename, newname) == 0) {
+				free(newname);
+				free(fullfilename);
+				continue;
+			} else if (FAILEDTOALLOC(newname)) {
+				result = RET_ERROR_OOM;
+			} else {
+				int e = errno;
+
+				fprintf(stderr,
+"Error %d moving '%s' to '%s': %s\n",
+					e, i->files.values[j],
+					morguedir, strerror(e));
+				RET_UPDATE(result, RET_ERRNO(e));
+				/* no continue, instead
+				 * delete the file as normal: */
+			}
+		}
+		if (verbose >= 3)
+			printf("deleting '%s'...\n", fullfilename);
+		deletefile(fullfilename);
+		free(fullfilename);
+	}
+	if (morguedir != NULL) {
+		/* in the case it is empty, remove again */
+		(void)rmdir(morguedir);
+		free(morguedir);
+	}
+	incoming_free(i);
+	return result;
+}
diff --git a/incoming.h b/incoming.h
new file mode 100644
index 0000000..392c907
--- /dev/null
+++ b/incoming.h
@@ -0,0 +1,10 @@
+#ifndef REPREPRO_INCOMING_H
+#define REPREPRO_INCOMING_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+/* fixed typo in the diagnostic: "hapening" -> "happening" */
+#warning "What's happening here?"
+#endif
+
+retvalue process_incoming(struct distribution *distributions, const char *name, /*@null@*/const char *onlychangesfilename);
+#endif
diff --git a/indexfile.c b/indexfile.c
new file mode 100644
index 0000000..c13832d
--- /dev/null
+++ b/indexfile.c
@@ -0,0 +1,304 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007,2008,2010,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <assert.h>
+#include "error.h"
+#include "ignore.h"
+#include "chunks.h"
+#include "names.h"
+#include "uncompression.h"
+#include "package.h"
+#include "indexfile.h"
+
+/* the purpose of this code is to read index files, either from a snapshot
+ * previously generated or downloaded while updating. */
+
+/* state for reading a (possibly compressed) index file chunk by chunk */
+struct indexfile {
+	struct compressedfile *f;	/* underlying decompression stream */
+	char *filename;			/* copy of the name, for messages */
+	int linenumber, startlinenumber; /* line span of the current chunk */
+	retvalue status;		/* sticky status, merged on close */
+	char *buffer;			/* chunk assembly buffer (size+1 bytes) */
+	int size, ofs, content;		/* buffer size / read offset / bytes left */
+	bool failed;			/* set after a fatal parse error */
+};
+
+/* Open an index file for chunk-wise reading, decompressing with the
+ * given method. On success *file_p owns the new handle. */
+retvalue indexfile_open(struct indexfile **file_p, const char *filename, enum compression compression) {
+	struct indexfile *f = zNEW(struct indexfile);
+	retvalue r;
+
+	if (FAILEDTOALLOC(f))
+		return RET_ERROR_OOM;
+	f->filename = strdup(filename);
+	if (FAILEDTOALLOC(f->filename)) {
+		free(f);
+		return RET_ERROR_OOM;
+	}
+	r = uncompress_open(&f->f, filename, compression);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		free(f->filename);
+		free(f);
+		/* NOTE(review): returns RET_ERRNO(errno) instead of r;
+		 * assumes uncompress_open left a meaningful errno here --
+		 * verify against uncompression.c */
+		return RET_ERRNO(errno);
+	}
+	f->linenumber = 0;
+	f->startlinenumber = 0;
+	f->status = RET_OK;
+	f->size = 4*1024*1024;
+	f->ofs = 0;
+	f->content = 0;
+	/* +1 for *d = '\0' in eof case */
+	f->buffer = malloc(f->size + 1);
+	if (FAILEDTOALLOC(f->buffer)) {
+		uncompress_abort(f->f);
+		free(f->filename);
+		free(f);
+		return RET_ERROR_OOM;
+	}
+	*file_p = f;
+	return RET_OK;
+}
+
+/* Close an index file handle and free all its resources; the returned
+ * value merges the close result with the sticky parse status. */
+retvalue indexfile_close(struct indexfile *f) {
+	retvalue r;
+
+	r = uncompress_close(f->f);
+
+	free(f->filename);
+	free(f->buffer);
+	RET_UPDATE(r, f->status);
+	free(f);
+
+	return r;
+}
+
+/* Read the next control chunk (paragraph, terminated by an empty line
+ * or end of file) into f->buffer, dropping '\r', replacing NUL bytes
+ * with spaces and stripping the trailing newline.
+ * Returns RET_OK with the NUL-terminated chunk in f->buffer,
+ * RET_NOTHING at end of file, RET_ERROR on failure. */
+static retvalue indexfile_get(struct indexfile *f) {
+	char *p, *d, *e, *start;
+	bool afternewline, nothingyet;
+	int bytes_read;
+
+	if (f->failed)
+		return RET_ERROR;
+
+	d = f->buffer;
+	afternewline = true;
+	nothingyet = true;
+	do {
+		start = f->buffer + f->ofs;
+		p = start ;
+		e = p + f->content;
+
+		// TODO: if the chunk_get* are more tested with strange
+		// input, this could be kept in-situ and only chunk_edit
+		// beautifying this chunk...
+
+		while (p < e) {
+			/* just ignore '\r', even if not line-end... */
+			if (*p == '\r') {
+				p++;
+				continue;
+			}
+			if (*p == '\n') {
+				f->linenumber++;
+				if (afternewline) {
+					/* empty line: chunk complete */
+					p++;
+					f->content -= (p - start);
+					f->ofs += (p - start);
+					assert (f->ofs == (p - f->buffer));
+					if (nothingyet)
+						/* restart */
+						return indexfile_get(f);
+					if (d > f->buffer && *(d-1) == '\n')
+						d--;
+					*d = '\0';
+					return RET_OK;
+				}
+				afternewline = true;
+				nothingyet = false;
+			} else
+				afternewline = false;
+			if (unlikely(*p == '\0')) {
+				/* NUL bytes would truncate the chunk;
+				 * degrade them to spaces */
+				*(d++) = ' ';
+				p++;
+			} else
+				*(d++) = *(p++);
+		}
+		/* ** out of data, read new ** */
+
+		/* start at beginning of free space */
+		f->ofs = (d - f->buffer);
+		f->content = 0;
+
+		if (f->size - f->ofs <= 2048) {
+			/* Adding code to enlarge the buffer in this case
+			 * is risky as hard to test properly.
+			 *
+			 * Also it is almost certainly caused by some
+			 * mis-representation of the file or perhaps
+			 * some attack. Requesting all existing memory in
+			 * those cases does not sound very useful. */
+
+			/* fixed: the message claimed 256K but the buffer
+			 * (f->size) is 4 MiB */
+			fprintf(stderr,
+"Error parsing %s line %d: Ridiculous long (>= 4M) control chunk!\n",
+					f->filename,
+					f->startlinenumber);
+			f->failed = true;
+			return RET_ERROR;
+		}
+
+		bytes_read = uncompress_read(f->f, d, f->size - f->ofs);
+		if (bytes_read < 0)
+			return RET_ERROR;
+		else if (bytes_read == 0)
+			break;
+		f->content = bytes_read;
+	} while (true);
+
+	if (d == f->buffer)
+		return RET_NOTHING;
+
+	/* end of file reached, return what we got so far */
+	assert (f->content == 0);
+	assert (d-f->buffer <= f->size);
+	if (d > f->buffer && *(d-1) == '\n')
+		d--;
+	*d = '\0';
+	return RET_OK;
+}
+
+/* Fetch the next chunk from f that fits the given target (matching or
+ * 'all' architecture unless allowwrongarchitecture) and fill *pkgout.
+ * Returns false at end of file or on error; errors are merged into
+ * f->status and reported by indexfile_close. */
+bool indexfile_getnext(struct indexfile *f, struct package *pkgout, struct target *target, bool allowwrongarchitecture) {
+	retvalue r;
+	bool ignorecruft = false; // TODO
+	char *packagename, *version;
+	const char *control;
+	architecture_t atom;
+
+	packagename = NULL; version = NULL;
+	do {
+		/* discard data of a previously rejected chunk */
+		free(packagename); packagename = NULL;
+		free(version); version = NULL;
+		f->startlinenumber = f->linenumber + 1;
+		r = indexfile_get(f);
+		if (!RET_IS_OK(r))
+			break;
+		control = f->buffer;
+		r = chunk_getvalue(control, "Package", &packagename);
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Error parsing %s line %d to %d: Chunk without 'Package:' field!\n",
+					f->filename,
+					f->startlinenumber, f->linenumber);
+			if (!ignorecruft)
+				r = RET_ERROR_MISSING;
+			else
+				continue;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+
+		r = chunk_getvalue(control, "Version", &version);
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Error parsing %s line %d to %d: Chunk without 'Version:' field!\n",
+					f->filename,
+					f->startlinenumber, f->linenumber);
+			if (!ignorecruft)
+				r = RET_ERROR_MISSING;
+			else
+				continue;
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		if (target->packagetype == pt_dsc) {
+			/* source indices carry no per-chunk architecture */
+			atom = architecture_source;
+		} else {
+			char *architecture;
+
+			r = chunk_getvalue(control, "Architecture", &architecture);
+			if (RET_WAS_ERROR(r))
+				break;
+			if (r == RET_NOTHING)
+				architecture = NULL;
+
+			/* check if architecture fits for target and error
+			   out if not ignorewrongarchitecture */
+			if (architecture == NULL) {
+				fprintf(stderr,
+"Error parsing %s line %d to %d: Chunk without 'Architecture:' field!\n",
+						f->filename,
+						f->startlinenumber, f->linenumber);
+				if (!ignorecruft) {
+					r = RET_ERROR_MISSING;
+					break;
+				} else
+					continue;
+			} else if (strcmp(architecture, "all") == 0) {
+				atom = architecture_all;
+			} else if (strcmp(architecture,
+					atoms_architectures[
+						target->architecture
+					]) == 0) {
+				atom = target->architecture;
+			} else if (!allowwrongarchitecture
+					&& !ignore[IGN_wrongarchitecture]) {
+				fprintf(stderr,
+"Warning: ignoring package because of wrong 'Architecture:' field '%s'"
+" (expected 'all' or '%s') in %s lines %d to %d!\n",
+						architecture,
+						atoms_architectures[
+							target->architecture],
+						f->filename,
+						f->startlinenumber,
+						f->linenumber);
+				if (ignored[IGN_wrongarchitecture] == 0) {
+					fprintf(stderr,
+"This either mean the repository you get packages from is of an extremely\n"
+"low quality, or something went wrong. Trying to ignore it now, though.\n"
+"To no longer get this message use '--ignore=wrongarchitecture'.\n");
+				}
+				ignored[IGN_wrongarchitecture]++;
+				free(architecture);
+				continue;
+			} else {
+				/* just ignore this because of wrong
+				 * architecture */
+				free(architecture);
+				continue;
+			}
+			free(architecture);
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		/* control stays owned by f->buffer; name/version
+		 * ownership moves to pkgout */
+		pkgout->target = target;
+		pkgout->control = control;
+		pkgout->pkgname = packagename;
+		pkgout->name = pkgout->pkgname;
+		pkgout->pkgversion = version;
+		pkgout->version = pkgout->pkgversion;
+		pkgout->architecture = atom;
+		return true;
+	} while (true);
+	free(packagename);
+	free(version);
+	RET_UPDATE(f->status, r);
+	return false;
+}
diff --git a/indexfile.h b/indexfile.h
new file mode 100644
index 0000000..cdc68a4
--- /dev/null
+++ b/indexfile.h
@@ -0,0 +1,19 @@
+#ifndef REPREPRO_INDEXFILE_H
+#define REPREPRO_INDEXFILE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_TARGET_H
+#include "target.h"
+#endif
+
+struct indexfile;
+struct package;
+
+retvalue indexfile_open(/*@out@*/struct indexfile **, const char *, enum compression);
+retvalue indexfile_close(/*@only@*/struct indexfile *);
+bool indexfile_getnext(struct indexfile *, /*@out@*/struct package *, struct target *, bool allowwrongarchitecture);
+
+#endif
diff --git a/log.c b/log.c
new file mode 100644
index 0000000..a097653
--- /dev/null
+++ b/log.c
@@ -0,0 +1,1138 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/poll.h>
+#include <sys/stat.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <strings.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <time.h>
+#include "error.h"
+#include "strlist.h"
+#include "atoms.h"
+#include "dirs.h"
+#include "target.h"
+#include "distribution.h"
+#include "configparser.h"
+#include "log.h"
+#include "filecntl.h"
+
+/*@null@*/ static /*@refcounted@*/ struct logfile { /* chain of shared, refcounted log files */
+	/*@null@*/struct logfile *next;
+	char *filename; /* full path; relative names get global.logdir prepended */
+	/*@refs@*/size_t refcount;
+	int fd; /* open descriptor, or -1 while not yet opened */
+} *logfile_root = NULL;
+
+static retvalue logfile_reference(/*@only@*/char *filename, /*@out@*/struct logfile **logfile) { /* get a handle for filename, reusing an existing one; takes ownership of filename */
+	struct logfile *l;
+
+	assert (global.logdir != NULL && filename != NULL);
+
+	for (l = logfile_root ; l != NULL ; l = l->next) { /* already known? just bump the refcount */
+		if (strcmp(l->filename, filename) == 0) {
+			l->refcount++;
+			*logfile = l;
+			free(filename);
+			return RET_OK;
+		}
+	}
+	l = NEW(struct logfile);
+	if (FAILEDTOALLOC(l)) {
+		free(filename);
+		return RET_ERROR_OOM;
+	}
+	if (filename[0] == '/') /* absolute names are kept, relative ones live below logdir */
+		l->filename = filename;
+	else {
+		l->filename = calc_dirconcat(global.logdir, filename);
+		free(filename);
+	}
+	if (FAILEDTOALLOC(l->filename)) {
+		free(l);
+		return RET_ERROR_OOM;
+	}
+	l->refcount = 1;
+	l->fd = -1; /* opened lazily by logfile_open */
+	l->next = logfile_root;
+	logfile_root = l;
+	*logfile = l;
+	return RET_OK;
+}
+
+static void logfile_dereference(struct logfile *logfile) { /* drop one reference; last one unchains, closes and frees */
+	assert (logfile != NULL);
+	assert (logfile->refcount > 0);
+	if (--logfile->refcount == 0) {
+
+		if (logfile_root == logfile)
+			logfile_root = logfile->next;
+		else { /* find the predecessor to unchain this entry */
+			struct logfile *previous = logfile_root;
+
+			while (previous != NULL && previous->next != logfile)
+				previous = previous->next;
+			assert (previous != NULL);
+			assert (previous->next == logfile);
+			previous->next = logfile->next;
+		}
+		if (logfile->fd >= 0) {
+			int ret, e;
+
+			ret = close(logfile->fd); logfile->fd = -1;
+			if (ret < 0) {
+				e = errno;
+				fprintf(stderr,
+"Error received when closing log file '%s': %d=%s\n",
+					logfile->filename, e, strerror(e));
+			}
+		}
+		free(logfile->filename);
+		free(logfile);
+	}
+}
+
+static retvalue logfile_open(struct logfile *logfile) { /* open (create if needed) the log file for appending */
+	assert (logfile != NULL);
+	assert (logfile->fd < 0);
+
+	(void)dirs_make_parent(logfile->filename); /* best effort; a failure shows up in open() below */
+	logfile->fd = open(logfile->filename,
+			O_CREAT|O_APPEND|O_NOCTTY|O_WRONLY,
+			0666);
+	if (logfile->fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Cannot open/create logfile '%s': %d=%s\n",
+				logfile->filename, e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	return RET_OK;
+}
+
+static retvalue logfile_write(struct logfile *logfile, struct target *target, const char *name, /*@null@*/const char *version, /*@null@*/const char *oldversion) { /* append one timestamped add/replace/remove line */
+	int ret;
+	time_t currenttime;
+	struct tm t;
+
+	assert (logfile->fd >= 0);
+
+	currenttime = time(NULL);
+	if (localtime_r(&currenttime, &t) == NULL) { /* no usable time: log with a placeholder stamp */
+		if (version != NULL && oldversion != NULL)
+			ret = dprintf(logfile->fd,
+"EEEE-EE-EE EE:EE:EE replace %s %s %s %s %s %s %s\n",
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, version, oldversion);
+		else if (version != NULL)
+			ret = dprintf(logfile->fd,
+"EEEE-EE-EE EE:EE:EE add %s %s %s %s %s %s\n",
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, version);
+		else
+			ret = dprintf(logfile->fd,
+"EEEE-EE-EE EE:EE:EE remove %s %s %s %s %s %s\n",
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, oldversion);
+	} else if (version != NULL && oldversion != NULL) /* %02d: tm fields are signed int, %02u was a format mismatch */
+		ret = dprintf(logfile->fd,
+"%04d-%02d-%02d %02d:%02d:%02d replace %s %s %s %s %s %s %s\n",
+				1900+t.tm_year, t.tm_mon+1,
+				t.tm_mday, t.tm_hour,
+				t.tm_min, t.tm_sec,
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, version, oldversion);
+	else if (version != NULL)
+		ret = dprintf(logfile->fd,
+"%04d-%02d-%02d %02d:%02d:%02d add %s %s %s %s %s %s\n",
+				1900+t.tm_year, t.tm_mon+1,
+				t.tm_mday, t.tm_hour,
+				t.tm_min, t.tm_sec,
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, version);
+	else
+		ret = dprintf(logfile->fd,
+"%04d-%02d-%02d %02d:%02d:%02d remove %s %s %s %s %s %s\n",
+				1900+t.tm_year, t.tm_mon+1,
+				t.tm_mday, t.tm_hour,
+				t.tm_min, t.tm_sec,
+				target->distribution->codename,
+				atoms_packagetypes[target->packagetype],
+				atoms_components[target->component],
+				atoms_architectures[target->architecture],
+				name, oldversion);
+	if (ret < 0) {
+		int e = errno;
+		fprintf(stderr, "Error writing to log file '%s': %d=%s\n",
+				logfile->filename, e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	return RET_OK;
+}
+
+struct notificator { /* one configured hook script plus its filter options */
+	char *scriptname; /* owned; absolute after configfile_expandname */
+	/* if defined, only call if it matches the package: */
+	packagetype_t packagetype;
+	component_t component;
+	architecture_t architecture;
+	command_t command;
+	bool withcontrol, changesacceptrule; /* --withcontrol / --changes flags */
+};
+
+static void notificator_done(/*@special@*/struct notificator *n) /*@releases n->scriptname, n->packagename, n->component, n->architecture@*/{ /* release memory held by a notificator (not the struct itself) */
+	free(n->scriptname);
+}
+
+static retvalue notificator_parse(struct notificator *n, struct configiterator *iter) { /* parse one "Log:" notifier line: options, then the script name */
+	retvalue r;
+	int c;
+
+	setzero(struct notificator, n);
+	n->architecture = atom_unknown;
+	n->component = atom_unknown;
+	n->packagetype = atom_unknown;
+	n->command = atom_unknown;
+	while ((c = config_nextnonspaceinline(iter)) != EOF) {
+		if (c == '-') { /* words starting with '-' are options */
+			char *word, *s, *detachedargument = NULL;
+			const char *argument;
+			atom_t *value_p = NULL;
+			enum atom_type value_type;
+			bool error = false;
+
+			r = config_completeword(iter, c, &word);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+
+			s = word + 1; /* split "--opt=arg" into option name and argument */
+			while (*s != '\0' && *s != '=')
+				s++;
+			if (*s == '=') {
+				argument = s+1;
+				s[0] = '\0';
+			} else
+				argument = NULL;
+			switch (s-word) { /* dispatch on the length of the option name */
+			case 2: /* short options -A, -C, -T */
+				if (word[1] == 'A') {
+					value_p = &n->architecture;
+					value_type = at_architecture;
+				} else if (word[1] == 'C') {
+					value_p = &n->component;
+					value_type = at_component;
+				} else if (word[1] == 'T') {
+					value_p = &n->packagetype;
+					value_type = at_packagetype;
+				} else
+					error = true;
+				break;
+			case 5:
+				if (strcmp(word, "--via") == 0) {
+					value_p = &n->command;
+					value_type = at_command;
+				} else
+					error = true;
+				break;
+			case 6:
+				if (strcmp(word, "--type") == 0) {
+					value_p = &n->packagetype;
+					value_type = at_packagetype;
+				} else
+					error = true;
+				break;
+			case 9:
+				if (strcmp(word, "--changes") == 0)
+					n->changesacceptrule = true;
+				else
+					error = true;
+				break;
+			case 11:
+				if (strcmp(word, "--component") == 0) {
+					value_p = &n->component;
+					value_type = at_component;
+				} else
+					error = true;
+				break;
+			case 13:
+				if (strcmp(word, "--withcontrol") == 0)
+					n->withcontrol = true;
+				else
+					error = true;
+				break;
+			case 14:
+				if (strcmp(word, "--architecture") == 0) {
+					value_p = &n->architecture;
+					value_type = at_architecture;
+				} else
+					error = true;
+				break;
+			default:
+				error = true;
+				break;
+			}
+			if (error) {
+				fprintf(stderr,
+"Unknown Log notifier option in %s, line %u, column %u: '%s'\n",
+					config_filename(iter),
+					config_markerline(iter),
+					config_markercolumn(iter), word);
+				free(word);
+				return RET_ERROR;
+			}
+			if (value_p == NULL) { /* a boolean flag: it may not carry an =argument */
+				if (argument != NULL) {
+					fprintf(stderr,
+"Log notifier option has = but may not, in %s, line %u, column %u: '%s'\n",
+						config_filename(iter),
+						config_markerline(iter),
+						config_markercolumn(iter),
+						word);
+					free(word);
+					return RET_ERROR;
+				}
+				free(word);
+				continue;
+			}
+			/* option expecting string value: */
+			if (atom_defined(*value_p)) {
+				fprintf(stderr,
+"Repeated notifier option %s in %s, line %u, column %u!\n", word,
+					config_filename(iter),
+					config_markerline(iter),
+					config_markercolumn(iter));
+				free(word);
+				return RET_ERROR;
+			}
+			detachedargument = NULL;
+			if (argument == NULL) { /* "--opt value" form: fetch the next word */
+				r = config_getwordinline(iter, &detachedargument);
+				if (RET_WAS_ERROR(r))
+					return r; /* NOTE(review): 'word' leaks on this path */
+				if (r == RET_NOTHING) {
+					fprintf(stderr,
+"Log notifier option %s misses an argument in %s, line %u, column %u\n",
+						word, config_filename(iter),
+						config_line(iter),
+						config_column(iter));
+					free(word);
+					return RET_ERROR;
+				}
+				argument = detachedargument;
+			}
+			*value_p = atom_find(value_type, argument);
+			if (!atom_defined(*value_p)) { /* unknown atom: warn and skip the whole notifier */
+				fprintf(stderr,
+"Warning: unknown %s '%s', ignoring notificator line at line %u in %s\n",
+					atomtypes[value_type],
+					argument, config_line(iter),
+					config_filename(iter));
+				config_overline(iter);
+				free(detachedargument);
+				free(word);
+				return RET_NOTHING;
+			}
+			free(detachedargument);
+			free(word);
+		} else { /* first non-option word: the script name */
+			char *script;
+
+			if (n->changesacceptrule && atom_defined(n->architecture)) {
+				fprintf(stderr,
+"Error: --changes and --architecture cannot be combined! (line %u in '%s')\n",
+					config_markerline(iter), config_filename(iter));
+				return RET_ERROR;
+			}
+			if (n->changesacceptrule && atom_defined(n->component)) {
+				fprintf(stderr,
+"Error: --changes and --component cannot be combined! (line %u in %s)\n",
+					config_markerline(iter), config_filename(iter));
+				return RET_ERROR;
+			}
+			if (n->changesacceptrule && atom_defined(n->packagetype)) {
+				fprintf(stderr,
+"Error: --changes and --type cannot be combined! (line %u in %s)\n",
+					config_markerline(iter), config_filename(iter));
+				return RET_ERROR;
+			}
+
+			r = config_completeword(iter, c, &script);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+
+			c = config_nextnonspaceinline(iter); /* the script name must be the last word */
+			if (c != EOF) {
+				fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Unexpected data at end of notifier after script name '%s'\n",
+					config_filename(iter),
+					config_line(iter), config_column(iter),
+					script);
+				free(script);
+				return RET_ERROR;
+			}
+			n->scriptname = configfile_expandname(script, script);
+			if (FAILEDTOALLOC(n->scriptname))
+				return RET_ERROR_OOM;
+			return RET_OK;
+		}
+	}
+	fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Unexpected end of line: name of notifier script missing!\n",
+		config_filename(iter), config_line(iter), config_column(iter));
+	return RET_ERROR;
+}
+
+/*@null@*/ static struct notification_process { /* FIFO queue of pending/running hook invocations */
+	/*@null@*/struct notification_process *next;
+	char **arguments; /* NULL-terminated argv for the hook script */
+	/*@null@*/char *causingfile;
+	/*@null@*/char *causingrule;
+	/*@null@*/char *suitefrom;
+	/* data to send to the process */
+	size_t datalen, datasent;
+	/*@null@*/char *data;
+	/* process */
+	pid_t child; /* 0 = not started yet, >0 = running pid */
+	int fd; /* write end of the child's stdin pipe, or -1 */
+} *processes = NULL;
+
+static void notification_process_free(/*@only@*/struct notification_process *p) { /* release a queue entry and everything it owns */
+	char **argument;
+
+	if (p->fd >= 0)
+		(void)close(p->fd);
+	for (argument = p->arguments ; *argument != NULL ; argument++)
+		free(*argument);
+	free(p->arguments);
+	free(p->causingfile);
+	free(p->causingrule);
+	free(p->suitefrom);
+	free(p->data);
+	free(p);
+}
+
+static int catchchildren(void) { /* reap finished notificator children; returns how many were collected */
+	pid_t child;
+	int status;
+	struct notification_process *p, **pp;
+	int returned = 0;
+
+	/* to avoid stealing aptmethods.c children, only
+	 * check for our children. (As not many run, that
+	 * is no large overhead. */
+	pp = &processes;
+	while ((p=*pp) != NULL) {
+		if (p->child <= 0) { /* not started yet: keep it queued */
+			pp = &p->next;
+			continue;
+		}
+
+		child = waitpid(p->child, &status, WNOHANG);
+		if (child == 0) { /* still running */
+			pp = &p->next;
+			continue;
+		}
+		if (child < 0) {
+			int e = errno;
+			fprintf(stderr,
+"Error calling waitpid on notification child: %d=%s\n",
+				e, strerror(e));
+			/* but still handle the failed child: */
+		} else if (WIFSIGNALED(status)) {
+			fprintf(stderr,
+"Notification process '%s' killed with signal %d!\n",
+				p->arguments[0], WTERMSIG(status));
+		} else if (!WIFEXITED(status)) {
+			fprintf(stderr,
+"Notification process '%s' failed!\n",
+				p->arguments[0]);
+		} else if (WIFEXITED(status) && WEXITSTATUS(status) != 0) {
+			fprintf(stderr,
+"Notification process '%s' returned with exit code %d!\n",
+				p->arguments[0],
+				(int)(WEXITSTATUS(status)));
+		}
+		if (p->fd >= 0) {
+			(void)close(p->fd);
+			p->fd = -1;
+		}
+		p->child = 0;
+		*pp = p->next; /* unchain and free the finished entry */
+		notification_process_free(p);
+		returned++;
+	}
+	return returned;
+}
+
+static void feedchildren(bool dowait) { /* push queued stdin data to running notificator children */
+	struct notification_process *p;
+	fd_set w;
+	int ret;
+	int number = 0;
+	struct timeval tv = {0, 0};
+
+	FD_ZERO(&w);
+	for (p = processes; p!= NULL ; p = p->next) { /* collect pipes that still have data pending */
+		if (p->child > 0 && p->fd >= 0 && p->datasent < p->datalen) {
+			FD_SET(p->fd, &w);
+			if (p->fd >= number)
+				number = p->fd + 1;
+		}
+	}
+	if (number == 0)
+		return;
+	ret = select(number, NULL, &w, NULL, dowait?NULL:&tv);
+	if (ret < 0) {
+		// TODO...
+		return;
+	}
+	for (p = processes; p != NULL ; p = p->next) {
+		if (p->child > 0 && p->fd >= 0 && FD_ISSET(p->fd, &w)) {
+			size_t tosent = p->datalen - p->datasent;
+			ssize_t sent;
+			if (tosent > (size_t)512)
+				tosent = 512;
+			sent = write(p->fd, p->data+p->datasent, tosent); /* bugfix: was hard-coded 512, reading past the buffer */
+			if (sent < 0) {
+				int e = errno;
+				fprintf(stderr,
+"Error '%s' while sending data to '%s', sending SIGABRT to it!\n",
+					strerror(e),
+					p->arguments[0]);
+				(void)kill(p->child, SIGABRT);
+				continue; /* bugfix: never add a negative 'sent' to datasent */
+			}
+			p->datasent += sent;
+			if (p->datasent >= p->datalen) { /* everything delivered: drop the buffer */
+				free(p->data);
+				p->data = NULL;
+			}
+		}
+	}
+}
+
+static size_t runningchildren(void) {
+	/* count leading queue entries whose child was already started */
+	const struct notification_process *entry;
+	size_t count = 0;
+
+	for (entry = processes ;
+			entry != NULL && entry->child != 0 ;
+			entry = entry->next)
+		count++;
+	return count;
+}
+
+static retvalue startchild(void) { /* fork+exec the first not-yet-started queue entry, feeding its stdin if data is queued */
+	struct notification_process *p;
+	pid_t child;
+	int filedes[2];
+	int ret;
+
+	p = processes; /* skip entries whose child already runs */
+	while (p != NULL && p->child != 0)
+		p = p->next;
+	if (p == NULL)
+		return RET_NOTHING;
+	if (p->datalen > 0) { /* set up a pipe for the child's stdin */
+		ret = pipe(filedes);
+		if (ret < 0) {
+			int e = errno;
+			fprintf(stderr, "Error creating pipe: %d=%s!\n",
+					e, strerror(e));
+			return RET_ERRNO(e);
+		}
+		p->fd = filedes[1];
+	} else {
+		p->fd = -1;
+	}
+	child = fork();
+	if (child == 0) { /* child: wire up stdin, clean fds and exec the hook */
+		if (p->datalen > 0) {
+			dup2(filedes[0], 0);
+			if (filedes[0] != 0)
+				(void)close(filedes[0]);
+			(void)close(filedes[1]);
+		}
+		/* Try to close all open fd but 0,1,2 */
+		closefrom(3);
+		sethookenvironment(p->causingfile, p->causingrule,
+				p->suitefrom, NULL);
+		(void)execv(p->arguments[0], p->arguments);
+		fprintf(stderr, "Error executing '%s': %s\n", p->arguments[0],
+				strerror(errno));
+		_exit(255);
+	}
+	if (p->datalen > 0) { /* parent keeps only the write end */
+		(void)close(filedes[0]);
+		markcloseonexec(p->fd);
+	}
+	if (child < 0) {
+		int e = errno;
+		fprintf(stderr, "Error forking: %d=%s!\n", e, strerror(e));
+		if (p->fd >= 0) {
+			(void)close(p->fd);
+			p->fd = -1;
+		}
+		return RET_ERRNO(e);
+	}
+	p->child = child;
+	if (p->datalen > 0) { /* opportunistically write as much as the pipe accepts right now */
+		struct pollfd polldata;
+		ssize_t written;
+
+		polldata.fd = p->fd;
+		polldata.events = POLLOUT;
+		while (poll(&polldata, 1, 0) > 0) {
+			if ((polldata.revents & POLLNVAL) != 0) {
+				p->fd = -1;
+				return RET_ERROR;
+			}
+			if ((polldata.revents & POLLHUP) != 0) { /* child closed stdin early: nothing more to send */
+				(void)close(p->fd);
+				p->fd = -1;
+				return RET_OK;
+			}
+			if ((polldata.revents & POLLOUT) != 0) {
+				size_t towrite = p->datalen - p->datasent;
+				if (towrite > (size_t)512)
+					towrite = 512;
+				written = write(p->fd,
+						p->data + p->datasent,
+						towrite);
+				if (written < 0) {
+					int e = errno;
+					fprintf(stderr,
+"Error '%s' while sending data to '%s', sending SIGABRT to it!\n",
+						strerror(e),
+						p->arguments[0]);
+					(void)kill(p->child, SIGABRT);
+					return RET_ERRNO(e);
+				}
+				p->datasent += written;
+				if (p->datasent >= p->datalen) { /* done: free buffer and close the pipe */
+					free(p->data);
+					p->data = NULL;
+					ret = close(p->fd);
+					p->fd = -1;
+					if (ret != 0)
+						return RET_ERRNO(errno);
+					else
+						return RET_OK;
+				}
+				continue;
+			}
+			/* something to write but at the same time not,
+			 * let's better stop here better */
+			return RET_OK;
+		}
+	}
+	return RET_OK;
+}
+
+static retvalue notificator_enqueuechanges(struct notificator *n, const char *codename, const char *name, const char *version, const char *safefilename, /*@null@*/const char *filekey) { /* queue an "accepted" call for a --changes notifier */
+	size_t count, i, j;
+	char **arguments;
+	struct notification_process *p;
+
+	catchchildren(); /* housekeeping: reap finished and feed running children first */
+	feedchildren(false);
+	if (!n->changesacceptrule)
+		return RET_NOTHING;
+	if (limitation_missed(n->command, causingcommand)) {
+		return RET_NOTHING;
+	}
+	count = 6; /* script "accepted" codename name version safename */
+	if (filekey != NULL)
+		count++;
+	arguments = nzNEW(count + 1, char*);
+	if (FAILEDTOALLOC(arguments))
+		return RET_ERROR_OOM;
+	i = 0;
+	arguments[i++] = strdup(n->scriptname);
+	arguments[i++] = strdup("accepted");
+	arguments[i++] = strdup(codename);
+	arguments[i++] = strdup(name);
+	arguments[i++] = strdup(version);
+	arguments[i++] = strdup(safefilename);
+	if (filekey != NULL)
+		arguments[i++] = strdup(filekey);
+	assert (i == count);
+	arguments[i] = NULL;
+	for (i = 0 ; i < count ; i++) /* any strdup may have failed; check them all at once */
+		if (FAILEDTOALLOC(arguments[i])) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			return RET_ERROR_OOM;
+		}
+	if (processes == NULL) { /* append a new entry at the end of the queue */
+		p = NEW(struct notification_process);
+		processes = p;
+	} else {
+		p = processes;
+		while (p->next != NULL)
+			p = p->next;
+		p->next = NEW(struct notification_process);
+		p = p->next;
+	}
+	if (FAILEDTOALLOC(p)) {
+		for (j = 0 ; j < count ; j++)
+			free(arguments[j]);
+		free(arguments);
+		return RET_ERROR_OOM;
+	}
+	if (causingfile != NULL) {
+		p->causingfile = strdup(causingfile);
+		if (FAILEDTOALLOC(p->causingfile)) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			free(p);
+			return RET_ERROR_OOM;
+		}
+	} else
+		p->causingfile = NULL;
+	p->causingrule = NULL;
+	p->suitefrom = NULL;
+	p->arguments = arguments;
+	p->next = NULL;
+	p->child = 0;
+	p->fd = -1;
+	p->datalen = 0;
+	p->datasent = 0;
+	p->data = NULL;
+
+	if (runningchildren() < 1) /* start it right away if nothing else is running */
+		startchild();
+	return RET_OK;
+}
+
+static retvalue notificator_enqueue(struct notificator *n, struct target *target, const char *name, /*@null@*/const char *version, /*@null@*/const char *oldversion, /*@null@*/const struct strlist *filekeys, /*@null@*/const struct strlist *oldfilekeys, bool renotification, /*@null@*/const char *causingrule, /*@null@*/ const char *suitefrom) { /* queue an add/replace/remove/info call if the notifier's filters match */
+	size_t count, i;
+	char **arguments;
+	const char *action = NULL;
+	struct notification_process *p;
+
+	catchchildren(); /* housekeeping: reap finished and feed running children first */
+	feedchildren(false);
+	if (n->changesacceptrule) /* --changes notifiers are handled by notificator_enqueuechanges */
+		return RET_NOTHING;
+	// some day, some atom handling for those would be nice
+	if (limitation_missed(n->architecture, target->architecture)) {
+		if (runningchildren() < 1)
+			startchild();
+		return RET_NOTHING;
+	}
+	if (limitation_missed(n->component, target->component)) {
+		if (runningchildren() < 1)
+			startchild();
+		return RET_NOTHING;
+	}
+	if (limitation_missed(n->packagetype, target->packagetype)) {
+		if (runningchildren() < 1)
+			startchild();
+		return RET_NOTHING;
+	}
+	if (limitation_missed(n->command, causingcommand)) {
+		if (runningchildren() < 1)
+			startchild();
+		return RET_NOTHING;
+	}
+	count = 7; /* script action codename type component architecture */
+	if (version != NULL) { /* which action is derived from version/oldversion presence */
+		action = "add";
+		count += 2; /* version and filekeylist marker */
+		if (filekeys != NULL)
+			count += filekeys->count;
+	}
+	if (oldversion != NULL) {
+		assert (!renotification);
+
+		if (action == NULL)
+			action = "remove";
+		else
+			action = "replace";
+
+		count += 2; /* version and filekeylist marker */
+		if (oldfilekeys != NULL)
+			count += oldfilekeys->count;
+	}
+	assert (action != NULL);
+	if (renotification)
+		action = "info";
+	arguments = nzNEW(count + 1, char*);
+	if (FAILEDTOALLOC(arguments))
+		return RET_ERROR_OOM;
+	i = 0;
+	arguments[i++] = strdup(n->scriptname);
+	arguments[i++] = strdup(action);
+	arguments[i++] = strdup(target->distribution->codename);
+	arguments[i++] = strdup(atoms_packagetypes[target->packagetype]);
+	arguments[i++] = strdup(atoms_components[target->component]);
+	arguments[i++] = strdup(atoms_architectures[target->architecture]);
+	arguments[i++] = strdup(name);
+	if (version != NULL)
+		arguments[i++] = strdup(version);
+	if (oldversion != NULL)
+		arguments[i++] = strdup(oldversion);
+	if (version != NULL) { /* "--" separates the new filekey list */
+		int j;
+		arguments[i++] = strdup("--");
+		if (filekeys != NULL)
+			for (j = 0 ; j < filekeys->count ; j++)
+				arguments[i++] = strdup(filekeys->values[j]);
+	}
+	if (oldversion != NULL) { /* and another "--" the old filekey list */
+		int j;
+		arguments[i++] = strdup("--");
+		if (oldfilekeys != NULL)
+			for (j = 0 ; j < oldfilekeys->count ; j++)
+				arguments[i++] = strdup(oldfilekeys->values[j]);
+	}
+	assert (i == count);
+	arguments[i] = NULL;
+	for (i = 0 ; i < count ; i++) { /* any strdup may have failed; check them all at once */
+		size_t j;
+		if (FAILEDTOALLOC(arguments[i])) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			return RET_ERROR_OOM;
+		}
+	}
+	if (processes == NULL) { /* append a new entry at the end of the queue */
+		p = NEW(struct notification_process);
+		processes = p;
+	} else {
+		p = processes;
+		while (p->next != NULL)
+			p = p->next;
+		p->next = NEW(struct notification_process);
+		p = p->next;
+	}
+	if (FAILEDTOALLOC(p)) {
+		size_t j;
+		for (j = 0 ; j < count ; j++)
+			free(arguments[j]);
+		free(arguments);
+		return RET_ERROR_OOM;
+	}
+	if (causingfile != NULL) {
+		size_t j;
+		p->causingfile = strdup(causingfile);
+		if (FAILEDTOALLOC(p->causingfile)) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			free(p);
+			return RET_ERROR_OOM;
+		}
+	} else
+		p->causingfile = NULL;
+	if (causingrule != NULL) {
+		size_t j;
+		p->causingrule = strdup(causingrule);
+		if (FAILEDTOALLOC(p->causingrule)) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			free(p->causingfile);
+			free(p);
+			return RET_ERROR_OOM;
+		}
+	} else
+		p->causingrule = NULL;
+	if (suitefrom != NULL) {
+		size_t j;
+		p->suitefrom = strdup(suitefrom);
+		if (FAILEDTOALLOC(p->suitefrom)) {
+			for (j = 0 ; j < count ; j++)
+				free(arguments[j]);
+			free(arguments);
+			free(p->causingfile);
+			free(p->causingrule);
+			free(p);
+			return RET_ERROR_OOM;
+		}
+	} else
+		p->suitefrom = NULL;
+	p->arguments = arguments;
+	p->next = NULL;
+	p->child = 0;
+	p->fd = -1;
+	p->datalen = 0;
+	p->datasent = 0;
+	p->data = NULL;
+	if (runningchildren() < 1) /* start it right away if nothing else is running */
+		startchild();
+	return RET_OK;
+}
+
+void logger_wait(void) { /* run and wait for all queued notificator hooks (unless interrupted) */
+	while (processes != NULL) {
+		catchchildren();
+		if (interrupted())
+			break;
+		feedchildren(true);
+		// TODO: add option to start multiple at the same time
+		if (runningchildren() < 1)
+			startchild();
+		else { /* a child is running: back off briefly before polling again */
+			struct timeval tv = { 0, 100 };
+			select(0, NULL, NULL, NULL, &tv);
+		}
+	}
+}
+
+void logger_warn_waiting(void) {
+	/* warn about hooks still queued (e.g. after an interruption) and list them */
+	const struct notification_process *p;
+
+	if (processes == NULL)
+		return;
+
+	(void)fputs(
+"WARNING: some notificator hooks were not run!\n"
+"(most likely due to receiving an interruption request)\n"
+"You will either have to run them by hand or run rerunnotifiers if\n"
+"you want the information they get to not be out of sync.\n"
+"Missed calls are:\n", stderr);
+	for (p = processes ; p != NULL ; p = p->next) {
+		char **argument = p->arguments;
+
+		if (argument == NULL)
+			continue;
+		while (*argument != NULL) {
+			fprintf(stderr, "\"%s\"", *argument);
+			argument++;
+			if (*argument != NULL)
+				(void)fputc(' ', stderr);
+		}
+		(void)fputc('\n', stderr);
+	}
+}
+
+struct logger { /* per-distribution logging: optional logfile plus notificator hooks */
+	/*@dependent@*//*@null@*/struct logfile *logfile;
+	size_t notificator_count;
+	struct notificator *notificators; /* array of notificator_count entries */
+};
+
+void logger_free(struct logger *logger) {
+	/* release a logger, its logfile reference and all its notificators */
+	size_t i;
+
+	if (logger == NULL)
+		return;
+	if (logger->logfile != NULL)
+		logfile_dereference(logger->logfile);
+	if (logger->notificators != NULL) {
+		for (i = 0 ; i < logger->notificator_count ; i++)
+			notificator_done(&logger->notificators[i]);
+		free(logger->notificators);
+	}
+
+	free(logger);
+}
+
+retvalue logger_init(struct configiterator *iter, struct logger **logger_p) { /* parse a "Log:" config field: optional logfile name, then notifier lines */
+	struct logger *n;
+	retvalue r;
+	char *logfilename;
+	bool havenotificators;
+
+	r = config_getfileinline(iter, &logfilename);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING)
+		logfilename = NULL;
+	if (config_nextnonspaceinline(iter) != EOF) { /* at most one filename allowed on the first line */
+		fprintf(stderr, "Error parsing %s, line %u, column %u:\n"
+				"Unexpected second filename for logfile.\n",
+				config_filename(iter), config_line(iter),
+				config_column(iter));
+		free(logfilename);
+		return RET_ERROR;
+	}
+	config_overline(iter);
+	havenotificators = config_nextline(iter);
+	if (!havenotificators && logfilename == NULL) { /* completely empty field: no logger needed */
+		*logger_p = NULL;
+		return RET_NOTHING;
+	}
+
+	n = NEW(struct logger);
+	if (FAILEDTOALLOC(n)) {
+		free(logfilename);
+		return RET_ERROR_OOM;
+	}
+	if (logfilename != NULL) {
+		assert (*logfilename != '\0');
+		r = logfile_reference(logfilename, &n->logfile); /* takes ownership of logfilename */
+		if (RET_WAS_ERROR(r)) {
+			free(n);
+			return r;
+		}
+	} else
+		n->logfile = NULL;
+
+	n->notificators = NULL;
+	n->notificator_count = 0;
+
+	while (havenotificators) { /* each continuation line describes one notificator */
+		struct notificator *newnot;
+		newnot = realloc(n->notificators,
+				(n->notificator_count+1)
+				* sizeof(struct notificator));
+		if (FAILEDTOALLOC(newnot)) {
+			logger_free(n);
+			return RET_ERROR_OOM;
+		}
+		n->notificators = newnot;
+		r = notificator_parse(&n->notificators[n->notificator_count++],
+				iter);
+		if (RET_WAS_ERROR(r)) {
+			/* a bit ugly: also free the just failed item here */
+			logger_free(n);
+			return r;
+		}
+		if (r == RET_NOTHING) /* line was ignored (e.g. unknown atom): drop the slot again */
+			n->notificator_count--;
+		// TODO assert eol here...
+		havenotificators = config_nextline(iter);
+	}
+	*logger_p = n;
+	return RET_OK;
+}
+
+retvalue logger_prepare(struct logger *logger) {
+	/* open the configured log file if any; RET_NOTHING if none configured */
+	retvalue r;
+
+	if (logger->logfile == NULL)
+		return RET_NOTHING;
+	/* the early return above makes a second NULL check redundant */
+	if (logger->logfile->fd < 0)
+		r = logfile_open(logger->logfile);
+	else
+		r = RET_OK;
+	return r;
+}
+bool logger_isprepared(/*@null@*/const struct logger *logger) {
+	/* true unless a log file is configured but not yet opened */
+	if (logger == NULL)
+		return true;
+
+	return logger->logfile == NULL || logger->logfile->fd >= 0;
+}
+
+void logger_log(struct logger *log, struct target *target, const char *name, const char *version, const char *oldversion, const struct strlist *filekeys, const struct strlist *oldfilekeys, const char *causingrule, const char *suitefrom) { /* record an add/replace/remove in the logfile and via all notificators */
+	size_t i;
+
+	assert (name != NULL);
+	/* at least one version decides the action: add, remove or replace */
+	assert (version != NULL || oldversion != NULL);
+
+	if (log->logfile != NULL) /* NOTE(review): unlike logger_logchanges there is no log==NULL guard; callers must pass a logger */
+		logfile_write(log->logfile, target, name, version, oldversion);
+	for (i = 0 ; i < log->notificator_count ; i++) {
+		notificator_enqueue(&log->notificators[i], target,
+				name, version, oldversion,
+				filekeys, oldfilekeys, false,
+				causingrule, suitefrom);
+	}
+}
+
+void logger_logchanges(struct logger *log, const char *codename, const char *name, const char *version, const char *safefilename, const char *changesfilekey) { /* announce an accepted .changes file to all --changes notificators */
+	size_t i;
+
+	assert (name != NULL);
+	assert (version != NULL);
+
+	if (log == NULL) /* accepting a .changes may happen without any logger configured */
+		return;
+
+	for (i = 0 ; i < log->notificator_count ; i++) {
+		notificator_enqueuechanges(&log->notificators[i], codename,
+				name, version, safefilename,
+				changesfilekey);
+	}
+}
+
+bool logger_rerun_needs_target(const struct logger *logger, const struct target *target) {
+	/* does at least one notificator of this logger apply to the target? */
+	size_t i;
+
+	for (i = 0 ; i < logger->notificator_count ; i++) {
+		const struct notificator *n = &logger->notificators[i];
+
+		if (!limitation_missed(n->architecture,
+					target->architecture)
+				&& !limitation_missed(n->component,
+					target->component)
+				&& !limitation_missed(n->packagetype,
+					target->packagetype))
+			return true;
+	}
+	return false;
+}
+
+retvalue logger_reruninfo(struct logger *logger, struct target *target, const char *name, const char *version, /*@null@*/const struct strlist *filekeys) { /* re-announce an existing package to all notificators ("info" action) */
+	retvalue result, r;
+	size_t i;
+
+	assert (name != NULL);
+	assert (version != NULL);
+
+	result = RET_NOTHING;
+
+	for (i = 0 ; i < logger->notificator_count ; i++) {
+		r = notificator_enqueue(&logger->notificators[i], target,
+				name, version, NULL,
+				filekeys, NULL, true, /* renotification: action becomes "info" */
+				NULL, NULL);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
diff --git a/log.h b/log.h
new file mode 100644
index 0000000..4d0faf7
--- /dev/null
+++ b/log.h
@@ -0,0 +1,30 @@
+#ifndef REPREPRO_LOG_H
+#define REPREPRO_LOG_H
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_HOOKS_H
+#include "hooks.h"
+#endif
+
+struct target;
+struct logger;
+
+retvalue logger_init(struct configiterator *, /*@out@*/struct logger **);
+void logger_free(/*@only@*/struct logger *);
+
+retvalue logger_prepare(struct logger *logger);
+bool logger_isprepared(/*@null@*/const struct logger *logger);
+
+void logger_logchanges(struct logger *, const char * /*codename*/, const char * /*name*/, const char * /*version*/, const char * /*safefilename*/, /*@null@*/const char * /*changesfilekey*/);
+
+void logger_log(struct logger *, struct target *, const char * /*name*/, /*@null@*/const char * /*version*/, /*@null@*/const char */*oldversion*/, /*@null@*/const struct strlist * /*filekeys*/, /*@null@*/const struct strlist * /*oldfilekeys*/, /*@null@*/const char * /*causingrule*/, /*@null@*/const char * /*suitefrom*/);
+
+bool logger_rerun_needs_target(const struct logger *, const struct target *);
+retvalue logger_reruninfo(struct logger *, struct target *, const char * /*name*/, const char * /*version*/, /*@null@*/const struct strlist * /*filekeys*/);
+
+/* wait for all jobs to finish */
+void logger_wait(void);
+void logger_warn_waiting(void);
+#endif
diff --git a/main.c b/main.c
new file mode 100644
index 0000000..ad3f61a
--- /dev/null
+++ b/main.c
@@ -0,0 +1,5355 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2011,2012,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <strings.h>
+#include <fcntl.h>
+#include <signal.h>
+#include "error.h"
+#define DEFINE_IGNORE_VARIABLES
+#include "ignore.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "atoms.h"
+#include "dirs.h"
+#include "names.h"
+#include "filecntl.h"
+#include "files.h"
+#include "filelist.h"
+#include "target.h"
+#include "reference.h"
+#include "binaries.h"
+#include "sources.h"
+#include "release.h"
+#include "aptmethod.h"
+#include "updates.h"
+#include "pull.h"
+#include "upgradelist.h"
+#include "signature.h"
+#include "debfile.h"
+#include "checkindeb.h"
+#include "checkindsc.h"
+#include "checkin.h"
+#include "downloadcache.h"
+#include "termdecide.h"
+#include "tracking.h"
+#include "optionsfile.h"
+#include "dpkgversions.h"
+#include "incoming.h"
+#include "override.h"
+#include "log.h"
+#include "copypackages.h"
+#include "uncompression.h"
+#include "sourceextraction.h"
+#include "pool.h"
+#include "printlistformat.h"
+#include "globmatch.h"
+#include "needbuild.h"
+#include "archallflood.h"
+#include "sourcecheck.h"
+#include "uploaderslist.h"
+#include "sizes.h"
+#include "filterlist.h"
+#include "descriptions.h"
+#include "outhook.h"
+#include "package.h"
+
/* compile-time fallbacks, normally provided by the build system */
#ifndef STD_BASE_DIR
#define STD_BASE_DIR "."
#endif
#ifndef STD_METHOD_DIR
#define STD_METHOD_DIR "/usr/lib/apt/methods"
#endif

#ifndef LLONG_MAX
#define LLONG_MAX __LONG_LONG_MAX__
#endif

/* global options available to the rest */
struct global_config global;

/* global options */
static char /*@only@*/ /*@notnull@*/ // *g*
	*x_basedir = NULL,
	*x_outdir = NULL,
	*x_distdir = NULL,
	*x_dbdir = NULL,
	*x_listdir = NULL,
	*x_confdir = NULL,
	*x_logdir = NULL,
	*x_morguedir = NULL,
	*x_methoddir = NULL;
static char /*@only@*/ /*@null@*/
	*x_section = NULL,
	*x_priority = NULL,
	*x_component = NULL,
	*x_architecture = NULL,
	*x_packagetype = NULL;
static char /*@only@*/ /*@null@*/ *listformat = NULL;
static char /*@only@*/ /*@null@*/ *endhook = NULL;
static char /*@only@*/ /*@null@*/ *outhook = NULL;
/* paths of external uncompressors (NULL = not configured) */
static char /*@only@*/
	*gunzip = NULL,
	*bunzip2 = NULL,
	*unlzma = NULL,
	*unxz = NULL,
	*lunzip = NULL,
	*unzstd = NULL,
	*gnupghome = NULL;
static int listmax = -1;		/* -1 = unlimited list output */
static int listskip = 0;		/* entries to skip before listing */
static int delete = D_COPY;
static bool nothingiserror = false;
static bool nolistsdownload = false;
static bool keepunreferenced = false;
static bool keepunusednew = false;
static bool askforpassphrase = false;
static bool guessgpgtty = true;
static bool skipold = true;
static size_t waitforlock = 0;
static enum exportwhen export = EXPORT_CHANGED;
int verbose = 0;
static bool fast = false;
static bool verbosedatabase = false;
static enum spacecheckmode spacecheckmode = scm_FULL;
/* default: 100 MB for database to grow */
static off_t reserveddbspace = 1024*1024*100
/* 1MB safety margin for other filesystems */;
static off_t reservedotherspace = 1024*1024;

/* define for each config value an owner, and only higher owners are allowed
 * to change something owned by lower owners. */
static enum config_option_owner config_state,
#define O(x) owner_ ## x = CONFIG_OWNER_DEFAULT
O(fast), O(x_morguedir), O(x_outdir), O(x_basedir), O(x_distdir), O(x_dbdir), O(x_listdir), O(x_confdir), O(x_logdir), O(x_methoddir), O(x_section), O(x_priority), O(x_component), O(x_architecture), O(x_packagetype), O(nothingiserror), O(nolistsdownload), O(keepunusednew), O(keepunreferenced), O(keeptemporaries), O(keepdirectories), O(askforpassphrase), O(skipold), O(export), O(waitforlock), O(spacecheckmode), O(reserveddbspace), O(reservedotherspace), O(guessgpgtty), O(verbosedatabase), O(gunzip), O(bunzip2), O(unlzma), O(unxz), O(lunzip), O(unzstd), O(gnupghome), O(listformat), O(listmax), O(listskip), O(onlysmalldeletes), O(endhook), O(outhook);
#undef O

/* set a plain/global/duplicated config value, but only when the current
 * setter (config_state) outranks whoever set it before.
 * NOTE(review): bodies are bare if-statements, not do{}while(0); callers
 * must not use these as the then-branch of an unbraced if/else. */
#define CONFIGSET(variable, value) if (owner_ ## variable <= config_state) { \
					owner_ ## variable = config_state; \
					variable = value; }
#define CONFIGGSET(variable, value) if (owner_ ## variable <= config_state) { \
					owner_ ## variable = config_state; \
					global.variable = value; }
#define CONFIGDUP(variable, value) if (owner_ ## variable <= config_state) { \
					owner_ ## variable = config_state; \
					free(variable); \
					variable = strdup(value); \
					if (FAILEDTOALLOC(variable)) { \
						(void)fputs("Out of Memory!", \
								stderr); \
						exit(EXIT_FAILURE); \
					} }
+
/* Machinery to declare the per-command action functions with a uniform
 * signature.  Each parameter slot of an ACTION_* macro is filled with
 * y (parameter is used) or n (parameter is wrapped in UNUSED to silence
 * -Wunused warnings).
 * NOTE(review): the letter suffix (N/C/B/L/R/T/F/RF/D) appears to encode
 * which databases/context the action needs; confirm against the command
 * table later in this file (not visible in this chunk). */
#define y(type, name) type name
#define n(type, name) UNUSED(type dummy_ ## name)

#define ACTION_N(act, sp, args, name) static retvalue action_n_ ## act ## _ ## sp ## _ ## name ( \
			UNUSED(struct distribution *dummy2), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			int argc, args(const char *, argv[]))

#define ACTION_C(act, sp, a, name) static retvalue action_c_ ## act ## _ ## sp ## _ ## name ( \
			struct distribution *alldistributions, \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			a(int, argc), a(const char *, argv[]))

#define ACTION_B(act, sp, u, name) static retvalue action_b_ ## act ## _ ## sp ## _ ## name ( \
			u(struct distribution *, alldistributions), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			int argc, const char *argv[])

#define ACTION_L(act, sp, u, args, name) static retvalue action_l_ ## act ## _ ## sp ## _ ## name ( \
			struct distribution *alldistributions, \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			int argc, args(const char *, argv[]))

#define ACTION_R(act, sp, d, a, name) static retvalue action_r_ ## act ## _ ## sp ## _ ## name ( \
			d(struct distribution *, alldistributions), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			a(int, argc), a(const char *, argv[]))

#define ACTION_T(act, sp, name) static retvalue action_t_ ## act ## _ ## sp ## _ ## name ( \
			UNUSED(struct distribution *ddummy), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			UNUSED(int argc), UNUSED(const char *dummy4[]))

#define ACTION_F(act, sp, d, a, name) static retvalue action_f_ ## act ## _ ## sp ## _ ## name ( \
			d(struct distribution *, alldistributions), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			a(int, argc), a(const char *, argv[]))

#define ACTION_RF(act, sp, ud, u, name) static retvalue action_rf_ ## act ## _ ## sp ## _ ## name ( \
			ud(struct distribution *, alldistributions), \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			u(int, argc), u(const char *, argv[]))

#define ACTION_D(act, sp, u, name) static retvalue action_d_ ## act ## _ ## sp ## _ ## name ( \
			struct distribution *alldistributions, \
			sp(const char *, section), \
			sp(const char *, priority), \
			act(const struct atomlist *, architectures), \
			act(const struct atomlist *, components), \
			act(const struct atomlist *, packagetypes), \
			u(int, argc), u(const char *, argv[]))
+
+static retvalue splitnameandversion(const char *nameandversion, const char **name_p, const char **version_p) {
+ char *version;
+ retvalue r;
+
+ version = index(nameandversion, '=');
+ if (version != NULL) {
+ if (index(version+1, '=') != NULL) {
+ fprintf(stderr,
+"Cannot parse '%s': more than one '='\n",
+ nameandversion);
+ *name_p = NULL;
+ *version_p = NULL;
+ r = RET_ERROR;
+ } else if (version[1] == '\0') {
+ fprintf(stderr,
+"Cannot parse '%s': no version after '='\n",
+ nameandversion);
+ *name_p = NULL;
+ *version_p = NULL;
+ r = RET_ERROR;
+ } else if (version == nameandversion) {
+ fprintf(stderr,
+"Cannot parse '%s': no source name found before the '='\n",
+ nameandversion);
+ *name_p = NULL;
+ *version_p = NULL;
+ r = RET_ERROR;
+ } else {
+ *name_p = strndup(nameandversion, version - nameandversion);
+ if (FAILEDTOALLOC(*name_p))
+ r = RET_ERROR_OOM;
+ else
+ r = RET_OK;
+ *version_p = version + 1;
+ }
+ } else {
+ r = RET_OK;
+ *name_p = nameandversion;
+ *version_p = NULL;
+ }
+ return r;
+}
+
+static inline void splitnameandversion_done(const char **name_p, const char **version_p) {
+ // In case version_p points to a non-NULL value, name_p needs to be freed after usage.
+ if (*version_p != NULL) {
+ free((char*)*name_p);
+ *name_p = NULL;
+ }
+}
+
+ACTION_N(n, n, y, printargs) {
+ int i;
+
+ fprintf(stderr, "argc: %d\n", argc);
+ for (i=0 ; i < argc ; i++) {
+ fprintf(stderr, "%s\n", argv[i]);
+ }
+ return RET_OK;
+}
+
/* __dumpuncompressors: print for every known compression format whether
 * it can be handled built-in, by a configured external helper, or not
 * at all (with a hint which package/option would provide it). */
ACTION_N(n, n, n, dumpuncompressors) {
	enum compression c;

	assert (argc == 1);
	for (c = 0 ; c < c_COUNT ; c++) {
		if (c == c_none)
			continue;
		printf("%s: ", uncompression_suffix[c]);
		if (uncompression_builtin(c)) {
			if (extern_uncompressors[c] != NULL)
				printf("built-in + '%s'\n",
					extern_uncompressors[c]);
			else
				printf("built-in\n");
		} else if (extern_uncompressors[c] != NULL)
			printf("'%s'\n", extern_uncompressors[c]);
		else switch (c) {
			/* per-format hints for unsupported compressions */
			case c_bzip2:
				printf(
"not supported (install bzip2 or use --bunzip2 to tell where bunzip2 is).\n");

				break;
			case c_lzma:
				printf(
"not supported (install lzma or use --unlzma to tell where unlzma is).\n");
				break;
			case c_xz:
				printf(
"not supported (install xz-utils or use --unxz to tell where unxz is).\n");
				break;
			case c_lunzip:
				printf(
"not supported (install lzip or use --lunzip to tell where lunzip is).\n");
				break;
			case c_zstd:
				printf(
"not supported (install unzstd or use --unzstd to tell where unzstd is).\n");
				break;
			default:
				printf("not supported\n");
		}
	}
	return RET_OK;
}
/* __uncompress <format> <src> <dst>: decompress a single file using the
 * given format's built-in or external uncompressor. */
ACTION_N(n, n, y, uncompress) {
	enum compression c;

	assert (argc == 4);
	/* look up the format by its file suffix, skipping c_none */
	c = c_none + 1;
	while (c < c_COUNT && strcmp(argv[1], uncompression_suffix[c]) != 0)
		c++;
	if (c >= c_COUNT) {
		fprintf(stderr, "Unknown compression format '%s'\n", argv[1]);
		return RET_ERROR;
	}
	if (!uncompression_supported(c)) {
		fprintf(stderr,
"Cannot uncompress format '%s'\nCheck __dumpuncompressors for more details.\n",
				argv[1]);
		return RET_ERROR;
	}
	return uncompress_file(argv[2], argv[3], c);
}
+
/* __extractcontrol <.deb>: print the control file of a binary package.
 * The extracted chunk is heap-allocated and freed after printing. */
ACTION_N(n, n, y, extractcontrol) {
	retvalue result;
	char *control;

	assert (argc == 2);

	result = extractcontrol(&control, argv[1]);

	if (RET_IS_OK(result)) {
		puts(control);
		free(control);
	}
	return result;
}
+
/* __extractfilelist <.deb>: decode the compact filelist encoding produced
 * by getfilelist and print one absolute path per line.
 * Encoding as handled below (per leading byte c of each record):
 *   c == 2  -> push a directory name onto the stack,
 *   c <= 1  -> a file name, printed prefixed by all stacked directories,
 *   c > 2   -> pop: NOTE(review) depth is reduced by the full byte value;
 *              confirm against the encoder in filelist.c (not visible here).
 * Name lengths are encoded as a run of 255-bytes plus a final byte. */
ACTION_N(n, n, y, extractfilelist) {
	retvalue result;
	char *filelist;
	size_t fls, len;
	size_t lengths[256];	/* stacked directory name lengths */
	const unsigned char *dirs[256];	/* stacked directory name pointers */
	int depth = 0, i, j;

	assert (argc == 2);

	result = getfilelist(&filelist, &fls, argv[1]);
	if (RET_IS_OK(result)) {
		const unsigned char *p = (unsigned char*)filelist;
		while (*p != '\0') {
			unsigned char c = *(p++);
			if (c > 2) {
				/* leave c directory levels */
				if (depth >= c)
					depth -= c;
				else
					depth = 0;
			} else if (c == 2) {
				/* enter a sub directory */
				len = 0;
				while (*p == 255) {
					len +=255;
					p++;
				}
				len += *(p++);
				lengths[depth] = len;
				dirs[depth++] = p;
				p += len;
			} else {
				/* a file: print /dir1/dir2/.../name */
				len = 0;
				while (*p == 255) {
					len +=255;
					p++;
				}
				len += *(p++);
				(void)putchar('/');
				for (i = 0 ; i < depth ; i++) {
					const unsigned char *n = dirs[i];
					j = lengths[i];
					while (j-- > 0)
						(void)putchar(*(n++));
					(void)putchar('/');
				}
				while (len-- > 0)
					(void)putchar(*(p++));
				(void)putchar('\n');
			}
		}
		free(filelist);
	}
	return result;
}
+
/* __extractsourcesection <.dsc>: peek into the files of a source package
 * to determine its Section and Priority (from debian/control inside the
 * tarballs) and print them. */
ACTION_N(n, n, y, extractsourcesection) {
	struct dsc_headers dsc;
	struct sourceextraction *extraction;
	char *section = NULL, *priority = NULL, *directory, *filename;
	retvalue result, r;
	bool broken;
	int i;

	assert (argc == 2);

	r = sources_readdsc(&dsc, argv[1], argv[1], &broken);
	if (!RET_IS_OK(r))
		return r;
	/* a broken signature is only fatal unless explicitly ignored */
	if (broken && !IGNORING(brokensignatures,
"'%s' contains only broken signatures.\n"
"This most likely means the file was damaged or edited improperly\n",
				argv[1]))
		return RET_ERROR;
	r = dirs_getdirectory(argv[1], &directory);
	if (RET_WAS_ERROR(r)) {
		sources_done(&dsc);
		return r;
	}
	assert (RET_IS_OK(r));

	/* section/priority are filled in by the extraction when found */
	extraction = sourceextraction_init(&section, &priority);
	if (FAILEDTOALLOC(extraction)) {
		sources_done(&dsc);
		return RET_ERROR_OOM;
	}
	for (i = 0 ; i < dsc.files.names.count ; i ++)
		sourceextraction_setpart(extraction, i,
				dsc.files.names.values[i]);
	result = RET_OK;
	/* analyse only the parts the extraction still needs */
	while (sourceextraction_needs(extraction, &i)) {
		filename = calc_dirconcat(directory, dsc.files.names.values[i]);
		if (FAILEDTOALLOC(filename)) {
			result = RET_ERROR_OOM;
			break;
		}
		r = sourceextraction_analyse(extraction, filename);
		free(filename);
		if (RET_WAS_ERROR(r)) {
			result = r;
			break;
		}
	}
	free(directory);
	if (RET_WAS_ERROR(result)) {
		sourceextraction_abort(extraction);
	} else {
		r = sourceextraction_finish(extraction);
		RET_UPDATE(result, r);
	}
	if (RET_IS_OK(result)) {
		if (section != NULL)
			printf("Section: %s\n", section);
		if (priority != NULL)
			printf("Priority: %s\n", priority);
	}
	sources_done(&dsc);
	free(section);
	free(priority);
	return result;
}
+
/* __fakeemptyfilelist <filekey>: store an empty filelist for a filekey
 * (workaround for packages whose contents cannot be read). */
ACTION_F(n, n, n, y, fakeemptyfilelist) {
	assert (argc == 2);
	return fakefilelist(argv[1]);
}
+
+ACTION_F(n, n, n, y, generatefilelists) {
+ assert (argc == 2 || argc == 3);
+
+ if (argc == 2)
+ return files_regenerate_filelist(false);
+ if (strcmp(argv[1], "reread") == 0)
+ return files_regenerate_filelist(true);
+
+ fprintf(stderr, "Error: Unrecognized second argument '%s'\n"
+ "Syntax: reprepro generatefilelists [reread]\n",
+ argv[1]);
+ return RET_ERROR;
+}
+
/* __translatefilelists: migrate the filelist cache to the current
 * database layout. */
ACTION_T(n, n, translatefilelists) {
	return database_translate_filelists();
}
+
/* __translatelegacychecksums: migrate old md5sums-only file data to the
 * current checksums database; verbose when -V or --verbosedb is given. */
ACTION_N(n, n, n, translatelegacychecksums) {

	assert (argc == 1);

	return database_translate_legacy_checksums(
			verbosedatabase || verbose > 10);
}
+
+
/* _addmd5sums: read lines "<filekey> <checksums>" from stdin and record
 * them in the files database without looking at the pool.
 * The checksums object stays owned by this function (freed after the
 * database call). */
ACTION_F(n, n, n, n, addmd5sums) {
	char buffer[2000], *c, *m;
	retvalue result, r;

	result = RET_NOTHING;

	while (fgets(buffer, 1999, stdin) != NULL) {
		struct checksums *checksums;

		/* a line not ending in '\n' was truncated by fgets */
		c = strchr(buffer, '\n');
		if (c == NULL) {
			fprintf(stderr, "Line too long\n");
			return RET_ERROR;
		}
		*c = '\0';
		/* split at the first space: filekey, then checksums part */
		m = strchr(buffer, ' ');
		if (m == NULL) {
			fprintf(stderr, "Malformed line\n");
			return RET_ERROR;
		}
		*m = '\0'; m++;
		if (*m == '\0') {
			fprintf(stderr, "Malformed line\n");
			return RET_ERROR;
		}
		r = checksums_setall(&checksums, m, strlen(m));
		if (RET_WAS_ERROR(r))
			return r;
		r = files_add_checksums(buffer, checksums);
		RET_UPDATE(result, r);
		checksums_free(checksums);

	}
	return result;
}
+
+
/* _removereferences <identifier>: drop all references held by the given
 * reference identifier. */
ACTION_R(n, n, n, y, removereferences) {
	assert (argc == 2);
	return references_remove(argv[1]);
}
+
/* _removereference <identifier> <filekey>: drop a single reference.
 * Note the argument swap: argv[2] is the filekey, argv[1] the holder. */
ACTION_R(n, n, n, y, removereference) {
	assert (argc == 3);
	return references_decrement(argv[2], argv[1]);
}
+
/* dumpreferences: print the whole references database. */
ACTION_R(n, n, n, n, dumpreferences) {
	return references_dump();
}
+
+static retvalue checkifreferenced(UNUSED(void *data), const char *filekey) {
+ retvalue r;
+
+ r = references_isused(filekey);
+ if (r == RET_NOTHING) {
+ printf("%s\n", filekey);
+ return RET_OK;
+ } else if (RET_IS_OK(r)) {
+ return RET_NOTHING;
+ } else
+ return r;
+}
+
+ACTION_RF(n, n, n, n, dumpunreferenced) {
+ retvalue result;
+
+ result = files_foreach(checkifreferenced, NULL);
+ return result;
+}
+
+static retvalue deleteifunreferenced(UNUSED(void *data), const char *filekey) {
+ retvalue r;
+
+ r = references_isused(filekey);
+ if (r == RET_NOTHING) {
+ r = pool_delete(filekey);
+ return r;
+ } else if (RET_IS_OK(r)) {
+ return RET_NOTHING;
+ } else
+ return r;
+}
+
/* deleteunreferenced: remove every unreferenced file from the pool,
 * refusing to run while --keepunreferencedfiles is in effect. */
ACTION_RF(n, n, n, n, deleteunreferenced) {
	retvalue result;

	if (keepunreferenced) {
		/* different wording depending on whether the option came
		 * from the command line or a config file */
		if (owner_keepunreferenced == CONFIG_OWNER_CMDLINE)
			fprintf(stderr,
"Calling deleteunreferenced with --keepunreferencedfiles does not really make sense, does it?\n");
		else
			fprintf(stderr,
"Error: deleteunreferenced called with option\n"
"'keepunreferencedfiles' activated. Please run\n"
"'reprepro --nokeepunreferencedfiles deleteunreferenced',\n"
"if you are sure you want to delete those files.\n");
		return RET_ERROR;
	}
	result = files_foreach(deleteifunreferenced, NULL);
	return result;
}
+
/* deleteifunreferenced [filekeys...]: delete the named pool files if (and
 * only if) nothing references them; without arguments, filekeys are read
 * one per line from stdin. */
ACTION_RF(n, n, n, y, deleteifunreferenced) {
	char buffer[5000], *nl;
	int i;
	retvalue r, ret;

	ret = RET_NOTHING;
	if (argc > 1) {
		for (i = 1 ; i < argc ; i++) {
			r = deleteifunreferenced(NULL, argv[i]);
			RET_UPDATE(ret, r);
			if (r == RET_NOTHING && verbose >= 0)
				fprintf(stderr, "Not removing '%s'\n",
						argv[i]);
		}

	} else
		while (fgets(buffer, 4999, stdin) != NULL) {
			nl = strchr(buffer, '\n');
			if (nl == NULL) {
				/* overlong line: bail out
				 * NOTE(review): no error message is printed
				 * here, unlike _addmd5sums above */
				return RET_ERROR;
			}
			*nl = '\0';
			r = deleteifunreferenced(NULL, buffer);
			RET_UPDATE(ret, r);
			if (r == RET_NOTHING && verbose >= 0)
				fprintf(stderr, "Not removing '%s'\n",
						buffer);
		}
	return ret;
}
+
/* _addreference <filekey> <identifier>: mark filekey as needed by the
 * given identifier.
 * NOTE(review): the assert allows argc == 2, in which case argv[2] is
 * read anyway — presumably the argv array is NULL-terminated and
 * references_increment copes with a NULL identifier; confirm. */
ACTION_R(n, n, n, y, addreference) {
	assert (argc == 2 || argc == 3);
	return references_increment(argv[1], argv[2]);
}
+
/* _addreferences <identifier> [filekeys...]: mark many filekeys as needed
 * by identifier; without filekey arguments they are read line-wise from
 * stdin (overlong lines abort with RET_ERROR). */
ACTION_R(n, n, n, y, addreferences) {
	char buffer[5000], *nl;
	int i;
	retvalue r, ret;

	ret = RET_NOTHING;

	if (argc > 2) {
		for (i = 2 ; i < argc ; i++) {
			const char *filename = argv[i];
			r = references_increment(filename, argv[1]);
			RET_UPDATE(ret, r);
		}
	} else {
		while (fgets(buffer, 4999, stdin) != NULL) {
			nl = strchr(buffer, '\n');
			if (nl == NULL) {
				return RET_ERROR;
			}
			*nl = '\0';
			r = references_increment(buffer, argv[1]);
			RET_UPDATE(ret, r);
		}
	}

	return ret;
}
+
/* Remove the given name[=version] entries from one target of distribution.
 * For each entry removed for the first time, *remaining is decremented and
 * the entry is marked found, so the caller can report leftovers. */
static retvalue remove_from_target(struct distribution *distribution, struct trackingdata *trackingdata, struct target *target, int count, struct nameandversion *nameandversion, int *remaining) {
	retvalue result, r;
	int i;

	result = RET_NOTHING;
	for (i = 0 ; i < count ; i++){
		r = target_removepackage(target, distribution->logger,
				nameandversion[i].name, nameandversion[i].version, trackingdata);
		RET_UPDATE(distribution->status, r);
		if (RET_IS_OK(r)) {
			/* only count each request once, even if it matches
			 * in several targets */
			if (!nameandversion[i].found)
				(*remaining)--;
			nameandversion[i].found = true;
		}
		RET_UPDATE(result, r);
	}
	return result;
}
+
+ACTION_D(y, n, y, remove) {
+ retvalue result, r;
+ struct distribution *distribution;
+ struct nameandversion data[argc-2];
+ struct target *t;
+ char *delimiter;
+ int remaining;
+
+ trackingdb tracks;
+ struct trackingdata trackingdata;
+
+ r = distribution_get(alldistributions, argv[1], true, &distribution);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ if (distribution->readonly) {
+ fprintf(stderr,
+"Cannot remove packages from read-only distribution '%s'\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ r = distribution_prepareforwriting(distribution);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ if (distribution->tracking != dt_NONE) {
+ r = tracking_initialize(&tracks, distribution, false);
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ r = trackingdata_new(tracks, &trackingdata);
+ if (RET_WAS_ERROR(r)) {
+ (void)tracking_done(tracks, distribution);
+ return r;
+ }
+ }
+
+ for (int i = 0 ; i < argc-2 ; i++) {
+ data[i].found = false;
+ r = splitnameandversion(argv[2 + i], &data[i].name, &data[i].version);
+ if (RET_WAS_ERROR(r)) {
+ for (i-- ; i >= 0 ; i--) {
+ splitnameandversion_done(&data[i].name, &data[i].version);
+ }
+ return r;
+ }
+ }
+
+ remaining = argc-2;
+ result = RET_NOTHING;
+ for (t = distribution->targets ; t != NULL ; t = t->next) {
+ if (!target_matches(t, components, architectures, packagetypes))
+ continue;
+ r = target_initpackagesdb(t, READWRITE);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ r = remove_from_target(distribution,
+ (distribution->tracking != dt_NONE)
+ ? &trackingdata
+ : NULL,
+ t, argc-2, data,
+ &remaining);
+ RET_UPDATE(result, r);
+ r = target_closepackagesdb(t);
+ RET_UPDATE(distribution->status, r);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(result))
+ break;
+ }
+
+ if (distribution->tracking != dt_NONE) {
+ if (RET_WAS_ERROR(result))
+ trackingdata_done(&trackingdata);
+ else
+ trackingdata_finish(tracks, &trackingdata);
+ r = tracking_done(tracks, distribution);
+ RET_ENDUPDATE(result, r);
+ }
+ if (verbose >= 0 && !RET_WAS_ERROR(result) && remaining > 0) {
+ int i = argc - 2;
+
+ (void)fputs("Not removed as not found: ", stderr);
+ delimiter = "";
+ for (i = 0; i < argc - 2; i++) {
+ if (!data[i].found) {
+ if (data[i].version == NULL) {
+ fprintf(stderr, "%s%s", delimiter, data[i].name);
+ } else {
+ fprintf(stderr, "%s%s=%s", delimiter, data[i].name, data[i].version);
+ }
+ remaining--;
+ delimiter = ", ";
+ }
+ }
+ (void)fputc('\n', stderr);
+ }
+ for (int i = 0; i < argc - 2; i++) {
+ splitnameandversion_done(&data[i].name, &data[i].version);
+ }
+ return result;
+}
+
/* one source-removal request; arrays of these are terminated by an entry
 * with sourcename == NULL. 'found' records whether anything matched. */
struct removesrcdata {
	const char *sourcename;
	const char /*@null@*/ *sourceversion;
	bool found;
};
+
/* package_remove_each callback: RET_OK when the package's source
 * name (and version, when given) matches any entry of the
 * NULL-terminated removesrcdata array; the matching entry is marked
 * found. */
static retvalue package_source_fits(struct package *package, void *data) {
	struct removesrcdata *d = data;
	retvalue r;

	r = package_getsource(package);
	if (!RET_IS_OK(r))
		return r;
	for (; d->sourcename != NULL ; d++) {
		if (strcmp(package->source, d->sourcename) != 0)
			continue;
		if (d->sourceversion == NULL)
			break;
		if (strcmp(package->sourceversion, d->sourceversion) == 0)
			break;
	}
	if (d->sourcename == NULL)
		return RET_NOTHING;
	else {
		d->found = true;
		return RET_OK;
	}
}
+
/* Remove all packages belonging to the given sources from distribution.
 * With tracking data available, removal goes through the tracking
 * database (fast path); otherwise every package is scanned and matched
 * via package_source_fits. */
static retvalue remove_packages(struct distribution *distribution, struct removesrcdata *toremove) {
	trackingdb tracks;
	retvalue result, r;

	r = distribution_prepareforwriting(distribution);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->tracking != dt_NONE) {
		r = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(r)) {
			return r;
		}
		/* RET_NOTHING: no tracking data yet, fall back to scanning */
		if (r == RET_NOTHING)
			tracks = NULL;
	} else
		tracks = NULL;
	result = RET_NOTHING;
	if (tracks != NULL) {
		result = RET_NOTHING;
		for (; toremove->sourcename != NULL ; toremove++) {
			r = tracking_removepackages(tracks, distribution,
					toremove->sourcename,
					toremove->sourceversion);
			RET_UPDATE(result, r);
			if (r == RET_NOTHING) {
				if (verbose >= -2) {
					if (toremove->sourceversion == NULL)
						fprintf(stderr,
"Nothing about source package '%s' found in the tracking data of '%s'!\n"
"This either means nothing from this source in this version is there,\n"
"or the tracking information might be out of date.\n",
							toremove->sourcename,
							distribution->codename);
					else
						fprintf(stderr,
"Nothing about '%s' version '%s' found in the tracking data of '%s'!\n"
"This either means nothing from this source in this version is there,\n"
"or the tracking information might be out of date.\n",
							toremove->sourcename,
							toremove->sourceversion,
							distribution->codename);
				}
			}
		}
		r = tracking_done(tracks, distribution);
		RET_ENDUPDATE(result, r);
		return result;
	}
	return package_remove_each(distribution,
			// TODO: why not arch comp pt here?
			NULL, NULL, NULL,
			package_source_fits, NULL,
			toremove);
}
+
/* removesrc <codename> <sourcename> [version]: remove everything built
 * from one source package (the name is taken literally, '=' included). */
ACTION_D(n, n, y, removesrc) {
	retvalue r;
	struct distribution *distribution;
	/* data[1] is the NULL sentinel expected by remove_packages */
	struct removesrcdata data[2];

	r = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: Cannot remove packages from read-only distribution '%s'\n",
				distribution->codename);
		return RET_ERROR;
	}

	data[0].found = false;
	data[0].sourcename = argv[2];
	if (argc <= 3)
		data[0].sourceversion = NULL;
	else
		data[0].sourceversion = argv[3];
	if (index(data[0].sourcename, '=') != NULL && verbose >= 0) {
		fputs(
"Warning: removesrc treats '=' as normal character.\n"
"Did you want to use removesrcs?\n",
			stderr);
	}
	data[1].sourcename = NULL;
	data[1].sourceversion = NULL;
	return remove_packages(distribution, data);
}
+
/* removesrcs <codename> <sourcename[=version]>...: like removesrc, but
 * takes many name[=version] arguments and reports unmatched ones. */
ACTION_D(n, n, y, removesrcs) {
	retvalue r;
	struct distribution *distribution;
	/* one slot per argument plus the NULL sentinel */
	struct removesrcdata data[argc-1];
	int i;

	r = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: Cannot remove packages from read-only distribution '%s'\n",
				distribution->codename);
		return RET_ERROR;
	}
	for (i = 0 ; i < argc-2 ; i++) {
		data[i].found = false;
		r = splitnameandversion(argv[2 + i], &data[i].sourcename, &data[i].sourceversion);
		if (RET_WAS_ERROR(r)) {
			/* release the entries parsed so far */
			for (i--; i >= 0; i--) {
				splitnameandversion_done(&data[i].sourcename, &data[i].sourceversion);
			}
			return r;
		}
	}
	data[i].sourcename = NULL;
	data[i].sourceversion= NULL;
	r = remove_packages(distribution, data);
	for (i = 0 ; i < argc-2 ; i++) {
		if (verbose >= 0 && !data[i].found) {
			if (data[i].sourceversion != NULL)
				fprintf(stderr,
"No package from source '%s', version '%s' found.\n",
						data[i].sourcename,
						data[i].sourceversion);
			else
				fprintf(stderr,
"No package from source '%s' (any version) found.\n",
						data[i].sourcename);
		}
		splitnameandversion_done(&data[i].sourcename, &data[i].sourceversion);
	}
	return r;
}
+
+static retvalue package_matches_condition(struct package *package, void *data) {
+ term *condition = data;
+
+ return term_decidepackage(condition, package, package->target);
+}
+
/* removefilter <codename> <condition>: remove every package matching a
 * formula (as used by Filter/FilterFormula), honouring -C/-A/-T limits
 * and updating tracking data when present. */
ACTION_D(y, n, y, removefilter) {
	retvalue result, r;
	struct distribution *distribution;
	trackingdb tracks;
	struct trackingdata trackingdata;
	term *condition;

	assert (argc == 3);

	r = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: Cannot remove packages from read-only distribution '%s'\n",
				distribution->codename);
		return RET_ERROR;
	}

	/* compile the formula before touching the database */
	result = term_compilefortargetdecision(&condition, argv[2]);
	if (RET_WAS_ERROR(result))
		return result;

	r = distribution_prepareforwriting(distribution);
	if (RET_WAS_ERROR(r)) {
		term_free(condition);
		return r;
	}

	if (distribution->tracking != dt_NONE) {
		r = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(r)) {
			term_free(condition);
			return r;
		}
		/* RET_NOTHING: no tracking data present */
		if (r == RET_NOTHING)
			tracks = NULL;
		else {
			r = trackingdata_new(tracks, &trackingdata);
			if (RET_WAS_ERROR(r)) {
				(void)tracking_done(tracks, distribution);
				term_free(condition);
				return r;
			}
		}
	} else
		tracks = NULL;

	result = package_remove_each(distribution,
			components, architectures, packagetypes,
			package_matches_condition,
			(tracks != NULL)?&trackingdata:NULL,
			condition);
	if (tracks != NULL) {
		trackingdata_finish(tracks, &trackingdata);
		r = tracking_done(tracks, distribution);
		RET_ENDUPDATE(result, r);
	}
	term_free(condition);
	return result;
}
+
+static retvalue package_matches_glob(struct package *package, void *data) {
+ if (globmatch(package->name, data))
+ return RET_OK;
+ else
+ return RET_NOTHING;
+}
+
/* removematched <codename> <glob>: remove every package whose name
 * matches the glob, honouring -C/-A/-T limits and updating tracking
 * data when present. */
ACTION_D(y, n, y, removematched) {
	retvalue result, r;
	struct distribution *distribution;
	trackingdb tracks;
	struct trackingdata trackingdata;

	assert (argc == 3);

	r = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: Cannot remove packages from read-only distribution '%s'\n",
				distribution->codename);
		return RET_ERROR;
	}

	r = distribution_prepareforwriting(distribution);
	if (RET_WAS_ERROR(r))
		return r;

	if (distribution->tracking != dt_NONE) {
		r = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(r))
			return r;
		/* RET_NOTHING: no tracking data present */
		if (r == RET_NOTHING)
			tracks = NULL;
		else {
			r = trackingdata_new(tracks, &trackingdata);
			if (RET_WAS_ERROR(r)) {
				(void)tracking_done(tracks, distribution);
				return r;
			}
		}
	} else
		tracks = NULL;

	result = package_remove_each(distribution,
			components, architectures, packagetypes,
			package_matches_glob,
			(tracks != NULL)?&trackingdata:NULL,
			(void*)argv[2]);
	if (tracks != NULL) {
		trackingdata_finish(tracks, &trackingdata);
		r = tracking_done(tracks, distribution);
		RET_ENDUPDATE(result, r);
	}
	return result;
}
+
/* build-needing <codename> <architecture|any> [glob]: list source
 * packages that still need to be built for the given architecture
 * ("any" iterates over all non-source architectures of the
 * distribution, including arch 'all'). */
ACTION_B(y, n, y, buildneeded) {
	retvalue r;
	struct distribution *distribution;
	const char *glob;
	architecture_t arch;
	bool anyarchitecture;

	/* the architecture is a positional argument here, so the global
	 * filters make no sense */
	if (architectures != NULL) {
		fprintf(stderr,
"Error: build-needing cannot be used with --architecture!\n");
		return RET_ERROR;
	}
	if (packagetypes != NULL) {
		fprintf(stderr,
"Error: build-needing cannot be used with --packagetype!\n");
		return RET_ERROR;
	}

	if (argc == 4)
		glob = argv[3];
	else
		glob = NULL;

	if (strcmp(argv[2], "any") == 0) {
		anyarchitecture = true;
	} else {
		anyarchitecture = false;
		arch = architecture_find(argv[2]);
		if (!atom_defined(arch)) {
			fprintf(stderr,
"Error: Architecture '%s' is not known!\n", argv[2]);
			return RET_ERROR;
		}
		if (arch == architecture_source) {
			fprintf(stderr,
"Error: Architecture '%s' makes no sense for build-needing!\n", argv[2]);
			return RET_ERROR;
		}
	}
	r = distribution_get(alldistributions, argv[1], false, &distribution);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	if (!atomlist_in(&distribution->architectures, architecture_source)) {
		fprintf(stderr,
"Error: Architecture '%s' does not contain sources. build-needing cannot be used!\n",
				distribution->codename);
		return RET_ERROR;
	}
	if (anyarchitecture) {
		retvalue result;
		int i;

		/* arch 'all' first, then every real architecture */
		result = find_needs_build(distribution,
				architecture_all,
				components, glob, true);

		for (i = 0 ; i < distribution->architectures.count ; i++) {
			architecture_t a = distribution->architectures.atoms[i];

			if (a == architecture_source || a == architecture_all)
				continue;
			r = find_needs_build(distribution, a,
					components, glob, true);
			RET_UPDATE(result, r);
		}
		return result;
	} else {
		if (!atomlist_in(&distribution->architectures, arch) &&
				arch != architecture_all) {
			fprintf(stderr,
"Error: Architecture '%s' not found in distribution '%s'!\n", argv[2],
					distribution->codename);
			return RET_ERROR;
		}

		return find_needs_build(distribution, arch, components,
				glob, false);
	}
}
+
+/* _listcodenames: print the codename of every configured distribution,
+ * one per line.  Returns RET_OK if at least one was printed, otherwise
+ * RET_NOTHING. */
+ACTION_C(n, n, n, listcodenames) {
+	const struct distribution *iter = alldistributions;
+	bool printed = false;
+
+	while (iter != NULL) {
+		puts(iter->codename);
+		printed = true;
+		iter = iter->next;
+	}
+	return printed ? RET_OK : RET_NOTHING;
+}
+
+/* Print every instance of <packagename> in one target, honouring the
+ * global --skip/--limit counters (listskip/listmax).  Returns
+ * RET_NOTHING if nothing was printed, errors from the iterator
+ * otherwise. */
+static retvalue list_in_target(struct target *target, const char *packagename) {
+	retvalue r, result = RET_NOTHING;
+	struct package_cursor iterator;
+
+	/* listmax == 0 means the --limit budget is already exhausted */
+	if (listmax == 0)
+		return RET_NOTHING;
+
+	r = package_openduplicateiterator(target, packagename, 0, &iterator);
+	if (!RET_IS_OK(r))
+		return r;
+
+	/* do/while: the iterator is already positioned on the first hit */
+	do {
+		if (listskip <= 0) {
+			r = listformat_print(listformat, &iterator.current);
+			RET_UPDATE(result, r);
+			if (listmax > 0)
+				listmax--;
+		} else
+			listskip--;
+	} while (package_next(&iterator));
+	r = package_closeiterator(&iterator);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Per-package callback for 'list' without a package argument: honour
+ * the global --skip/--limit counters, then print the package in the
+ * configured list format. */
+static retvalue list_package(struct package *package, UNUSED(void *dummy3)) {
+	/* listmax == 0 means the --limit budget is exhausted */
+	if (listmax == 0)
+		return RET_NOTHING;
+	/* still skipping the first --skip entries */
+	if (listskip > 0) {
+		listskip--;
+		return RET_NOTHING;
+	}
+	if (listmax > 0)
+		listmax--;
+	return listformat_print(listformat, package);
+}
+
+/* reprepro list <codename> [<package>]: without a package name, list
+ * everything via package_foreach(); with one, look it up in every
+ * matching target of the distribution. */
+ACTION_B(y, n, y, list) {
+	retvalue result = RET_NOTHING, r;
+	struct distribution *distribution;
+	struct target *t;
+
+	assert (argc >= 2);
+
+	r = distribution_get(alldistributions, argv[1], false, &distribution);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	if (argc == 2)
+		return package_foreach(distribution,
+			components, architectures, packagetypes,
+			list_package, NULL, NULL);
+	else for (t = distribution->targets ; t != NULL ; t = t->next) {
+		/* honour -C/-A/-T restrictions */
+		if (!target_matches(t, components, architectures, packagetypes))
+			continue;
+		r = list_in_target(t, argv[2]);
+		if (RET_WAS_ERROR(r))
+			return r;
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* One version of a package, together with the architectures it was
+ * found in (used by the 'ls'/'lsbycomponent' commands). */
+struct lsversion {
+	/*@null@*/struct lsversion *next;
+	char *version;
+	struct atomlist architectures;
+};
+/* All versions found in one distribution (and, for lsbycomponent, one
+ * component).  The list is terminated by a sentinel node whose
+ * codename is NULL. */
+struct lspart {
+	struct lspart *next;
+	const char *codename;
+	const char *component;
+	struct lsversion *versions;
+};
+
+/* Record that <package>'s version exists for <architecture> in the list
+ * at versions_p.  If the version is already listed, only add the
+ * architecture (without duplicates); otherwise append a new node.
+ * Returns RET_ERROR_OOM on allocation failure.  (The node is linked
+ * into the list only once fully initialized, so an allocation failure
+ * cannot leave a node with a NULL version behind.) */
+static retvalue newlsversion(struct lsversion **versions_p, struct package *package, architecture_t architecture) {
+	struct lsversion *v, **v_p;
+
+	for (v_p = versions_p ; (v = *v_p) != NULL ; v_p = &v->next) {
+		if (strcmp(v->version, package->version) != 0)
+			continue;
+		return atomlist_add_uniq(&v->architectures, architecture);
+	}
+	v = zNEW(struct lsversion);
+	if (FAILEDTOALLOC(v))
+		return RET_ERROR_OOM;
+	v->version = package_dupversion(package);
+	if (FAILEDTOALLOC(v->version)) {
+		free(v);
+		return RET_ERROR_OOM;
+	}
+	*v_p = v;
+	return atomlist_add(&v->architectures, architecture);
+}
+
+/* Collect every version of <packagename> present in <target> into the
+ * list at versions_p, tagging each with the target's architecture. */
+static retvalue ls_in_target(struct target *target, const char *packagename, struct lsversion **versions_p) {
+	retvalue r, result;
+	struct package_cursor iterator;
+
+	/* RET_NOTHING (package not present here) is passed through */
+	result = package_openduplicateiterator(target, packagename, 0, &iterator);
+	if (!RET_IS_OK(result))
+		return result;
+	do {
+		r = package_getversion(&iterator.current);
+		if (RET_IS_OK(r))
+			r = newlsversion(versions_p, &iterator.current,
+					target->architecture);
+		RET_UPDATE(result, r);
+	} while (package_next(&iterator));
+	r = package_closeiterator(&iterator);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Print the collected 'ls' results as an aligned table
+ * "pkg | version | codename [| component] | arch1, arch2, ...",
+ * consuming and freeing the whole list (including the terminating
+ * sentinel node).  Returns RET_OK if anything was printed. */
+static inline retvalue printlsparts(const char *pkgname, struct lspart *parts) {
+	int versionlen, codenamelen, componentlen;
+	struct lspart *p;
+	retvalue result = RET_NOTHING;
+
+	/* first pass: compute the column widths for alignment */
+	versionlen = 0; codenamelen = 0; componentlen = 0;
+	for (p = parts ; p->codename != NULL ; p = p->next) {
+		struct lsversion *v;
+		int l;
+
+		l = strlen(p->codename);
+		if (l > codenamelen)
+			codenamelen = l;
+		if (p->component != NULL) {
+			l = strlen(p->component);
+			if (l > componentlen)
+				componentlen = l;
+		}
+		for (v = p->versions ; v != NULL ; v = v->next) {
+			l = strlen(v->version);
+			if (l > versionlen)
+				versionlen = l;
+		}
+	}
+	/* second pass: print and free; the sentinel ends the list */
+	while (parts->codename != NULL) {
+		p = parts;
+		parts = parts->next;
+		while (p->versions != NULL) {
+			architecture_t a; int i;
+			struct lsversion *v;
+
+			v = p->versions;
+			p->versions = v->next;
+
+			result = RET_OK;
+			printf("%s | %*s | %*s | ", pkgname,
+				versionlen, v->version,
+				codenamelen, p->codename);
+			if (componentlen > 0 && p->component != NULL)
+				printf("%*s | ", componentlen, p->component);
+			for (i = 0 ; i + 1 < v->architectures.count ; i++) {
+				a = v->architectures.atoms[i];
+				printf("%s, ", atoms_architectures[a]);
+			}
+			/* every version carries at least one architecture
+			 * (ensured by newlsversion), so atoms[i] is valid */
+			a = v->architectures.atoms[i];
+			puts(atoms_architectures[a]);
+
+			free(v->version);
+			atomlist_done(&v->architectures);
+			free(v);
+		}
+		free(p);
+	}
+	/* free the sentinel itself */
+	free(parts);
+	return result;
+}
+
+/* reprepro ls <package>: show in which distributions (and for which
+ * architectures) the given package exists, across all distributions. */
+ACTION_B(y, n, y, ls) {
+	retvalue r;
+	struct distribution *d;
+	struct target *t;
+	struct lspart *first, *last;
+
+	assert (argc == 2);
+
+	/* 'last' always points to an empty sentinel collecting the
+	 * versions of the distribution currently being inspected. */
+	first = zNEW(struct lspart);
+	if (FAILEDTOALLOC(first))
+		return RET_ERROR_OOM;
+	last = first;
+
+	/* NOTE(review): error returns leave the collected list unfreed,
+	 * as before; the process terminates shortly afterwards. */
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		for (t = d->targets ; t != NULL ; t = t->next) {
+			if (!target_matches(t, components, architectures,
+						packagetypes))
+				continue;
+			r = ls_in_target(t, argv[1], &last->versions);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		if (last->versions != NULL) {
+			last->codename = d->codename;
+			last->next = zNEW(struct lspart);
+			if (FAILEDTOALLOC(last->next))
+				return RET_ERROR_OOM;
+			last = last->next;
+		}
+	}
+	return printlsparts(argv[1], first);
+}
+
+/* reprepro lsbycomponent <package>: like 'ls', but grouped per
+ * component, adding a component column to the output. */
+ACTION_B(y, n, y, lsbycomponent) {
+	retvalue r;
+	struct distribution *d;
+	struct target *t;
+	struct lspart *first, *last;
+	int i;
+
+	assert (argc == 2);
+
+	/* 'last' always points to an empty sentinel collecting the
+	 * versions of the component currently being inspected. */
+	first = zNEW(struct lspart);
+	if (FAILEDTOALLOC(first))
+		return RET_ERROR_OOM;
+	last = first;
+
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		for (i = 0 ; i < d->components.count ; i ++) {
+			component_t component = d->components.atoms[i];
+
+			if (limitations_missed(components, component))
+				continue;
+			for (t = d->targets ; t != NULL ; t = t->next) {
+				if (t->component != component)
+					continue;
+				if (limitations_missed(architectures,
+							t->architecture))
+					continue;
+				if (limitations_missed(packagetypes,
+							t->packagetype))
+					continue;
+				r = ls_in_target(t, argv[1], &last->versions);
+				if (RET_WAS_ERROR(r))
+					return r;
+			}
+			if (last->versions != NULL) {
+				last->codename = d->codename;
+				last->component = atoms_components[component];
+				last->next = zNEW(struct lspart);
+				if (FAILEDTOALLOC(last->next))
+					return RET_ERROR_OOM;
+				last = last->next;
+			}
+		}
+	}
+	return printlsparts(argv[1], first);
+}
+
+/* Per-package callback for 'listfilter': print the package if it
+ * matches the compiled term condition, honouring the global
+ * --skip/--limit counters. */
+static retvalue listfilterprint(struct package *package, void *data) {
+	term *condition = data;
+	retvalue decision;
+
+	if (listmax == 0)
+		return RET_NOTHING;
+
+	decision = term_decidepackage(condition, package, package->target);
+	/* no match (or an error): hand the verdict through unchanged */
+	if (!RET_IS_OK(decision))
+		return decision;
+	if (listskip > 0) {
+		listskip--;
+		return RET_NOTHING;
+	}
+	if (listmax > 0)
+		listmax--;
+	return listformat_print(listformat, package);
+}
+
+/* reprepro listfilter <codename> <condition>: list all packages of a
+ * distribution matching a dependency-style formula. */
+ACTION_B(y, n, y, listfilter) {
+	retvalue r, result;
+	struct distribution *distribution;
+	term *condition;
+
+	assert (argc == 3);
+
+	r = distribution_get(alldistributions, argv[1], false, &distribution);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	/* compile the formula once, reuse it for every package */
+	result = term_compilefortargetdecision(&condition, argv[2]);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = package_foreach(distribution,
+			components, architectures, packagetypes,
+			listfilterprint, NULL, condition);
+	term_free(condition);
+	return result;
+}
+
+/* Per-package callback for 'listmatched': print packages whose name
+ * matches the shell-style glob, honouring the global --skip/--limit
+ * counters. */
+static retvalue listmatchprint(struct package *package, void *data) {
+	const char *glob = data;
+
+	if (listmax == 0)
+		return RET_NOTHING;
+	if (!globmatch(package->name, glob))
+		return RET_NOTHING;
+	if (listskip > 0) {
+		listskip--;
+		return RET_NOTHING;
+	}
+	if (listmax > 0)
+		listmax--;
+	return listformat_print(listformat, package);
+}
+
+/* reprepro listmatched <codename> <glob>: list all packages of a
+ * distribution whose name matches the given glob pattern. */
+ACTION_B(y, n, y, listmatched) {
+	retvalue r, result;
+	struct distribution *distribution;
+
+	assert (argc == 3);
+
+	r = distribution_get(alldistributions, argv[1], false, &distribution);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	result = package_foreach(distribution,
+			components, architectures, packagetypes,
+			listmatchprint, NULL, (void*)argv[2]);
+	return result;
+}
+
+/* _detect: register the listed pool files (from the arguments, or one
+ * filename per line on stdin) in the files database by checksumming
+ * them. */
+ACTION_F(n, n, n, y, detect) {
+	char buffer[5000], *nl;
+	int i;
+	retvalue r, ret;
+
+	ret = RET_NOTHING;
+	if (argc > 1) {
+		for (i = 1 ; i < argc ; i++) {
+			r = files_detect(argv[i]);
+			RET_UPDATE(ret, r);
+		}
+	} else
+		/* read filenames from stdin, one per line */
+		while (fgets(buffer, sizeof buffer, stdin) != NULL) {
+			nl = strchr(buffer, '\n');
+			if (nl == NULL) {
+				/* overlong line or missing final newline */
+				return RET_ERROR;
+			}
+			*nl = '\0';
+			r = files_detect(buffer);
+			RET_UPDATE(ret, r);
+		}
+	return ret;
+}
+
+/* _forget: remove the listed files (from the arguments, or one
+ * filename per line on stdin) from the files database. */
+ACTION_F(n, n, n, y, forget) {
+	char buffer[5000], *nl;
+	int i;
+	retvalue r, ret;
+
+	ret = RET_NOTHING;
+	if (argc > 1) {
+		for (i = 1 ; i < argc ; i++) {
+			r = files_remove(argv[i]);
+			RET_UPDATE(ret, r);
+		}
+	} else
+		/* read filenames from stdin, one per line */
+		while (fgets(buffer, sizeof buffer, stdin) != NULL) {
+			nl = strchr(buffer, '\n');
+			if (nl == NULL) {
+				/* overlong line or missing final newline */
+				return RET_ERROR;
+			}
+			*nl = '\0';
+			r = files_remove(buffer);
+			RET_UPDATE(ret, r);
+		}
+	return ret;
+}
+
+/* _listmd5sums: dump the md5sums of all known files, one per line. */
+ACTION_F(n, n, n, n, listmd5sums) {
+	return files_printmd5sums();
+}
+
+/* _listchecksums: dump all stored checksums of all known files. */
+ACTION_F(n, n, n, n, listchecksums) {
+	return files_printchecksums();
+}
+
+/* _dumpcontents <identifier>: print the raw key/chunk pairs of one
+ * packages database table (debugging aid). */
+ACTION_B(n, n, n, dumpcontents) {
+	retvalue result, r;
+	struct table *packages;
+	const char *package, *chunk;
+	struct cursor *cursor;
+
+	assert (argc == 2);
+
+	result = database_openpackages(argv[1], true, &packages);
+	if (RET_WAS_ERROR(result))
+		return result;
+	r = table_newglobalcursor(packages, true, &cursor);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r)) {
+		(void)table_close(packages);
+		return r;
+	}
+	result = RET_NOTHING;
+	while (cursor_nexttempdata(packages, cursor, &package, &chunk, NULL)) {
+		printf("'%s' -> '%s'\n", package, chunk);
+		result = RET_OK;
+	}
+	r = cursor_close(packages, cursor);
+	RET_ENDUPDATE(result, r);
+	r = table_close(packages);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* reprepro export [<codenames>]: unconditionally regenerate the index
+ * files (Packages, Sources, Release, ...) of the selected
+ * distributions. */
+ACTION_F(n, n, y, y, export) {
+	retvalue result, r;
+	struct distribution *d;
+
+	if (export == EXPORT_NEVER || export == EXPORT_SILENT_NEVER) {
+		fprintf(stderr,
+"Error: reprepro export incompatible with --export=never\n");
+		return RET_ERROR;
+	}
+
+	result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = RET_NOTHING;
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+
+		if (d->exportoptions[deo_noexport]) {
+			/* if explicitly selected, warn if not used: */
+			if (argc > 1 && verbose >= 0) {
+				printf("Not exporting %s (as it has the noexport option set).\n", d->codename);
+			}
+			continue;
+		}
+
+		if (verbose > 0) {
+			printf("Exporting %s...\n", d->codename);
+		}
+		r = distribution_fullexport(d);
+		if (RET_IS_OK(r))
+			/* avoid being exported again */
+			d->lookedat = false;
+		RET_UPDATE(result, r);
+		/* without --export=force the first failure aborts */
+		if (RET_WAS_ERROR(r) && export != EXPORT_FORCE) {
+			return r;
+		}
+	}
+	return result;
+}
+
+/***********************update********************************/
+
+/* reprepro update [<codenames>]: download the configured upstream
+ * index files and upgrade the selected distributions accordingly. */
+ACTION_D(y, n, y, update) {
+	retvalue result;
+	struct update_pattern *patterns;
+	struct update_distribution *u_distributions;
+
+	/* downloaded index files are cached below listdir */
+	result = dirs_make_recursive(global.listdir);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = updates_getpatterns(&patterns);
+	if (RET_WAS_ERROR(result))
+		return result;
+	assert (RET_IS_OK(result));
+
+	result = updates_calcindices(patterns, alldistributions,
+			components, architectures, packagetypes,
+			&u_distributions);
+	if (!RET_IS_OK(result)) {
+		if (result == RET_NOTHING) {
+			if (argc == 1)
+				fputs(
+"Nothing to do, because no distribution has an Update: field.\n", stderr);
+			else
+				fputs(
+"Nothing to do, because none of the selected distributions has an Update: field.\n",
+					stderr);
+		}
+		updates_freepatterns(patterns);
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	/* NOTE(review): always true after the assert above */
+	if (!RET_WAS_ERROR(result))
+		result = updates_update(u_distributions,
+				nolistsdownload, skipold,
+				spacecheckmode, reserveddbspace,
+				reservedotherspace);
+	updates_freeupdatedistributions(u_distributions);
+	updates_freepatterns(patterns);
+	return result;
+}
+
+/* reprepro predelete [<codenames>]: remove all packages that a
+ * subsequent 'update' run would delete or replace. */
+ACTION_D(y, n, y, predelete) {
+	retvalue result;
+	struct update_pattern *patterns;
+	struct update_distribution *u_distributions;
+
+	result = dirs_make_recursive(global.listdir);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = updates_getpatterns(&patterns);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	result = updates_calcindices(patterns, alldistributions,
+			components, architectures, packagetypes,
+			&u_distributions);
+	if (!RET_IS_OK(result)) {
+		if (result == RET_NOTHING) {
+			if (argc == 1)
+				fputs(
+"Nothing to do, because no distribution has an Update: field.\n", stderr);
+			else
+				fputs(
+"Nothing to do, because none of the selected distributions has an Update: field.\n",
+					stderr);
+		}
+		updates_freepatterns(patterns);
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	/* NOTE(review): always true after the assert above */
+	if (!RET_WAS_ERROR(result))
+		result = updates_predelete(u_distributions,
+				nolistsdownload, skipold);
+	updates_freeupdatedistributions(u_distributions);
+	updates_freepatterns(patterns);
+	return result;
+}
+
+/* reprepro checkupdate [<codenames>]: dry run of 'update' — show what
+ * would be done without modifying any distribution. */
+ACTION_B(y, n, y, checkupdate) {
+	retvalue result;
+	struct update_pattern *patterns;
+	struct update_distribution *u_distributions;
+
+	result = dirs_make_recursive(global.listdir);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = updates_getpatterns(&patterns);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = updates_calcindices(patterns, alldistributions,
+			components, architectures, packagetypes,
+			&u_distributions);
+	if (!RET_IS_OK(result)) {
+		if (result == RET_NOTHING) {
+			/* the config field is called "Update:" */
+			if (argc == 1)
+				fputs(
+"Nothing to do, because no distribution has an Update: field.\n", stderr);
+			else
+				fputs(
+"Nothing to do, because none of the selected distributions has an Update: field.\n",
+					stderr);
+		}
+		updates_freepatterns(patterns);
+		return result;
+	}
+
+	result = updates_checkupdate(u_distributions,
+			nolistsdownload, skipold);
+
+	updates_freeupdatedistributions(u_distributions);
+	updates_freepatterns(patterns);
+
+	return result;
+}
+
+/* reprepro dumpupdate [<codenames>]: like checkupdate, but in a
+ * machine-parseable output format. */
+ACTION_B(y, n, y, dumpupdate) {
+	retvalue result;
+	struct update_pattern *patterns;
+	struct update_distribution *u_distributions;
+
+	result = dirs_make_recursive(global.listdir);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = updates_getpatterns(&patterns);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	result = updates_calcindices(patterns, alldistributions,
+			components, architectures, packagetypes,
+			&u_distributions);
+	if (!RET_IS_OK(result)) {
+		if (result == RET_NOTHING) {
+			/* the config field is called "Update:" */
+			if (argc == 1)
+				fputs(
+"Nothing to do, because no distribution has an Update: field.\n", stderr);
+			else
+				fputs(
+"Nothing to do, because none of the selected distributions has an Update: field.\n",
+					stderr);
+		}
+		updates_freepatterns(patterns);
+		return result;
+	}
+
+	result = updates_dumpupdate(u_distributions,
+			nolistsdownload, skipold);
+
+	updates_freeupdatedistributions(u_distributions);
+	updates_freepatterns(patterns);
+
+	return result;
+}
+
+/* reprepro cleanlists: delete cached upstream index files in listdir
+ * that no current update pattern would use. */
+ACTION_L(n, n, n, n, cleanlists) {
+	retvalue result;
+	struct update_pattern *patterns;
+
+	assert (argc == 1);
+
+	/* nothing cached, nothing to clean */
+	if (!isdirectory(global.listdir))
+		return RET_NOTHING;
+
+	result = updates_getpatterns(&patterns);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = updates_cleanlists(alldistributions, patterns);
+	updates_freepatterns(patterns);
+	return result;
+}
+
+/***********************migrate*******************************/
+
+/* reprepro pull [<codenames>]: upgrade the selected distributions from
+ * other local distributions, following the configured pull rules. */
+ACTION_D(y, n, y, pull) {
+	retvalue result;
+	struct pull_rule *rules;
+	struct pull_distribution *p;
+
+	result = distribution_match(alldistributions, argc-1, argv+1,
+			true, READWRITE);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = pull_getrules(&rules);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	result = pull_prepare(alldistributions, rules, fast,
+			components, architectures, packagetypes, &p);
+	if (RET_WAS_ERROR(result)) {
+		pull_freerules(rules);
+		return result;
+	}
+	result = pull_update(p);
+
+	pull_freerules(rules);
+	pull_freedistributions(p);
+	return result;
+}
+
+/* reprepro checkpull [<codenames>]: dry run of 'pull' — show what
+ * would be done without modifying any distribution. */
+ACTION_B(y, n, y, checkpull) {
+	retvalue result;
+	struct pull_rule *rules;
+	struct pull_distribution *p;
+
+	result = distribution_match(alldistributions, argc-1, argv+1,
+			false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = pull_getrules(&rules);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	result = pull_prepare(alldistributions, rules, fast,
+			components, architectures, packagetypes, &p);
+	if (RET_WAS_ERROR(result)) {
+		pull_freerules(rules);
+		return result;
+	}
+	result = pull_checkupdate(p);
+
+	pull_freerules(rules);
+	pull_freedistributions(p);
+
+	return result;
+}
+
+/* reprepro dumppull [<codenames>]: like checkpull, but in a
+ * machine-parseable output format. */
+ACTION_B(y, n, y, dumppull) {
+	retvalue result;
+	struct pull_rule *rules;
+	struct pull_distribution *p;
+
+	result = distribution_match(alldistributions, argc-1, argv+1,
+			false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	result = pull_getrules(&rules);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	assert (RET_IS_OK(result));
+
+	result = pull_prepare(alldistributions, rules, fast,
+			components, architectures, packagetypes, &p);
+	if (RET_WAS_ERROR(result)) {
+		pull_freerules(rules);
+		return result;
+	}
+	result = pull_dumpupdate(p);
+
+	pull_freerules(rules);
+	pull_freedistributions(p);
+
+	return result;
+}
+
+/* Shared implementation of copy/move: transfer the packages named in
+ * argv[3..] (each optionally "name=version") from distribution argv[2]
+ * to argv[1]; delete them from the source if remove_source is set. */
+static retvalue copy_or_move(struct distribution *alldistributions, const struct atomlist * architectures, const struct atomlist * components,
+		const struct atomlist * packagetypes, int argc, const char * argv[], bool remove_source) {
+	struct distribution *destination, *source;
+	/* VLA holding argc-3 name/version pairs plus a NULL terminator */
+	struct nameandversion data[argc-2];
+	int i;
+	retvalue result;
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = distribution_get(alldistributions, argv[2], false, &source);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot %s packages to read-only distribution '%s'.\n",
+			remove_source ? "move" : "copy", destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	for (i = 0; i < argc-3; i++) {
+		result = splitnameandversion(argv[3 + i], &data[i].name, &data[i].version);
+		if (RET_WAS_ERROR(result)) {
+			/* free only the pairs parsed so far */
+			for (i-- ; i >= 0 ; i--) {
+				splitnameandversion_done(&data[i].name, &data[i].version);
+			}
+			return result;
+		}
+	}
+	/* NULL terminator so copy_by_name can detect the end */
+	data[i].name = NULL;
+	data[i].version = NULL;
+
+	result = copy_by_name(destination, source, data,
+			components, architectures, packagetypes, remove_source);
+	for (i = 0; i < argc - 3; i++) {
+		splitnameandversion_done(&data[i].name, &data[i].version);
+	}
+	return result;
+}
+
+/* reprepro copy <dest> <source> <packages...>: copy by package name. */
+ACTION_D(y, n, y, copy) {
+	return copy_or_move(alldistributions, architectures, components, packagetypes, argc, argv, false);
+}
+
+/* reprepro move <dest> <source> <packages...>: like copy, but also
+ * remove the packages from the source distribution. */
+ACTION_D(y, n, y, move) {
+	return copy_or_move(alldistributions, architectures, components, packagetypes, argc, argv, true);
+}
+
+/* Shared implementation of copysrc/movesrc: transfer all packages
+ * belonging to the source package names in argv[3..] from distribution
+ * argv[2] to argv[1]; delete them from the source if remove_source. */
+static retvalue copysrc_or_movesrc(struct distribution *alldistributions, const struct atomlist * architectures, const struct atomlist * components,
+		const struct atomlist * packagetypes, int argc, const char * argv[], bool remove_source) {
+	struct distribution *destination, *source;
+	retvalue result;
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = distribution_get(alldistributions, argv[2], false, &source);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot %s packages to read-only distribution '%s'.\n",
+			remove_source ? "move" : "copy", destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* (an unreachable 'return result;' after this return was removed) */
+	return copy_by_source(destination, source, argc-3, argv+3,
+			components, architectures, packagetypes, remove_source);
+}
+
+/* reprepro copysrc <dest> <source> <srcnames...>: copy by source name. */
+ACTION_D(y, n, y, copysrc) {
+	return copysrc_or_movesrc(alldistributions, architectures, components, packagetypes, argc, argv, false);
+}
+
+/* reprepro movesrc <dest> <source> <srcnames...>: like copysrc, but
+ * also remove the packages from the source distribution. */
+ACTION_D(y, n, y, movesrc) {
+	return copysrc_or_movesrc(alldistributions, architectures, components, packagetypes, argc, argv, true);
+}
+
+/* Shared implementation of copyfilter/movefilter: transfer all
+ * packages matching the formula argv[3] from distribution argv[2] to
+ * argv[1]; delete them from the source if remove_source is set. */
+static retvalue copy_or_move_filter(struct distribution *alldistributions, const struct atomlist * architectures, const struct atomlist * components,
+		const struct atomlist * packagetypes, int argc, const char * argv[], bool remove_source) {
+	struct distribution *destination, *source;
+	retvalue result;
+
+	assert (argc == 4);
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = distribution_get(alldistributions, argv[2], false, &source);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot %s packages to read-only distribution '%s'.\n",
+			remove_source ? "move" : "copy", destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	return copy_by_formula(destination, source, argv[3],
+			components, architectures, packagetypes, remove_source);
+}
+
+/* reprepro copyfilter <dest> <source> <formula>: copy by formula. */
+ACTION_D(y, n, y, copyfilter) {
+	return copy_or_move_filter(alldistributions, architectures, components, packagetypes, argc, argv, false);
+}
+
+/* reprepro movefilter <dest> <source> <formula>: like copyfilter, but
+ * also remove the packages from the source distribution. */
+ACTION_D(y, n, y, movefilter) {
+	return copy_or_move_filter(alldistributions, architectures, components, packagetypes, argc, argv, true);
+}
+
+/* Shared implementation of copymatched/movematched: transfer all
+ * packages whose name matches the glob argv[3] from distribution
+ * argv[2] to argv[1]; delete them from the source if remove_source. */
+static retvalue copy_or_move_matched(struct distribution *alldistributions, const struct atomlist * architectures, const struct atomlist * components,
+		const struct atomlist * packagetypes, int argc, const char * argv[], bool remove_source) {
+	struct distribution *destination, *source;
+	retvalue result;
+
+	assert (argc == 4);
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = distribution_get(alldistributions, argv[2], false, &source);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot %s packages to read-only distribution '%s'.\n",
+			remove_source ? "move" : "copy", destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	return copy_by_glob(destination, source, argv[3],
+			components, architectures, packagetypes, remove_source);
+}
+
+/* reprepro copymatched <dest> <source> <glob>: copy by name glob. */
+ACTION_D(y, n, y, copymatched) {
+	return copy_or_move_matched(alldistributions, architectures, components, packagetypes, argc, argv, false);
+}
+
+/* reprepro movematched <dest> <source> <glob>: like copymatched, but
+ * also remove the packages from the source distribution. */
+ACTION_D(y, n, y, movematched) {
+	return copy_or_move_matched(alldistributions, architectures, components, packagetypes, argc, argv, true);
+}
+
+/* reprepro restore <codename> <snapshot> <packages...>: copy packages
+ * by name from a snapshot back into the distribution. */
+ACTION_D(y, n, y, restore) {
+	struct distribution *destination;
+	retvalue result;
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot copy packages to read-only distribution '%s'.\n",
+			destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* argv[2] is the snapshot name, argv[3..] the package names */
+	return restore_by_name(destination,
+			components, architectures, packagetypes, argv[2],
+			argc-3, argv+3);
+}
+
+/* reprepro restoresrc <codename> <snapshot> <srcnames...>: copy
+ * packages by source name from a snapshot back into the distribution. */
+ACTION_D(y, n, y, restoresrc) {
+	struct distribution *destination;
+	retvalue result;
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot copy packages to read-only distribution '%s'.\n",
+			destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* argv[2] is the snapshot name, argv[3..] the source names */
+	return restore_by_source(destination,
+			components, architectures, packagetypes, argv[2],
+			argc-3, argv+3);
+}
+
+/* reprepro restorematched <codename> <snapshot> <glob>: copy packages
+ * matching a name glob from a snapshot back into the distribution. */
+ACTION_D(y, n, y, restorematched) {
+	struct distribution *destination;
+	retvalue result;
+
+	assert (argc == 4);
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot copy packages to read-only distribution '%s'.\n",
+			destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* argv[2] is the snapshot name, argv[3] the glob */
+	return restore_by_glob(destination,
+			components, architectures, packagetypes, argv[2],
+			argv[3]);
+}
+
+/* reprepro restorefilter <codename> <snapshot> <formula>: copy
+ * packages matching a formula from a snapshot back into the
+ * distribution. */
+ACTION_D(y, n, y, restorefilter) {
+	struct distribution *destination;
+	retvalue result;
+
+	assert (argc == 4);
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot copy packages to read-only distribution '%s'.\n",
+			destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* argv[2] is the snapshot name, argv[3] the formula */
+	return restore_by_formula(destination,
+			components, architectures, packagetypes, argv[2],
+			argv[3]);
+}
+
+/* _addpackage <codename> <filename> <packages...>: insert package
+ * control entries read from an index-style file.  Requires exactly one
+ * component, architecture and packagetype (via -C/-A/-T); a missing
+ * one of -A/-T is guessed where unambiguous (dsc <-> source). */
+ACTION_D(y, n, y, addpackage) {
+	struct distribution *destination;
+	retvalue result;
+	architecture_t architecture = atom_unknown;
+	component_t component = atom_unknown;
+	packagetype_t packagetype = atom_unknown;
+
+	if (packagetypes != NULL) {
+		if (packagetypes->count > 1) {
+			fprintf(stderr,
+"_addpackage can only cope with one packagetype at a time!\n");
+			return RET_ERROR;
+		}
+		packagetype = packagetypes->atoms[0];
+	}
+	if (architectures != NULL) {
+		if (architectures->count > 1) {
+			fprintf(stderr,
+"_addpackage can only cope with one architecture at a time!\n");
+			return RET_ERROR;
+		}
+		architecture = architectures->atoms[0];
+	}
+	if (components != NULL) {
+		if (components->count > 1) {
+			fprintf(stderr,
+"_addpackage can only cope with one component at a time!\n");
+			return RET_ERROR;
+		}
+		component = components->atoms[0];
+	}
+
+	/* -A source implies -T dsc and vice versa */
+	if (!atom_defined(packagetype) && atom_defined(architecture) &&
+			architecture == architecture_source)
+		packagetype = pt_dsc;
+	if (atom_defined(packagetype) && !atom_defined(architecture) &&
+			packagetype == pt_dsc)
+		architecture = architecture_source;
+	// TODO: some more guesses based on components and udebcomponents
+
+	if (!atom_defined(architecture) || !atom_defined(component) ||
+			!atom_defined(packagetype)) {
+		fprintf(stderr, "_addpackage needs -C and -A and -T set!\n");
+		return RET_ERROR;
+	}
+
+	result = distribution_get(alldistributions, argv[1], true, &destination);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	if (destination->readonly) {
+		fprintf(stderr,
+"Cannot add packages to read-only distribution '%s'.\n",
+			destination->codename);
+		return RET_ERROR;
+	}
+	result = distribution_prepareforwriting(destination);
+	if (RET_WAS_ERROR(result))
+		return result;
+
+	/* argv[2] is the file to read, argv[3..] the package names */
+	return copy_from_file(destination,
+			component, architecture, packagetype, argv[2],
+			argc-3, argv+3);
+}
+
+/***********************rereferencing*************************/
+/***********************rereferencing*************************/
+/* rereference [<codenames>]: rebuild the references database from the
+ * current package indices and tracking data. */
+ACTION_R(n, n, y, y, rereference) {
+	retvalue result, r;
+	struct distribution *d;
+	struct target *t;
+
+	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	result = RET_NOTHING;
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+
+		if (verbose > 0) {
+			printf("Referencing %s...\n", d->codename);
+		}
+		for (t = d->targets ; t != NULL ; t = t->next) {
+			r = target_rereference(t);
+			RET_UPDATE(result, r);
+		}
+		r = tracking_rereference(d);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+
+	return result;
+}
+/***************************retrack****************************/
+/* retrack [<codenames>]: rebuild the tracking data of distributions
+ * with tracking enabled; complain if tracking is off for an explicitly
+ * requested distribution. */
+ACTION_D(n, n, y, retrack) {
+	retvalue result, r;
+	struct distribution *d;
+
+	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	result = RET_NOTHING;
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+		if (d->tracking == dt_NONE) {
+			/* only an error if this one was named explicitly */
+			if (argc > 1) {
+				fprintf(stderr,
+"Cannot retrack %s: Tracking not activated for this distribution!\n",
+					d->codename);
+				RET_UPDATE(result, RET_ERROR);
+			}
+			continue;
+		}
+		r = tracking_retrack(d, true);
+		RET_ENDUPDATE(result, r);
+		if (RET_WAS_ERROR(result))
+			break;
+	}
+	return result;
+}
+
+/* _removetrack <codename> <sourcename> <version>: delete one tracking
+ * record and the references it holds. */
+ACTION_D(n, n, y, removetrack) {
+	retvalue result, r;
+	struct distribution *distribution;
+	trackingdb tracks;
+
+	assert (argc == 4);
+
+	result = distribution_get(alldistributions, argv[1], false, &distribution);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result))
+		return result;
+	r = tracking_initialize(&tracks, distribution, false);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+
+	result = tracking_remove(tracks, argv[2], argv[3]);
+
+	r = tracking_done(tracks, distribution);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* removealltracks <codenames...>: drop the whole tracking database of
+ * the named distributions.  Without --delete this is refused for any
+ * distribution that still has tracking enabled in its config. */
+ACTION_D(n, n, y, removealltracks) {
+	retvalue result, r;
+	struct distribution *d;
+	const char *codename;
+	int i;
+
+	/* safety check first, before deleting anything */
+	if (delete <= 0)
+		for (i = 1 ; i < argc ; i ++) {
+			codename = argv[i];
+
+			d = alldistributions;
+			while (d != NULL && strcmp(codename, d->codename) != 0)
+				d = d->next;
+			if (d != NULL && d->tracking != dt_NONE) {
+				fprintf(stderr,
+"Error: Requested removing of all tracks of distribution '%s',\n"
+"which still has tracking enabled. Use --delete to delete anyway.\n",
+					codename);
+				return RET_ERROR;
+			}
+		}
+	result = RET_NOTHING;
+	for (i = 1 ; i < argc ; i ++) {
+		codename = argv[i];
+
+		if (verbose >= 0) {
+			printf("Deleting all tracks for %s...\n", codename);
+		}
+
+		r = tracking_drop(codename);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(result))
+			break;
+		/* nothing deleted: warn if the codename looks unknown */
+		if (r == RET_NOTHING) {
+			d = alldistributions;
+			while (d != NULL && strcmp(codename, d->codename) != 0)
+				d = d->next;
+			if (d == NULL) {
+				fprintf(stderr,
+"Warning: There was no tracking information to delete for '%s',\n"
+"which is also not found in conf/distributions. Either this was already\n"
+"deleted earlier, or you might have mistyped.\n", codename);
+			}
+		}
+	}
+	return result;
+}
+
+/* tidytracks [<codenames>]: remove obsolete tracking records; for
+ * distributions with tracking disabled, drop their tracking data
+ * entirely. */
+ACTION_D(n, n, y, tidytracks) {
+	retvalue result, r;
+	struct distribution *d;
+
+	result = distribution_match(alldistributions, argc-1, argv+1,
+			false, READONLY);
+	assert (result != RET_NOTHING);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+	result = RET_NOTHING;
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		trackingdb tracks;
+
+		if (!d->selected)
+			continue;
+
+		if (d->tracking == dt_NONE) {
+			/* tracking switched off: remove leftover data */
+			r = tracking_drop(d->codename);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+			continue;
+		}
+
+		if (verbose >= 0) {
+			printf("Looking for old tracks in %s...\n",
+					d->codename);
+		}
+		r = tracking_initialize(&tracks, d, false);
+		if (RET_WAS_ERROR(r)) {
+			RET_UPDATE(result, r);
+			/* NOTE(review): always true in this branch */
+			if (RET_WAS_ERROR(r))
+				break;
+			continue;
+		}
+		r = tracking_tidyall(tracks);
+		RET_UPDATE(result, r);
+		r = tracking_done(tracks, d);
+		RET_ENDUPDATE(result, r);
+		if (RET_WAS_ERROR(result))
+			break;
+	}
+	return result;
+}
+
/* Print the tracking information of the selected distributions. */
ACTION_B(n, n, y, dumptracks) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1,
			false, READONLY);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		trackingdb tracks;

		if (!d->selected)
			continue;

		/* readonly == true: do not create a tracking db that
		 * does not exist yet */
		r = tracking_initialize(&tracks, d, true);
		if (RET_WAS_ERROR(r)) {
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				break;
			continue;
		}
		/* RET_NOTHING: no tracking data for this distribution */
		if (r == RET_NOTHING)
			continue;
		r = tracking_printall(tracks);
		RET_UPDATE(result, r);
		r = tracking_done(tracks, d);
		RET_ENDUPDATE(result, r);
		if (RET_WAS_ERROR(result))
			break;
	}
	return result;
}
+
+/***********************checking*************************/
+
/* Run consistency checks over every package of the selected distributions
 * (restricted by -C/-A/-T where given). */
ACTION_RF(y, n, y, y, check) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1,
			false, READONLY);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		if (!d->selected)
			continue;

		if (verbose > 0) {
			printf("Checking %s...\n", d->codename);
		}

		/* package_check is applied to each package matching the
		 * component/architecture/packagetype limits */
		r = package_foreach(d,
				components, architectures, packagetypes,
				package_check, NULL, NULL);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	return result;
}
+
+ACTION_F(n, n, n, y, checkpool) {
+
+ if (argc == 2 && strcmp(argv[1], "fast") != 0) {
+ fprintf(stderr, "Error: Unrecognized second argument '%s'\n"
+ "Syntax: reprepro checkpool [fast]\n",
+ argv[1]);
+ return RET_ERROR;
+ }
+
+ return files_checkpool(argc == 2);
+}
+
+/* Update checksums of existing files */
+
/* Compute and store any checksum types still missing for known files. */
ACTION_F(n, n, n, n, collectnewchecksums) {

	return files_collectnewchecksums();
}
+/*****************reapplying override info***************/
+
/* Re-apply the override files to the packages already in the selected
 * distributions (restricted by -C/-A/-T where given). */
ACTION_F(y, n, y, y, reoverride) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1,
			true, READWRITE);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {

		if (!d->selected)
			continue;

		if (verbose > 0) {
			fprintf(stderr, "Reapplying override to %s...\n",
					d->codename);
		}

		r = distribution_loadalloverrides(d);
		if (RET_IS_OK(r)) {
			struct target *t;

			for (t = d->targets ; t != NULL ; t = t->next) {
				if (!target_matches(t,
				      components, architectures, packagetypes))
					continue;
				r = target_reoverride(t, d);
				RET_UPDATE(result, r);
				// TODO: how to separate this in those
				// affecting d and those that do not?
				RET_UPDATE(d->status, r);
			}
			distribution_unloadoverrides(d);
		} else if (r == RET_NOTHING) {
			/* no override files configured for this
			 * distribution: nothing to re-apply */
			fprintf(stderr,
"No override files, thus nothing to do for %s.\n",
					d->codename);
		} else {
			RET_UPDATE(result, r);
		}
		if (RET_WAS_ERROR(result))
			break;
	}
	return result;
}
+
+/*****************retrieving Description data from .deb files***************/
+
/* Normalize the Description field of every binary package in the given
 * target by re-adding each package's description to itself.
 * Only valid for binary targets (deb/udeb/ddeb), never dsc. */
static retvalue repair_descriptions(struct target *target) {
	struct package_cursor iterator;
	retvalue result, r;

	assert(target->packages == NULL);
	assert(target->packagetype == pt_deb ||
			target->packagetype == pt_udeb ||
			target->packagetype == pt_ddeb);

	if (verbose > 2) {
		printf(
"Redoing checksum information for packages in '%s'...\n",
			target->identifier);
	}

	r = package_openiterator(target, READWRITE, true, &iterator);
	if (!RET_IS_OK(r))
		return r;
	result = RET_NOTHING;
	while (package_next(&iterator)) {
		char *newcontrolchunk = NULL;

		if (interrupted()) {
			result = RET_ERROR_INTERRUPTED;
			break;
		}
		/* replace it by itself to normalize the Description field */
		r = description_addpackage(target, iterator.current.name,
				iterator.current.control,
				&newcontrolchunk);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		/* RET_IS_OK means the control chunk actually changed and
		 * must be written back through the cursor */
		if (RET_IS_OK(r)) {
			if (verbose >= 0) {
				printf(
"Fixing description for '%s'...\n", iterator.current.name);
			}
			r = package_newcontrol_by_cursor(&iterator,
				newcontrolchunk, strlen(newcontrolchunk));
			free(newcontrolchunk);
			if (RET_WAS_ERROR(r)) {
				result = r;
				break;
			}
			target->wasmodified = true;
		}
	}
	/* always close the iterator, even after an error */
	r = package_closeiterator(&iterator);
	RET_ENDUPDATE(result, r);
	return result;
}
+
/* Repair the Description fields of all binary targets of the selected
 * distributions (dsc targets are skipped, they carry no Description). */
ACTION_F(y, n, y, y, repairdescriptions) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1,
			true, READWRITE);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		struct target *t;

		if (!d->selected)
			continue;

		if (verbose > 0) {
			printf(
"Looking for 'Description's to repair in %s...\n", d->codename);
		}

		/* NOTE(review): an error only aborts this distribution's
		 * target loop; remaining distributions are still processed */
		for (t = d->targets ; t != NULL ; t = t->next) {
			if (interrupted()) {
				result = RET_ERROR_INTERRUPTED;
				break;
			}
			if (!target_matches(t, components, architectures, packagetypes))
				continue;
			if (t->packagetype == pt_dsc)
				continue;
			r = repair_descriptions(t);
			RET_UPDATE(result, r);
			RET_UPDATE(d->status, r);
			if (RET_WAS_ERROR(r))
				break;
		}
	}
	return result;
}
+
+/*****************adding checksums of files again*****************/
+
/* Recompute and re-add the checksum information of the packages in the
 * selected distributions (restricted by -C/-A/-T where given). */
ACTION_F(y, n, y, y, redochecksums) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1,
			true, READWRITE);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		struct target *t;

		if (!d->selected)
			continue;

		if (verbose > 0) {
			fprintf(stderr,
"Readding checksum information to packages in %s...\n", d->codename);
		}

		for (t = d->targets ; t != NULL ; t = t->next) {
			if (!target_matches(t,
			      components, architectures, packagetypes))
				continue;
			r = target_redochecksums(t, d);
			RET_UPDATE(result, r);
			RET_UPDATE(d->status, r);
			if (RET_WAS_ERROR(r))
				break;
		}
		if (RET_WAS_ERROR(result))
			break;
	}
	return result;
}
+
+/*******************sizes of distributions***************/
+
+ACTION_RF(n, n, y, y, sizes) {
+ retvalue result;
+
+ result = distribution_match(alldistributions, argc-1, argv+1,
+ false, READONLY);
+ assert (result != RET_NOTHING);
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+ return sizes_distributions(alldistributions, argc > 1);
+}
+
+/***********************include******************************************/
+
+ACTION_D(y, y, y, includedeb) {
+ retvalue result, r;
+ struct distribution *distribution;
+ packagetype_t packagetype;
+ trackingdb tracks;
+ int i = 0;
+ component_t component = atom_unknown;
+
+ if (components != NULL) {
+ if (components->count > 1) {
+ fprintf(stderr,
+"Error: Only one component is allowed with %s!\n",
+ argv[0]);
+ return RET_ERROR;
+ }
+ assert(components->count > 0);
+ component = components->atoms[0];
+ }
+
+ if (architectures != NULL)
+ if (!atomlist_hasexcept(architectures, architecture_source)) {
+ fprintf(stderr,
+"Error: -A source is not possible with includedeb!\n");
+ return RET_ERROR;
+ }
+ if (strcmp(argv[0], "includeudeb") == 0) {
+ packagetype = pt_udeb;
+ if (limitations_missed(packagetypes, pt_udeb)) {
+ fprintf(stderr,
+"Calling includeudeb with a -T not containing udeb makes no sense!\n");
+ return RET_ERROR;
+ }
+ } else if (strcmp(argv[0], "includeddeb") == 0) {
+ packagetype = pt_ddeb;
+ if (limitations_missed(packagetypes, pt_ddeb)) {
+ fprintf(stderr,
+"Calling includeddeb with a -T not containing ddeb makes no sense!\n");
+ return RET_ERROR;
+ }
+ } else if (strcmp(argv[0], "includedeb") == 0) {
+ packagetype = pt_deb;
+ if (limitations_missed(packagetypes, pt_deb)) {
+ fprintf(stderr,
+"Calling includedeb with a -T not containing deb makes no sense!\n");
+ return RET_ERROR;
+ }
+
+ } else {
+ fprintf(stderr, "Internal error while parding command!\n");
+ return RET_ERROR;
+ }
+
+ for (i = 2 ; i < argc ; i++) {
+ const char *filename = argv[i];
+
+ if (packagetype == pt_udeb) {
+ if (!endswith(filename, ".udeb") && !IGNORING(extension,
+"includeudeb called with file '%s' not ending with '.udeb'\n", filename))
+ return RET_ERROR;
+ } else if (packagetype == pt_ddeb) {
+ if (!endswith(filename, ".ddeb") && !IGNORING(extension,
+"includeddeb called with file '%s' not ending with '.ddeb'\n", filename))
+ return RET_ERROR;
+ } else {
+ if (!endswith(filename, ".deb") && !IGNORING(extension,
+"includedeb called with file '%s' not ending with '.deb'\n", filename))
+ return RET_ERROR;
+ }
+ }
+
+ result = distribution_get(alldistributions, argv[1], true, &distribution);
+ assert (result != RET_NOTHING);
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+ if (distribution->readonly) {
+ fprintf(stderr, "Cannot add packages to read-only distribution '%s'.\n",
+ distribution->codename);
+ return RET_ERROR;
+ }
+
+ if (packagetype == pt_udeb)
+ result = override_read(distribution->udeb_override,
+ &distribution->overrides.udeb, false);
+ else
+ /* we use the normal deb overrides for ddebs too -
+ * they're not meant to have overrides anyway */
+ result = override_read(distribution->deb_override,
+ &distribution->overrides.deb, false);
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+
+ // TODO: same for component? (depending on type?)
+ if (architectures != NULL) {
+ architecture_t missing = atom_unknown;
+
+ if (!atomlist_subset(&distribution->architectures,
+ architectures, &missing)){
+ fprintf(stderr,
+"Cannot force into the architecture '%s' not available in '%s'!\n",
+ atoms_architectures[missing],
+ distribution->codename);
+ return RET_ERROR;
+ }
+ }
+
+ r = distribution_prepareforwriting(distribution);
+ if (RET_WAS_ERROR(r)) {
+ return RET_ERROR;
+ }
+
+ if (distribution->tracking != dt_NONE) {
+ result = tracking_initialize(&tracks, distribution, false);
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+ } else {
+ tracks = NULL;
+ }
+ result = RET_NOTHING;
+ for (i = 2 ; i < argc ; i++) {
+ const char *filename = argv[i];
+
+ r = deb_add(component, architectures,
+ section, priority, packagetype,
+ distribution, filename,
+ delete, tracks);
+ RET_UPDATE(result, r);
+ }
+
+ distribution_unloadoverrides(distribution);
+
+ r = tracking_done(tracks, distribution);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+
/* Include a single .dsc source package (argv[2]) into the distribution
 * named by argv[1]. */
ACTION_D(y, y, y, includedsc) {
	retvalue result, r;
	struct distribution *distribution;
	trackingdb tracks;
	component_t component = atom_unknown;

	/* at most one forced component makes sense for a single file */
	if (components != NULL) {
		if (components->count > 1) {
			fprintf(stderr,
"Error: Only one component is allowed with %s!\n",
					argv[0]);
			return RET_ERROR;
		}
		assert(components->count > 0);
		component = components->atoms[0];
	}


	assert (argc == 3);

	/* source packages only exist in architecture 'source'
	 * and package type 'dsc'; -A/-T must not exclude those */
	if (limitations_missed(architectures, architecture_source)) {
		fprintf(stderr,
"Cannot put a source package anywhere else than in architecture 'source'!\n");
		return RET_ERROR;
	}
	if (limitations_missed(packagetypes, pt_dsc)) {
		fprintf(stderr,
"Cannot put a source package anywhere else than in type 'dsc'!\n");
		return RET_ERROR;
	}
	if (!endswith(argv[2], ".dsc") && !IGNORING(extension,
"includedsc called with a file not ending with '.dsc'\n"))
		return RET_ERROR;

	result = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;
	if (distribution->readonly) {
		fprintf(stderr,
"Cannot add packages to read-only distribution '%s'.\n",
				distribution->codename);
		return RET_ERROR;
	}
	result = override_read(distribution->dsc_override,
			&distribution->overrides.dsc, true);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = distribution_prepareforwriting(distribution);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	/* open the tracking database only if tracking is enabled */
	if (distribution->tracking != dt_NONE) {
		result = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(result)) {
			return result;
		}
	} else {
		tracks = NULL;
	}

	result = dsc_add(component, section, priority,
			distribution, argv[2], delete, tracks);
	logger_wait();

	distribution_unloadoverrides(distribution);
	r = tracking_done(tracks, distribution);
	RET_ENDUPDATE(result, r);
	return result;
}
+
/* Include a .changes upload (argv[2]) and all packages it lists into
 * the distribution named by argv[1]. */
ACTION_D(y, y, y, include) {
	retvalue result, r;
	struct distribution *distribution;
	trackingdb tracks;
	component_t component = atom_unknown;

	/* at most one forced component makes sense for a single upload */
	if (components != NULL) {
		if (components->count > 1) {
			fprintf(stderr,
"Error: Only one component is allowed with %s!\n",
					argv[0]);
			return RET_ERROR;
		}
		assert(components->count > 0);
		component = components->atoms[0];
	}

	assert (argc == 3);

	if (!endswith(argv[2], ".changes") && !IGNORING(extension,
"include called with a file not ending with '.changes'\n"
"(Did you mean includedeb or includedsc?)\n"))
		return RET_ERROR;

	result = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;
	if (distribution->readonly) {
		fprintf(stderr,
"Cannot add packages to read-only distribution '%s'.\n",
				distribution->codename);
		return RET_ERROR;
	}

	/* a .changes can carry deb, udeb and dsc, so all override
	 * files are needed */
	result = distribution_loadalloverrides(distribution);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	if (distribution->tracking != dt_NONE) {
		result = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(result)) {
			return result;
		}
	} else {
		tracks = NULL;
	}
	result = distribution_loaduploaders(distribution);
	if (RET_WAS_ERROR(result)) {
		/* close the tracking db before bailing out */
		r = tracking_done(tracks, distribution);
		RET_ENDUPDATE(result, r);
		return result;
	}
	result = changes_add(tracks, packagetypes, component, architectures,
			section, priority, distribution,
			argv[2], delete);
	if (RET_WAS_ERROR(result))
		RET_UPDATE(distribution->status, result);

	distribution_unloadoverrides(distribution);
	distribution_unloaduploaders(distribution);
	r = tracking_done(tracks, distribution);
	RET_ENDUPDATE(result, r);
	return result;
}
+
+/***********************createsymlinks***********************************/
+
+static bool mayaliasas(const struct distribution *alldistributions, const char *part, const char *cnpart) {
+ const struct distribution *d;
+
+ /* here it is only checked whether there is something that could
+ * cause this link to exist. No tests whether this really will
+ * cause it to be created (or already existing). */
+
+ for (d = alldistributions ; d != NULL ; d = d->next) {
+ if (d->suite == NULL)
+ continue;
+ if (strcmp(d->suite, part) == 0 &&
+ strcmp(d->codename, cnpart) == 0)
+ return true;
+ if (strcmp(d->codename, part) == 0 &&
+ strcmp(d->suite, cnpart) == 0)
+ return true;
+ }
+ return false;
+}
+
/* Create (or verify) dists/<suite> -> <codename> symlinks for the
 * selected distributions. */
ACTION_C(n, n, y, createsymlinks) {
	retvalue result, r;
	struct distribution *d, *d2;
	bool warned_slash = false;

	r = dirs_make_recursive(global.distdir);
	if (RET_WAS_ERROR(r))
		return r;

	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		char *linkname, *buffer;
		size_t bufsize;
		int ret;
		const char *separator_in_suite;

		if (!d->selected)
			continue;

		/* nothing to link when there is no suite name or it
		 * equals the codename */
		if (d->suite == NULL || strcmp(d->suite, d->codename) == 0)
			continue;
		r = RET_NOTHING;
		/* refuse to create a link whose name collides with
		 * another selected distribution's suite or codename */
		for (d2 = alldistributions ; d2 != NULL ; d2 = d2->next) {
			if (!d2->selected)
				continue;
			if (d!=d2 && d2->suite!=NULL &&
					strcmp(d->suite, d2->suite)==0) {
				fprintf(stderr,
"Not linking %s->%s due to conflict with %s->%s\n",
					d->suite, d->codename,
					d2->suite, d2->codename);
				r = RET_ERROR;
			} else if (strcmp(d->suite, d2->codename)==0) {
				fprintf(stderr,
"Not linking %s->%s due to conflict with %s\n",
					d->suite, d->codename, d2->codename);
				r = RET_ERROR;
			}
		}
		if (RET_WAS_ERROR(r)) {
			RET_UPDATE(result, r);
			continue;
		}

		separator_in_suite = strchr(d->suite, '/');
		if (separator_in_suite != NULL) {
			/* things with / in it are tricky:
			 * relative symbolic links are hard,
			 * perhaps something else already moved
			 * the earlier ones, ... */
			const char *separator_in_codename;
			size_t ofs_in_suite = separator_in_suite - d->suite;
			char *part = strndup(d->suite, ofs_in_suite);

			if (FAILEDTOALLOC(part))
				return RET_ERROR_OOM;

			/* check if this is some case we do not want to warn about: */

			separator_in_codename = strchr(d->codename, '/');
			if (separator_in_codename != NULL &&
			    strcmp(separator_in_codename,
			           separator_in_suite) == 0) {
				/* all but the first is common: */
				size_t cnofs = separator_in_codename - d->codename;
				char *cnpart = strndup(d->codename, cnofs);
				if (FAILEDTOALLOC(cnpart)) {
					free(part);
					return RET_ERROR_OOM;
				}
				if (mayaliasas(alldistributions, part, cnpart)) {
					/* a link of the first path
					 * components would make this
					 * one work, so stay silent */
					if (verbose > 1)
						fprintf(stderr,
"Not creating '%s' -> '%s' because of the '/' in it.\n"
"Hopefully something else will link '%s' -> '%s' then this is not needed.\n",
							d->suite, d->codename,
							part, cnpart);
					free(part);
					free(cnpart);
					continue;
				}
				free(cnpart);
			}
			free(part);
			if (verbose >= 0 && !warned_slash) {
				fprintf(stderr,
"Creating symlinks with '/' in them is not yet supported:\n");
				warned_slash = true;
			}
			if (verbose >= 0)
				fprintf(stderr,
"Not creating '%s' -> '%s' because of '/'.\n", d->suite, d->codename);
			continue;
		}

		linkname = calc_dirconcat(global.distdir, d->suite);
		bufsize = strlen(d->codename)+10;
		buffer = calloc(1, bufsize);
		if (FAILEDTOALLOC(linkname) || FAILEDTOALLOC(buffer)) {
			free(linkname); free(buffer);
			(void)fputs("Out of Memory!\n", stderr);
			return RET_ERROR_OOM;
		}

		/* read at most bufsize-4 bytes so a too-long target can
		 * be shown truncated with "..." below */
		ret = readlink(linkname, buffer, bufsize - 4);
		if (ret < 0 && errno == ENOENT) {
			/* no link yet: create it */
			ret = symlink(d->codename, linkname);
			if (ret != 0) {
				int e = errno;
				r = RET_ERRNO(e);
				fprintf(stderr,
"Error %d creating symlink %s->%s: %s\n", e, linkname, d->codename, strerror(e));
				RET_UPDATE(result, r);
			} else {
				if (verbose > 0) {
					printf("Created %s->%s\n", linkname,
							d->codename);
				}
				RET_UPDATE(result, RET_OK);
			}
		} else if (ret >= 0) {
			/* a link exists: check where it points */
			buffer[ret] = '\0';
			if (ret >= ((int)bufsize) - 4) {
				buffer[bufsize-4]='.';
				buffer[bufsize-3]='.';
				buffer[bufsize-2]='.';
				buffer[bufsize-1]='\0';
			}
			if (strcmp(buffer, d->codename) == 0) {
				if (verbose > 2) {
					printf("Already ok: %s->%s\n",
							linkname, d->codename);
				}
				RET_UPDATE(result, RET_OK);
			} else {
				/* points elsewhere: replace only with --delete */
				if (delete <= 0) {
					fprintf(stderr,
"Cannot create %s as already pointing to %s instead of %s,\n"
" use --delete to delete the old link before creating an new one.\n",
						linkname, buffer, d->codename);
					RET_UPDATE(result, RET_ERROR);
				} else {
					unlink(linkname);
					ret = symlink(d->codename, linkname);
					if (ret != 0) {
						int e = errno;
						r = RET_ERRNO(e);
						fprintf(stderr,
"Error %d creating symlink %s->%s: %s\n", e, linkname, d->codename, strerror(e));
						RET_UPDATE(result, r);
					} else {
						if (verbose > 0) {
							printf(
"Replaced %s->%s\n", linkname, d->codename);
						}
						RET_UPDATE(result, RET_OK);
					}

				}
			}
		} else {
			/* readlink failed for another reason (e.g. a
			 * regular file is in the way) */
			int e = errno;
			r = RET_ERRNO(e);
			fprintf(stderr,
"Error %d checking %s, perhaps not a symlink?: %s\n", e, linkname, strerror(e));
			RET_UPDATE(result, r);
		}
		free(linkname); free(buffer);

		RET_UPDATE(result, r);
	}
	return result;
}
+
+/***********************checkuploaders***********************************/
+
+/* Read a fake package description from stdin */
+static inline retvalue read_package_description(char **sourcename, struct strlist *sections, struct strlist *binaries, struct strlist *byhands, struct atomlist *architectures, struct signatures **signatures, char **buffer_p, size_t *bufferlen_p) {
+ retvalue r;
+ ssize_t got;
+ char *buffer, *v, *p;
+ struct strlist *l;
+ struct signatures *s;
+ struct signature *sig;
+ architecture_t architecture;
+
+ if (isatty(0)) {
+ puts(
+"Please input the simulated package data to test.\n"
+"Format: (source|section|binary|byhand|architecture|signature) <value>\n"
+"some keys may be given multiple times");
+ }
+ while ((got = getline(buffer_p, bufferlen_p, stdin)) >= 0) {
+ buffer = *buffer_p;
+ if (got == 0 || buffer[got - 1] != '\n') {
+ fputs("stdin is not text\n", stderr);
+ return RET_ERROR;
+ }
+ buffer[--got] = '\0';
+ if (strncmp(buffer, "source ", 7) == 0) {
+ if (*sourcename != NULL) {
+ fprintf(stderr,
+"Source name only allowed once!\n");
+ return RET_ERROR;
+ }
+ *sourcename = strdup(buffer + 7);
+ if (FAILEDTOALLOC(*sourcename))
+ return RET_ERROR_OOM;
+ continue;
+ } else if (strncmp(buffer, "signature ", 10) == 0) {
+ v = buffer + 10;
+ if (*signatures == NULL) {
+ s = calloc(1, sizeof(struct signatures)
+ +sizeof(struct signature));
+ if (FAILEDTOALLOC(s))
+ return RET_ERROR_OOM;
+ } else {
+ s = realloc(*signatures,
+ sizeof(struct signatures)
+ + (s->count+1)
+ * sizeof(struct signature));
+ if (FAILEDTOALLOC(s))
+ return RET_ERROR_OOM;
+ }
+ *signatures = s;
+ sig = s->signatures + s->count;
+ s->count++;
+ s->validcount++;
+ sig->expired_key = false;
+ sig->expired_signature = false;
+ sig->revoced_key = false;
+ sig->state = sist_valid;
+ switch (*v) {
+ case 'b':
+ sig->state = sist_bad;
+ s->validcount--;
+ v++;
+ break;
+ case 'e':
+ sig->state = sist_mostly;
+ sig->expired_signature = true;
+ s->validcount--;
+ v++;
+ break;
+ case 'i':
+ sig->state = sist_invalid;
+ s->validcount--;
+ v++;
+ break;
+ }
+ p = v;
+ while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f'))
+ p++;
+ sig->keyid = strndup(v, p-v);
+ sig->primary_keyid = NULL;
+ if (FAILEDTOALLOC(sig->keyid))
+ return RET_ERROR_OOM;
+ if (*p == ':') {
+ p++;
+ v = p;
+ while ((*p >= '0' && *p <= '9')
+ || (*p >= 'a' && *p <= 'f'))
+ p++;
+ if (*p != '\0') {
+ fprintf(stderr,
+"Invalid character in key id: '%c'!\n",
+ *p);
+ return RET_ERROR;
+ }
+ sig->primary_keyid = strdup(v);
+ } else if (*p != '\0') {
+ fprintf(stderr,
+"Invalid character in key id: '%c'!\n",
+ *p);
+ return RET_ERROR;
+ } else
+ sig->primary_keyid = strdup(sig->keyid);
+ if (FAILEDTOALLOC(sig->primary_keyid))
+ return RET_ERROR_OOM;
+ continue;
+ } else if (strncmp(buffer, "section ", 8) == 0) {
+ v = buffer + 8;
+ l = sections;
+ } else if (strncmp(buffer, "binary ", 7) == 0) {
+ v = buffer + 7;
+ l = binaries;
+ } else if (strncmp(buffer, "byhand ", 7) == 0) {
+ v = buffer + 7;
+ l = byhands;
+ } else if (strncmp(buffer, "architecture ", 13) == 0) {
+ v = buffer + 13;
+ r = architecture_intern(v, &architecture);
+ if (RET_WAS_ERROR(r))
+ return r;
+ r = atomlist_add(architectures, architecture);
+ if (RET_WAS_ERROR(r))
+ return r;
+ continue;
+ } else if (strcmp(buffer, "finished") == 0) {
+ break;
+ } else {
+ fprintf(stderr, "Unparseable line '%s'\n", buffer);
+ return RET_ERROR;
+ }
+ r = strlist_add_dup(l, v);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ if (ferror(stdin)) {
+ int e = errno;
+ fprintf(stderr, "Error %d reading data from stdin: %s\n",
+ e, strerror(e));
+ return RET_ERRNO(e);
+ }
+ if (*sourcename == NULL) {
+ fprintf(stderr, "No source name specified!\n");
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
+static inline void verifystrlist(struct upload_conditions *conditions, const struct strlist *list) {
+ int i;
+ for (i = 0 ; i < list->count ; i++) {
+ if (!uploaders_verifystring(conditions, list->values[i]))
+ break;
+ }
+}
+static inline void verifyatomlist(struct upload_conditions *conditions, const struct atomlist *list) {
+ int i;
+ for (i = 0 ; i < list->count ; i++) {
+ if (!uploaders_verifyatom(conditions, list->atoms[i]))
+ break;
+ }
+}
+
+
+ACTION_C(n, n, y, checkuploaders) {
+ retvalue result, r;
+ struct distribution *d;
+ char *sourcename = NULL;
+ struct strlist sections, binaries, byhands;
+ struct atomlist architectures;
+ struct signatures *signatures = NULL;
+ struct upload_conditions *conditions;
+ bool accepted, rejected;
+ char *buffer = NULL;
+ size_t bufferlen = 0;
+ int i;
+
+ r = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ for (d = alldistributions ; d != NULL ; d = d->next) {
+ if (!d->selected)
+ continue;
+ r = distribution_loaduploaders(d);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ strlist_init(&sections);
+ strlist_init(&binaries);
+ strlist_init(&byhands);
+ atomlist_init(&architectures);
+
+ r = read_package_description(&sourcename, &sections, &binaries,
+ &byhands, &architectures, &signatures,
+ &buffer, &bufferlen);
+ free(buffer);
+ if (RET_WAS_ERROR(r)) {
+ free(sourcename);
+ strlist_done(&sections);
+ strlist_done(&byhands);
+ atomlist_done(&architectures);
+ signatures_free(signatures);
+ return r;
+ }
+
+ result = RET_NOTHING;
+ accepted = false;
+ for (i = 1 ; !accepted && i < argc ; i++) {
+ r = distribution_get(alldistributions, argv[i], false, &d);
+ if (RET_WAS_ERROR(r)) {
+ result = r;
+ break;
+ }
+ r = distribution_loaduploaders(d);
+ if (RET_WAS_ERROR(r)) {
+ result = r;
+ break;
+ }
+ if (d->uploaderslist == NULL) {
+ printf(
+"'%s' would have been accepted by '%s' (as it has no uploader restrictions)\n",
+ sourcename, d->codename);
+ accepted = true;
+ break;
+ }
+ r = uploaders_permissions(d->uploaderslist, signatures,
+ &conditions);
+ if (RET_WAS_ERROR(r)) {
+ result = r;
+ break;
+ }
+ rejected = false;
+ do switch (uploaders_nextcondition(conditions)) {
+ case uc_ACCEPTED:
+ accepted = true;
+ break;
+ case uc_REJECTED:
+ rejected = true;
+ break;
+ case uc_CODENAME:
+ uploaders_verifystring(conditions, d->codename);
+ break;
+ case uc_SOURCENAME:
+ uploaders_verifystring(conditions, sourcename);
+ break;
+ case uc_SECTIONS:
+ verifystrlist(conditions, &sections);
+ break;
+ case uc_ARCHITECTURES:
+ verifyatomlist(conditions, &architectures);
+ break;
+ case uc_BYHAND:
+ verifystrlist(conditions, &byhands);
+ break;
+ case uc_BINARIES:
+ verifystrlist(conditions, &byhands);
+ break;
+ } while (!accepted && !rejected);
+ free(conditions);
+
+ if (accepted) {
+ printf("'%s' would have been accepted by '%s'\n",
+ sourcename, d->codename);
+ break;
+ }
+ }
+ if (!accepted)
+ printf(
+"'%s' would NOT have been accepted by any of the distributions selected.\n",
+ sourcename);
+ free(sourcename);
+ strlist_done(&sections);
+ strlist_done(&byhands);
+ atomlist_done(&architectures);
+ signatures_free(signatures);
+ if (RET_WAS_ERROR(result))
+ return result;
+ else if (accepted)
+ return RET_OK;
+ else
+ return RET_NOTHING;
+}
+
+/***********************clearvanished***********************************/
+
/* Delete package databases and tracking data belonging to distributions
 * (or targets) that no longer exist in conf/distributions. */
ACTION_D(n, n, n, clearvanished) {
	retvalue result, r;
	struct distribution *d;
	struct strlist identifiers, codenames;
	bool *inuse;
	int i;

	result = database_listpackages(&identifiers);
	if (!RET_IS_OK(result)) {
		return result;
	}

	/* mark every identifier that is still referenced by some
	 * configured target */
	inuse = nzNEW(identifiers.count, bool);
	if (FAILEDTOALLOC(inuse)) {
		strlist_done(&identifiers);
		return RET_ERROR_OOM;
	}
	for (d = alldistributions; d != NULL ; d = d->next) {
		struct target *t;
		for (t = d->targets; t != NULL ; t = t->next) {
			int ofs = strlist_ofs(&identifiers, t->identifier);
			if (ofs >= 0) {
				inuse[ofs] = true;
				if (verbose > 6)
					printf(
"Marking '%s' as used.\n", t->identifier);
			} else if (verbose > 3){
				fprintf(stderr,
"Strange, '%s' does not appear in packages.db yet.\n", t->identifier);
			}
		}
	}
	for (i = 0 ; i < identifiers.count ; i ++) {
		const char *identifier = identifiers.values[i];
		const char *p, *q;

		if (inuse[i])
			continue;
		if (interrupted())
			return RET_ERROR_INTERRUPTED;
		/* without --delete, keep any database that still
		 * contains packages */
		if (delete <= 0) {
			r = database_haspackages(identifier);
			if (RET_IS_OK(r)) {
				fprintf(stderr,
"There are still packages in '%s', not removing (give --delete to do so)!\n", identifier);
				continue;
			}
		}
		if (interrupted())
			return RET_ERROR_INTERRUPTED;
		// TODO: if delete, check what is removed, so that tracking
		// information can be updated.
		printf(
"Deleting vanished identifier '%s'.\n", identifier);
		/* intern component and architectures, so parsing
		 * has no problems (actually only need component now) */
		p = identifier;
		if (strncmp(p, "u|", 2) == 0)
			p += 2;
		p = strchr(p, '|');
		if (p != NULL) {
			p++;
			q = strchr(p, '|');
			if (q != NULL) {
				atom_t dummy;

				char *component = strndup(p, q-p);
				q++;
				char *architecture = strdup(q);
				if (FAILEDTOALLOC(component) ||
				    FAILEDTOALLOC(architecture)) {
					free(component);
					free(architecture);
					return RET_ERROR_OOM;
				}
				r = architecture_intern(architecture, &dummy);
				free(architecture);
				if (RET_WAS_ERROR(r)) {
					free(component);
					return r;
				}
				r = component_intern(component, &dummy);
				free(component);
				if (RET_WAS_ERROR(r))
					return r;
			}
		}
		/* dereference anything left */
		references_remove(identifier);
		/* remove the database */
		database_droppackages(identifier);
	}
	free(inuse);
	strlist_done(&identifiers);
	if (interrupted())
		return RET_ERROR_INTERRUPTED;

	/* also drop tracking data of codenames no longer configured */
	r = tracking_listdistributions(&codenames);
	RET_UPDATE(result, r);
	if (RET_IS_OK(r)) {
		for (d = alldistributions; d != NULL ; d = d->next) {
			strlist_remove(&codenames, d->codename);
		}
		for (i = 0 ; i < codenames.count ; i ++) {
			printf(
"Deleting tracking data for vanished distribution '%s'.\n",
					codenames.values[i]);
			r = tracking_drop(codenames.values[i]);
			RET_UPDATE(result, r);
		}
		strlist_done(&codenames);
	}

	return result;
}
+
/* List the package-database identifiers found in packages.db, optionally
 * restricted to those belonging to the distributions named as arguments. */
ACTION_B(n, n, y, listdbidentifiers) {
	retvalue result;
	struct strlist identifiers;
	const struct distribution *d;
	int i;

	result = database_listpackages(&identifiers);
	if (!RET_IS_OK(result)) {
		return result;
	}
	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;

	result = RET_NOTHING;
	for (i = 0 ; i < identifiers.count ; i++) {
		const char *p, *q, *identifier = identifiers.values[i];

		/* no arguments: print everything */
		if (argc <= 1) {
			puts(identifier);
			result = RET_OK;
			continue;
		}
		/* extract the codename part of the identifier, which is
		 * "[u|]<codename>|<component>|<architecture>" */
		p = identifier;
		if (strncmp(p, "u|", 2) == 0)
			p += 2;
		q = strchr(p, '|');
		if (q == NULL)
			q = strchr(p, '\0');
		for (d = alldistributions ; d != NULL ; d = d->next) {
			if (!d->selected)
				continue;
			if (strncmp(p, d->codename, q - p) == 0
			    && d->codename[q-p] == '\0') {
				puts(identifier);
				result = RET_OK;
				break;
			}
		}
	}
	strlist_done(&identifiers);
	return result;
}
+
+ACTION_C(n, n, y, listconfidentifiers) {
+ struct target *t;
+ const struct distribution *d;
+ retvalue result;
+
+ result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
+ assert (result != RET_NOTHING);
+ if (RET_WAS_ERROR(result))
+ return result;
+ result = RET_NOTHING;
+ for (d = alldistributions ; d != NULL ; d = d->next) {
+ if (!d->selected)
+ continue;
+
+ for (t = d->targets; t != NULL ; t = t->next) {
+ puts(t->identifier);
+ result = RET_OK;
+ }
+ }
+ return result;
+}
+
/* _versioncompare: compare two Debian version strings and report the order.
 * Malformed versions are warned about but the comparison is still attempted
 * (best effort, intentionally not aborting).  Returns the result of
 * dpkgversions_cmp. */
ACTION_N(n, n, y, versioncompare) {
	retvalue r;
	int i;

	/* argc includes the command name, so exactly two versions */
	assert (argc == 3);

	r = properversion(argv[1]);
	if (RET_WAS_ERROR(r))
		fprintf(stderr, "'%s' is not a proper version!\n", argv[1]);
	r = properversion(argv[2]);
	if (RET_WAS_ERROR(r))
		fprintf(stderr, "'%s' is not a proper version!\n", argv[2]);
	r = dpkgversions_cmp(argv[1], argv[2], &i);
	if (RET_IS_OK(r)) {
		if (i < 0) {
			printf("'%s' is smaller than '%s'.\n",
					argv[1], argv[2]);
		} else if (i > 0) {
			printf("'%s' is larger than '%s'.\n",
					argv[1], argv[2]);
		} else
			printf("'%s' is the same as '%s'.\n",
					argv[1], argv[2]);
	}
	return r;
}
/***********************processincoming********************************/
/* processincoming: process an incoming queue according to the named rule,
 * optionally limited to a single .changes file.  All distributions are
 * selected here; the incoming rule set decides which ones are targets. */
ACTION_D(n, n, y, processincoming) {
	struct distribution *d;

	for (d = alldistributions ; d != NULL ; d = d->next)
		d->selected = true;

	return process_incoming(alldistributions, argv[1],
			(argc==3) ? argv[2] : NULL);
}
/***********************gensnapshot********************************/
/* gensnapshot: export a permanent snapshot of a distribution under
 * dists/<codename>/snapshots/<name> (argv[1] = codename, argv[2] = name). */
ACTION_R(n, n, y, y, gensnapshot) {
	retvalue result;
	struct distribution *distribution;

	assert (argc == 3);

	result = distribution_get(alldistributions, argv[1], false, &distribution);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;

	return distribution_snapshot(distribution, argv[2]);
}
+
/* unreferencesnapshot: drop the references held by a snapshot, so that its
 * files can later be removed by deleteunreferenced.  The reference id has
 * the form "s=<codename>=<name>" (argv[1] = codename, argv[2] = name). */
ACTION_R(n, n, n, y, unreferencesnapshot) {
	retvalue result;
	char *id;

	assert (argc == 3);

	id = mprintf("s=%s=%s", argv[1], argv[2]);
	if (FAILEDTOALLOC(id))
		return RET_ERROR_OOM;

	result = references_remove(id);

	free(id);

	return result;
}
+
/***********************rerunnotifiers********************************/
/* per-target filter for package_foreach: skip targets (RET_NOTHING) whose
 * logger does not need a notifier rerun. */
static retvalue rerunnotifiersintarget(struct target *target, UNUSED(void *dummy)) {
	if (!logger_rerun_needs_target(target->distribution->logger, target))
		return RET_NOTHING;
	return RET_OK;
}
+
/* rerunnotifiers: re-feed all packages of the selected distributions to the
 * configured log notifier scripts.  Distributions without a logger are
 * skipped; errors abort the loop but already-collected results are kept. */
ACTION_B(y, n, y, rerunnotifiers) {
	retvalue result, r;
	struct distribution *d;

	result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;

	result = RET_NOTHING;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		if (!d->selected)
			continue;

		if (d->logger == NULL)
			continue;

		if (verbose > 0) {
			printf("Processing %s...\n", d->codename);
		}
		r = logger_prepare(d->logger);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;

		r = package_foreach(d,
				components, architectures, packagetypes,
				package_rerunnotifiers,
				rerunnotifiersintarget, NULL);
		/* wait for the spawned notifier processes of this distribution */
		logger_wait();

		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	return result;
}
+
/*********************** flood ****************************/
/* flood: spread 'Architecture: all' packages to the architectures of a
 * (writable) distribution where they are missing, optionally restricted to
 * one target architecture given as argv[2]. */
ACTION_D(y, n, y, flood) {
	retvalue result, r;
	struct distribution *distribution;
	trackingdb tracks;
	/* NOTE(review): declared component_t but holds an architecture atom;
	 * presumably all atom types share one integer representation --
	 * confirm, architecture_t would be clearer. */
	component_t architecture = atom_unknown;

	result = distribution_get(alldistributions, argv[1], true, &distribution);
	assert (result != RET_NOTHING);
	if (RET_WAS_ERROR(result))
		return result;
	if (distribution->readonly) {
		fprintf(stderr,
"Cannot add packages to read-only distribution '%s'.\n",
				distribution->codename);
		return RET_ERROR;
	}

	if (argc == 3) {
		/* validate the optional target architecture argument */
		architecture = architecture_find(argv[2]);
		if (!atom_defined(architecture)) {
			fprintf(stderr, "Error: Unknown architecture '%s'!\n",
					argv[2]);
			return RET_ERROR;
		}
		if (architecture == architecture_source) {
			fprintf(stderr,
"Error: Architecture 'source' does not make sense with 'flood'!\n");
			return RET_ERROR;
		}
		if (!atomlist_in(&distribution->architectures, architecture)) {
			fprintf(stderr,
"Error: Architecture '%s' not part of '%s'!\n",
					argv[2], distribution->codename);
			return RET_ERROR;
		}
	}

	result = distribution_prepareforwriting(distribution);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	/* open the tracking database only if the distribution uses tracking */
	if (distribution->tracking != dt_NONE) {
		result = tracking_initialize(&tracks, distribution, false);
		if (RET_WAS_ERROR(result)) {
			return result;
		}
	} else
		tracks = NULL;
	result = flood(distribution, components, architectures, packagetypes,
			architecture, tracks);

	/* remember the error in the distribution so its state is not
	 * exported as if everything succeeded */
	if (RET_WAS_ERROR(result))
		RET_UPDATE(distribution->status, result);

	if (tracks != NULL) {
		r = tracking_done(tracks, distribution);
		RET_ENDUPDATE(result, r);
	}
	return result;
}
+
/*********************** unusedsources ****************************/
/* unusedsources: list source packages no binary package still needs,
 * in the selected (or all) distributions. */
ACTION_B(n, n, y, unusedsources) {
	retvalue r;

	r = distribution_match(alldistributions, argc-1, argv+1,
			false, READONLY);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	return unusedsources(alldistributions);
}
+
/*********************** sourcemissing ****************************/
/* sourcemissing: list binary packages whose source package is missing,
 * in the selected (or all) distributions. */
ACTION_B(n, n, y, sourcemissing) {
	retvalue r;

	r = distribution_match(alldistributions, argc-1, argv+1,
			false, READONLY);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	return sourcemissing(alldistributions);
}
/*********************** reportcruft ****************************/
/* reportcruft: report obsolete or inconsistent packages ("cruft") in the
 * selected (or all) distributions. */
ACTION_B(n, n, y, reportcruft) {
	retvalue r;

	r = distribution_match(alldistributions, argc-1, argv+1,
			false, READONLY);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	return reportcruft(alldistributions);
}
+
+/*********************/
+/* argument handling */
+/*********************/
+
// TODO: this has become an utter mess and needs some serious cleaning...
/* Bit flags describing what an action needs; stored in struct action.needs
 * and interpreted by callaction() below. */
#define NEED_REFERENCES 1
/* FILESDB now includes REFERENCED... */
#define NEED_FILESDB 2
#define NEED_DEREF 4	/* may drop references; unreferenced files get removed */
#define NEED_DATABASE 8	/* implies NEED_CONFIG */
#define NEED_CONFIG 16	/* conf/distributions must be read */
#define NEED_NO_PACKAGES 32	/* must not open the package indices */
#define IS_RO 64	/* open the database read-only */
#define MAY_UNUSED 128	/* do not complain about unused databases */
#define NEED_ACT 256	/* accepts -A/-C/-T restrictions */
#define NEED_SP 512	/* accepts -S/-P (section/priority) */
#define NEED_DELNEW 1024	/* may delete newly added but unused files */
#define NEED_RESTRICT 2048	/* accepts --restrict-* options */
/* Shorthands naming the action function (action_<type>_<act>_<sp>_<name>)
 * together with the needs flags it requires: */
#define A_N(w) action_n_n_n_ ## w, 0
#define A_C(w) action_c_n_n_ ## w, NEED_CONFIG
#define A_ROB(w) action_b_n_n_ ## w, NEED_DATABASE|IS_RO
#define A_ROBact(w) action_b_y_n_ ## w, NEED_ACT|NEED_DATABASE|IS_RO
#define A_L(w) action_l_n_n_ ## w, NEED_DATABASE
#define A_B(w) action_b_n_n_ ## w, NEED_DATABASE
#define A_Bact(w) action_b_y_n_ ## w, NEED_ACT|NEED_DATABASE
#define A_F(w) action_f_n_n_ ## w, NEED_DATABASE|NEED_FILESDB
#define A_Fact(w) action_f_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB
#define A_R(w) action_r_n_n_ ## w, NEED_DATABASE|NEED_REFERENCES
#define A__F(w) action_f_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_NO_PACKAGES
#define A__R(w) action_r_n_n_ ## w, NEED_DATABASE|NEED_REFERENCES|NEED_NO_PACKAGES
#define A__T(w) action_t_n_n_ ## w, NEED_DATABASE|NEED_NO_PACKAGES|MAY_UNUSED
#define A_RF(w) action_rf_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES
#define A_RFact(w) action_rf_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES
/* to dereference files, one needs files and references database: */
#define A_D(w) action_d_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF
#define A_Dact(w) action_d_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF
#define A_Dactsp(w) action_d_y_y_ ## w, NEED_ACT|NEED_SP|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF
+
+static const struct action {
+ const char *name;
+ retvalue (*start)(
+ /*@null@*/struct distribution *,
+ /*@null@*/const char *priority,
+ /*@null@*/const char *section,
+ /*@null@*/const struct atomlist *,
+ /*@null@*/const struct atomlist *,
+ /*@null@*/const struct atomlist *,
+ int argc, const char *argv[]);
+ int needs;
+ int minargs, maxargs;
+ const char *wrongargmessage;
+} all_actions[] = {
+ {"__d", A_N(printargs),
+ -1, -1, NULL},
+ {"__dumpuncompressors", A_N(dumpuncompressors),
+ 0, 0, "__dumpuncompressors"},
+ {"__uncompress", A_N(uncompress),
+ 3, 3, "__uncompress .gz|.bz2|.lzma|.xz|.lz <compressed-filename> <into-filename>"},
+ {"__extractsourcesection", A_N(extractsourcesection),
+ 1, 1, "__extractsourcesection <.dsc-file>"},
+ {"__extractcontrol", A_N(extractcontrol),
+ 1, 1, "__extractcontrol <.deb-file>"},
+ {"__extractfilelist", A_N(extractfilelist),
+ 1, 1, "__extractfilelist <.deb-file>"},
+ {"__checkuploaders", A_C(checkuploaders),
+ 1, -1, "__checkuploaders <codenames>"},
+ {"_versioncompare", A_N(versioncompare),
+ 2, 2, "_versioncompare <version> <version>"},
+ {"_detect", A__F(detect),
+ -1, -1, NULL},
+ {"_forget", A__F(forget),
+ -1, -1, NULL},
+ {"_listmd5sums", A__F(listmd5sums),
+ 0, 0, "_listmd5sums"},
+ {"_listchecksums", A__F(listchecksums),
+ 0, 0, "_listchecksums"},
+ {"_addchecksums", A__F(addmd5sums),
+ 0, 0, "_addchecksums < data"},
+ {"_addmd5sums", A__F(addmd5sums),
+ 0, 0, "_addmd5sums < data"},
+ {"_dumpcontents", A_ROB(dumpcontents)|MAY_UNUSED,
+ 1, 1, "_dumpcontents <identifier>"},
+ {"_removereferences", A__R(removereferences),
+ 1, 1, "_removereferences <identifier>"},
+ {"_removereference", A__R(removereference),
+ 2, 2, "_removereferences <identifier>"},
+ {"_addreference", A__R(addreference),
+ 2, 2, "_addreference <reference> <referee>"},
+ {"_addreferences", A__R(addreferences),
+ 1, -1, "_addreferences <referee> <references>"},
+ {"_fakeemptyfilelist", A__F(fakeemptyfilelist),
+ 1, 1, "_fakeemptyfilelist <filekey>"},
+ {"_addpackage", A_Dact(addpackage),
+ 3, -1, "-C <component> -A <architecture> -T <packagetype> _addpackage <distribution> <filename> <package-names>"},
+ {"remove", A_Dact(remove),
+ 2, -1, "[-C <component>] [-A <architecture>] [-T <type>] remove <codename> <package-names>"},
+ {"removesrc", A_D(removesrc),
+ 2, 3, "removesrc <codename> <source-package-names> [<source-version>]"},
+ {"removesrcs", A_D(removesrcs),
+ 2, -1, "removesrcs <codename> (<source-package-name>[=<source-version>])+"},
+ {"ls", A_ROBact(ls),
+ 1, 1, "[-C <component>] [-A <architecture>] [-T <type>] ls <package-name>"},
+ {"lsbycomponent", A_ROBact(lsbycomponent),
+ 1, 1, "[-C <component>] [-A <architecture>] [-T <type>] lsbycomponent <package-name>"},
+ {"list", A_ROBact(list),
+ 1, 2, "[-C <component>] [-A <architecture>] [-T <type>] list <codename> [<package-name>]"},
+ {"listfilter", A_ROBact(listfilter),
+ 2, 2, "[-C <component>] [-A <architecture>] [-T <type>] listfilter <codename> <term to describe which packages to list>"},
+ {"removefilter", A_Dact(removefilter),
+ 2, 2, "[-C <component>] [-A <architecture>] [-T <type>] removefilter <codename> <term to describe which packages to remove>"},
+ {"listmatched", A_ROBact(listmatched),
+ 2, 2, "[-C <component>] [-A <architecture>] [-T <type>] listmatched <codename> <glob to describe packages>"},
+ {"removematched", A_Dact(removematched),
+ 2, 2, "[-C <component>] [-A <architecture>] [-T <type>] removematched <codename> <glob to describe packages>"},
+ {"createsymlinks", A_C(createsymlinks),
+ 0, -1, "createsymlinks [<distributions>]"},
+ {"export", A_F(export),
+ 0, -1, "export [<distributions>]"},
+ {"check", A_RFact(check),
+ 0, -1, "check [<distributions>]"},
+ {"sizes", A_RF(sizes),
+ 0, -1, "check [<distributions>]"},
+ {"reoverride", A_Fact(reoverride),
+ 0, -1, "[-T ...] [-C ...] [-A ...] reoverride [<distributions>]"},
+ {"repairdescriptions", A_Fact(repairdescriptions),
+ 0, -1, "[-C ...] [-A ...] repairdescriptions [<distributions>]"},
+ {"forcerepairdescriptions", A_Fact(repairdescriptions),
+ 0, -1, "[-C ...] [-A ...] [force]repairdescriptions [<distributions>]"},
+ {"redochecksums", A_Fact(redochecksums),
+ 0, -1, "[-T ...] [-C ...] [-A ...] redo [<distributions>]"},
+ {"collectnewchecksums", A_F(collectnewchecksums),
+ 0, 0, "collectnewchecksums"},
+ {"checkpool", A_F(checkpool),
+ 0, 1, "checkpool [fast]"},
+ {"rereference", A_R(rereference),
+ 0, -1, "rereference [<distributions>]"},
+ {"dumpreferences", A_R(dumpreferences)|MAY_UNUSED,
+ 0, 0, "dumpreferences", },
+ {"dumpunreferenced", A_RF(dumpunreferenced),
+ 0, 0, "dumpunreferenced", },
+ {"deleteifunreferenced", A_RF(deleteifunreferenced),
+ 0, -1, "deleteifunreferenced"},
+ {"deleteunreferenced", A_RF(deleteunreferenced),
+ 0, 0, "deleteunreferenced", },
+ {"retrack", A_D(retrack),
+ 0, -1, "retrack [<distributions>]"},
+ {"dumptracks", A_ROB(dumptracks)|MAY_UNUSED,
+ 0, -1, "dumptracks [<distributions>]"},
+ {"removealltracks", A_D(removealltracks)|MAY_UNUSED,
+ 1, -1, "removealltracks <distributions>"},
+ {"tidytracks", A_D(tidytracks),
+ 0, -1, "tidytracks [<distributions>]"},
+ {"removetrack", A_D(removetrack),
+ 3, 3, "removetrack <distribution> <sourcename> <version>"},
+ {"update", A_Dact(update)|NEED_RESTRICT,
+ 0, -1, "update [<distributions>]"},
+ {"checkupdate", A_Bact(checkupdate)|NEED_RESTRICT,
+ 0, -1, "checkupdate [<distributions>]"},
+ {"dumpupdate", A_Bact(dumpupdate)|NEED_RESTRICT,
+ 0, -1, "dumpupdate [<distributions>]"},
+ {"predelete", A_Dact(predelete),
+ 0, -1, "predelete [<distributions>]"},
+ {"pull", A_Dact(pull)|NEED_RESTRICT,
+ 0, -1, "pull [<distributions>]"},
+ {"copy", A_Dact(copy),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] copy <destination-distribution> <source-distribution> <package-names to pull>"},
+ {"copysrc", A_Dact(copysrc),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] copysrc <destination-distribution> <source-distribution> <source-package-name> [<source versions>]"},
+ {"copymatched", A_Dact(copymatched),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] copymatched <destination-distribution> <source-distribution> <glob>"},
+ {"copyfilter", A_Dact(copyfilter),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] copyfilter <destination-distribution> <source-distribution> <formula>"},
+ {"move", A_Dact(move),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] move <destination-distribution> <source-distribution> <package-names to move>"},
+ {"movesrc", A_Dact(movesrc),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] movesrc <destination-distribution> <source-distribution> <source-package-name> [<source versions>]"},
+ {"movematched", A_Dact(movematched),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] movematched <destination-distribution> <source-distribution> <glob>"},
+ {"movefilter", A_Dact(movefilter),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] movefilter <destination-distribution> <source-distribution> <formula>"},
+ {"restore", A_Dact(restore),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] restore <distribution> <snapshot-name> <package-names to restore>"},
+ {"restoresrc", A_Dact(restoresrc),
+ 3, -1, "[-C <component> ] [-A <architecture>] [-T <packagetype>] restoresrc <distribution> <snapshot-name> <source-package-name> [<source versions>]"},
+ {"restorematched", A_Dact(restorematched),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] restorematched <distribution> <snapshot-name> <glob>"},
+ {"restorefilter", A_Dact(restorefilter),
+ 3, 3, "[-C <component> ] [-A <architecture>] [-T <packagetype>] restorefilter <distribution> <snapshot-name> <formula>"},
+ {"dumppull", A_Bact(dumppull)|NEED_RESTRICT,
+ 0, -1, "dumppull [<distributions>]"},
+ {"checkpull", A_Bact(checkpull)|NEED_RESTRICT,
+ 0, -1, "checkpull [<distributions>]"},
+ {"includedeb", A_Dactsp(includedeb)|NEED_DELNEW,
+ 2, -1, "[--delete] includedeb <distribution> <.deb-file>"},
+ {"includeudeb", A_Dactsp(includedeb)|NEED_DELNEW,
+ 2, -1, "[--delete] includeudeb <distribution> <.udeb-file>"},
+ {"includeddeb", A_Dactsp(includedeb)|NEED_DELNEW,
+ 2, -1, "[--delete] includeddeb <distribution> <.ddeb-file>"},
+ {"includedsc", A_Dactsp(includedsc)|NEED_DELNEW,
+ 2, 2, "[--delete] includedsc <distribution> <package>"},
+ {"include", A_Dactsp(include)|NEED_DELNEW,
+ 2, 2, "[--delete] include <distribution> <.changes-file>"},
+ {"generatefilelists", A_F(generatefilelists),
+ 0, 1, "generatefilelists [reread]"},
+ {"translatefilelists", A__T(translatefilelists),
+ 0, 0, "translatefilelists"},
+ {"translatelegacychecksums", A_N(translatelegacychecksums),
+ 0, 0, "translatelegacychecksums"},
+ {"_listconfidentifiers", A_C(listconfidentifiers),
+ 0, -1, "_listconfidentifiers"},
+ {"_listdbidentifiers", A_ROB(listdbidentifiers)|MAY_UNUSED,
+ 0, -1, "_listdbidentifiers"},
+ {"_listcodenames", A_C(listcodenames),
+ 0, 0, "_listcodenames"},
+ {"clearvanished", A_D(clearvanished)|MAY_UNUSED,
+ 0, 0, "[--delete] clearvanished"},
+ {"processincoming", A_D(processincoming)|NEED_DELNEW,
+ 1, 2, "processincoming <rule-name> [<.changes file>]"},
+ {"gensnapshot", A_R(gensnapshot),
+ 2, 2, "gensnapshot <distribution> <date or other name>"},
+ {"unreferencesnapshot", A__R(unreferencesnapshot),
+ 2, 2, "gensnapshot <distribution> <name of snapshot>"},
+ {"rerunnotifiers", A_Bact(rerunnotifiers),
+ 0, -1, "rerunnotifiers [<distributions>]"},
+ {"cleanlists", A_L(cleanlists),
+ 0, 0, "cleanlists"},
+ {"build-needing", A_ROBact(buildneeded),
+ 2, 3, "[-C <component>] build-needing <codename> <architecture> [<glob>]"},
+ {"flood", A_Dact(flood)|MAY_UNUSED,
+ 1, 2, "[-C <component> ] [-A <architecture>] [-T <packagetype>] flood <codename> [<architecture>]"},
+ {"unusedsources", A_B(unusedsources),
+ 0, -1, "unusedsources [<codenames>]"},
+ {"sourcemissing", A_B(sourcemissing),
+ 0, -1, "sourcemissing [<codenames>]"},
+ {"reportcruft", A_B(reportcruft),
+ 0, -1, "reportcruft [<codenames>]"},
+ {NULL, NULL , 0, 0, 0, NULL}
+};
/* Undefine every table helper macro defined above (the old list undefined
 * A_F twice and forgot most of the others). */
#undef A_N
#undef A_C
#undef A_ROB
#undef A_ROBact
#undef A_L
#undef A_B
#undef A_Bact
#undef A_F
#undef A_Fact
#undef A_R
#undef A__F
#undef A__R
#undef A__T
#undef A_RF
#undef A_RFact
#undef A_D
#undef A_Dact
#undef A_Dactsp
+
+static retvalue callaction(command_t command, const struct action *action, int argc, const char *argv[]) {
+ retvalue result, r;
+ struct distribution *alldistributions = NULL;
+ bool deletederef, deletenew;
+ int needs;
+ struct atomlist as, *architectures = NULL;
+ struct atomlist cs, *components = NULL;
+ struct atomlist ps, *packagetypes = NULL;
+
+ assert(action != NULL);
+
+ causingcommand = command;
+
+ if (action->minargs >= 0 && argc < 1 + action->minargs) {
+ fprintf(stderr,
+"Error: Too few arguments for command '%s'!\nSyntax: reprepro %s\n",
+ argv[0], action->wrongargmessage);
+ return RET_ERROR;
+ }
+ if (action->maxargs >= 0 && argc > 1 + action->maxargs) {
+ fprintf(stderr,
+"Error: Too many arguments for command '%s'!\nSyntax: reprepro %s\n",
+ argv[0], action->wrongargmessage);
+ return RET_ERROR;
+ }
+ needs = action->needs;
+
+ if (!ISSET(needs, NEED_ACT) && (x_architecture != NULL)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot be restricted to an architecture!\n"
+"neither --archiecture nor -A make sense here.\n",
+ action->name))
+ return RET_ERROR;
+ }
+ if (!ISSET(needs, NEED_ACT) && (x_component != NULL)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot be restricted to a component!\n"
+"neither --component nor -C make sense here.\n",
+ action->name))
+ return RET_ERROR;
+ }
+ if (!ISSET(needs, NEED_ACT) && (x_packagetype != NULL)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot be restricted to a packagetype!\n"
+"neither --packagetype nor -T make sense here.\n",
+ action->name))
+ return RET_ERROR;
+ }
+
+ if (!ISSET(needs, NEED_SP) && (x_section != NULL)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot take a section option!\n"
+"neither --section nor -S make sense here.\n",
+ action->name))
+ return RET_ERROR;
+ }
+ if (!ISSET(needs, NEED_SP) && (x_priority != NULL)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot take a priority option!\n"
+"neither --priority nor -P make sense here.\n",
+ action->name))
+ return RET_ERROR;
+ }
+ if (!ISSET(needs, NEED_RESTRICT) && (cmdline_bin_filter.set
+ || cmdline_src_filter.set)) {
+ if (!IGNORING(unusedoption,
+"Action '%s' cannot take a --restrict-* option!\n",
+ action->name))
+ return RET_ERROR;
+ }
+
+ if (ISSET(needs, NEED_DATABASE))
+ needs |= NEED_CONFIG;
+ if (ISSET(needs, NEED_CONFIG)) {
+ r = distribution_readall(&alldistributions);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ if (!ISSET(needs, NEED_DATABASE)) {
+ assert ((needs & ~NEED_CONFIG) == 0);
+
+ result = action->start(alldistributions,
+ x_section, x_priority,
+ NULL, NULL, NULL,
+ argc, argv);
+ logger_wait();
+
+ if (!RET_WAS_ERROR(result)) {
+ r = distribution_exportlist(export, alldistributions);
+ RET_ENDUPDATE(result, r);
+ }
+
+ r = distribution_freelist(alldistributions);
+ RET_ENDUPDATE(result, r);
+ return result;
+ }
+
+ if (ISSET(needs, NEED_ACT)) {
+ const char *unknownitem;
+ if (x_architecture != NULL) {
+ r = atomlist_filllist(at_architecture, &as,
+ x_architecture, &unknownitem);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Error: Architecture '%s' as given to --architecture is not know.\n"
+"(it does not appear as architecture in %s/distributions (did you mistype?))\n",
+ unknownitem, global.confdir);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r)) {
+ (void)distribution_freelist(alldistributions);
+ return r;
+ }
+ architectures = &as;
+ } else {
+ atomlist_init(&as);
+ }
+ if (x_component != NULL) {
+ r = atomlist_filllist(at_component, &cs,
+ x_component, &unknownitem);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Error: Component '%s' as given to --component is not know.\n"
+"(it does not appear as component in %s/distributions (did you mistype?))\n",
+ unknownitem, global.confdir);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r)) {
+ (void)distribution_freelist(alldistributions);
+ return r;
+ }
+ components = &cs;
+ } else {
+ atomlist_init(&cs);
+ }
+ if (x_packagetype != NULL) {
+ r = atomlist_filllist(at_packagetype, &ps,
+ x_packagetype, &unknownitem);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Error: Packagetype '%s' as given to --packagetype is not know.\n"
+"(only dsc, deb, udeb, ddeb and combinations of those are allowed)\n",
+ unknownitem);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r)) {
+ (void)distribution_freelist(alldistributions);
+ return r;
+ }
+ packagetypes = &ps;
+ } else {
+ atomlist_init(&ps);
+ }
+ if (ps.count == 1 && ps.atoms[0] == pt_dsc &&
+ limitations_missed(architectures,
+ architecture_source)) {
+ fprintf(stderr,
+"Error: -T dsc is not possible with -A not including source!\n");
+ return RET_ERROR;
+ }
+ if (as.count == 1 && as.atoms[0] == architecture_source &&
+ limitations_missed(packagetypes, pt_dsc)) {
+ fprintf(stderr,
+"Error: -A source is not possible with -T not including dsc!\n");
+ return RET_ERROR;
+ }
+ }
+
+ deletederef = ISSET(needs, NEED_DEREF) && !keepunreferenced;
+ deletenew = ISSET(needs, NEED_DELNEW) && !keepunusednew;
+
+ result = database_create(alldistributions,
+ fast, ISSET(needs, NEED_NO_PACKAGES),
+ ISSET(needs, MAY_UNUSED), ISSET(needs, IS_RO),
+ waitforlock, verbosedatabase || (verbose >= 30));
+ if (!RET_IS_OK(result)) {
+ (void)distribution_freelist(alldistributions);
+ return result;
+ }
+
+ /* adding files may check references to see if they were added */
+ if (ISSET(needs, NEED_FILESDB))
+ needs |= NEED_REFERENCES;
+
+ if (ISSET(needs, NEED_REFERENCES))
+ result = database_openreferences();
+
+ assert (result != RET_NOTHING);
+ if (RET_IS_OK(result)) {
+
+ if (ISSET(needs, NEED_FILESDB))
+ result = database_openfiles();
+
+ if (RET_IS_OK(result)) {
+ if (outhook != NULL) {
+ r = outhook_start();
+ RET_UPDATE(result, r);
+ }
+ }
+
+ assert (result != RET_NOTHING);
+ if (RET_IS_OK(result)) {
+
+ if (deletederef) {
+ assert (ISSET(needs, NEED_REFERENCES));
+ }
+
+ if (!interrupted()) {
+ result = action->start(alldistributions,
+ x_section, x_priority,
+ architectures, components, packagetypes,
+ argc, argv);
+ /* wait for package specific loggers */
+ logger_wait();
+
+ /* remove files added but not used */
+ pool_tidyadded(deletenew);
+
+ /* tell an outhook about added files */
+ if (outhook != NULL)
+ pool_sendnewfiles();
+ /* export changed/lookedat distributions */
+ if (!RET_WAS_ERROR(result)) {
+ r = distribution_exportlist(export,
+ alldistributions);
+ RET_ENDUPDATE(result, r);
+ }
+
+ /* delete files losing references, or
+ * tell how many lost their references */
+
+ // TODO: instead check if any distribution that
+ // was not exported lost files
+ // (and in a far future do not remove references
+ // before the index is written)
+ if (deletederef && RET_WAS_ERROR(result)) {
+ deletederef = false;
+ if (pool_havedereferenced) {
+ fprintf(stderr,
+"Not deleting possibly left over files due to previous errors.\n"
+"(To keep the files in the still existing index files from vanishing)\n"
+"Use dumpunreferenced/deleteunreferenced to show/delete files without references.\n");
+ }
+ }
+ r = pool_removeunreferenced(deletederef);
+ RET_ENDUPDATE(result, r);
+
+ if (outhook != NULL) {
+ if (interrupted())
+ r = RET_ERROR_INTERRUPTED;
+ else
+ r = outhook_call(outhook);
+ RET_ENDUPDATE(result, r);
+ }
+ }
+ }
+ }
+ if (!interrupted()) {
+ logger_wait();
+ }
+ if (ISSET(needs, NEED_ACT)) {
+ atomlist_done(&as);
+ atomlist_done(&cs);
+ atomlist_done(&ps);
+ }
+ logger_warn_waiting();
+ r = database_close();
+ RET_ENDUPDATE(result, r);
+ r = distribution_freelist(alldistributions);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
/* Identifiers for long command line options that have no short form;
 * getopt_long stores the matched value in 'longoption', which
 * handle_option() then dispatches on (case '\0').  Most flags come in
 * LO_X / LO_NOX pairs enabling/disabling the same setting. */
enum { LO_DELETE=1,
LO_KEEPUNREFERENCED,
LO_KEEPUNUSEDNEW,
LO_KEEPUNNEEDEDLISTS,
LO_NOTHINGISERROR,
LO_NOLISTDOWNLOAD,
LO_ASKPASSPHRASE,
LO_ONLYSMALLDELETES,
LO_KEEPDIRECTORIES,
LO_KEEPTEMPORARIES,
LO_FAST,
LO_SKIPOLD,
LO_GUESSGPGTTY,
LO_NODELETE,
LO_NOKEEPUNREFERENCED,
LO_NOKEEPUNUSEDNEW,
LO_NOKEEPUNNEEDEDLISTS,
LO_NONOTHINGISERROR,
LO_LISTDOWNLOAD,
LO_NOASKPASSPHRASE,
LO_NOONLYSMALLDELETES,
LO_NOKEEPDIRECTORIES,
LO_NOKEEPTEMPORARIES,
LO_NOFAST,
LO_NOSKIPOLD,
LO_NOGUESSGPGTTY,
LO_VERBOSEDB,
LO_NOVERBOSEDB,
LO_EXPORT,
LO_OUTDIR,
LO_DISTDIR,
LO_DBDIR,
LO_LOGDIR,
LO_LISTDIR,
LO_OVERRIDEDIR,
LO_CONFDIR,
LO_METHODDIR,
LO_VERSION,
LO_WAITFORLOCK,
LO_SPACECHECK,
LO_SAFETYMARGIN,
LO_DBSAFETYMARGIN,
LO_GUNZIP,
LO_BUNZIP2,
LO_UNLZMA,
LO_UNXZ,
LO_LZIP,
LO_UNZSTD,
LO_GNUPGHOME,
LO_LISTFORMAT,
LO_LISTSKIP,
LO_LISTMAX,
LO_MORGUEDIR,
LO_SHOWPERCENT,
LO_RESTRICT_BIN,
LO_RESTRICT_SRC,
LO_RESTRICT_FILE_BIN,
LO_RESTRICT_FILE_SRC,
LO_ENDHOOK,
LO_OUTHOOK,
LO_UNIGNORE};
/* set by the option parser to the LO_* value of the current long option */
static int longoption = 0;
/* argv[0], kept for the --version message */
static const char *programname;
+
+static void setexport(const char *argument) {
+ if (strcasecmp(argument, "silent-never") == 0) {
+ CONFIGSET(export, EXPORT_SILENT_NEVER);
+ return;
+ }
+ if (strcasecmp(argument, "never") == 0) {
+ CONFIGSET(export, EXPORT_NEVER);
+ return;
+ }
+ if (strcasecmp(argument, "changed") == 0) {
+ CONFIGSET(export, EXPORT_CHANGED);
+ return;
+ }
+ if (strcasecmp(argument, "normal") == 0) {
+ CONFIGSET(export, EXPORT_NORMAL);
+ return;
+ }
+ if (strcasecmp(argument, "lookedat") == 0) {
+ CONFIGSET(export, EXPORT_NORMAL);
+ return;
+ }
+ if (strcasecmp(argument, "force") == 0) {
+ CONFIGSET(export, EXPORT_FORCE);
+ return;
+ }
+ fprintf(stderr,
+"Error: --export needs an argument of 'silenv-never', 'never', 'changed', 'lookedat' or 'force', but got '%s'\n",
+ argument);
+ exit(EXIT_FAILURE);
+}
+
/* Parse a non-negative decimal number given as the argument of option
 * 'name' (used only for error messages), rejecting values above 'max'.
 * Exits the program on malformed or out-of-range input.
 * Note: strtoll never sets the end pointer to NULL, the check is kept as
 * harmless belt-and-braces; l == LLONG_MAX doubles as the saturation
 * value strtoll returns on overflow (ERANGE).
 * Fixed: error message said "Too large argument for to %s". */
static unsigned long long parse_number(const char *name, const char *argument, long long max) {
	long long l;
	char *p;

	l = strtoll(argument, &p, 10);
	if (p==NULL || *p != '\0' || l < 0) {
		fprintf(stderr, "Invalid argument to %s: '%s'\n", name, argument);
		exit(EXIT_FAILURE);
	}
	if (l == LLONG_MAX || l > max) {
		fprintf(stderr, "Too large argument to %s: '%s'\n", name, argument);
		exit(EXIT_FAILURE);
	}
	return l;
}
+
+static void handle_option(int c, const char *argument) {
+ retvalue r;
+ int i;
+
+ switch (c) {
+ case 'h':
+ printf(
+"reprepro - Produce and Manage a Debian package repository\n\n"
+"options:\n"
+" -h, --help: Show this help\n"
+" -i --ignore <flag>: Ignore errors of type <flag>.\n"
+" --keepunreferencedfiles: Do not delete files no longer needed.\n"
+" --delete: Delete included files if reasonable.\n"
+" -b, --basedir <dir>: Base directory\n"
+" --outdir <dir>: Set pool and dists base directory\n"
+" --distdir <dir>: Override dists directory.\n"
+" --dbdir <dir>: Directory to place the database in.\n"
+" --listdir <dir>: Directory to place downloaded lists in.\n"
+" --confdir <dir>: Directory to search configuration in.\n"
+" --logdir <dir>: Directory to put requeted log files in.\n"
+" --methodir <dir>: Use instead of /usr/lib/apt/methods/\n"
+" -S, --section <section>: Force include* to set section.\n"
+" -P, --priority <priority>: Force include* to set priority.\n"
+" -C, --component <component>: Add,list or delete only in component.\n"
+" -A, --architecture <architecture>: Add,list or delete only to architecture.\n"
+" -T, --type <type>: Add,list or delete only type (dsc,deb,udeb,ddeb).\n"
+"\n"
+"actions (selection, for more see manpage):\n"
+" dumpreferences: Print all saved references\n"
+" dumpunreferenced: Print registered files without reference\n"
+" deleteunreferenced: Delete and forget all unreferenced files\n"
+" checkpool: Check if all files in the pool are still in proper shape.\n"
+" check [<distributions>]\n"
+" Check for all needed files to be registered properly.\n"
+" export [<distributions>]\n"
+" Force (re)generation of Packages.gz/Packages/Sources.gz/Release\n"
+" update [<distributions>]\n"
+" Update the given distributions from the configured sources.\n"
+" remove <distribution> <packagename>\n"
+" Remove the given package from the specified distribution.\n"
+" include <distribution> <.changes-file>\n"
+" Include the given upload.\n"
+" includedeb <distribution> <.deb-file>\n"
+" Include the given binary package.\n"
+" includeddeb <distribution> <.ddeb-file>\n"
+" Include the given debug binary package.\n"
+" includeudeb <distribution> <.udeb-file>\n"
+" Include the given installer binary package.\n"
+" includedsc <distribution> <.dsc-file>\n"
+" Include the given source package.\n"
+" list <distribution> <package-name>\n"
+" List all packages by the given name occurring in the given distribution.\n"
+" listfilter <distribution> <condition>\n"
+" List all packages in the given distribution matching the condition.\n"
+" clearvanished\n"
+" Remove everything no longer referenced in the distributions config file.\n"
+"\n");
+ exit(EXIT_SUCCESS);
+ case '\0':
+ switch (longoption) {
+ case LO_UNIGNORE:
+ r = set_ignore(argument, false, config_state);
+ if (RET_WAS_ERROR(r)) {
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case LO_SHOWPERCENT:
+ global.showdownloadpercent++;
+ break;
+ case LO_DELETE:
+ delete++;
+ break;
+ case LO_NODELETE:
+ delete--;
+ break;
+ case LO_KEEPUNREFERENCED:
+ CONFIGSET(keepunreferenced, true);
+ break;
+ case LO_NOKEEPUNREFERENCED:
+ CONFIGSET(keepunreferenced, false);
+ break;
+ case LO_KEEPUNUSEDNEW:
+ CONFIGSET(keepunusednew, true);
+ break;
+ case LO_NOKEEPUNUSEDNEW:
+ CONFIGSET(keepunusednew, false);
+ break;
+ case LO_KEEPUNNEEDEDLISTS:
+ /* this is the only option now and ignored
+ * for compatibility reasond */
+ break;
+ case LO_NOKEEPUNNEEDEDLISTS:
+ fprintf(stderr,
+"Warning: --nokeepuneededlists no longer exists.\n"
+"Use cleanlists to clean manually.\n");
+ break;
+ case LO_KEEPTEMPORARIES:
+ CONFIGGSET(keeptemporaries, true);
+ break;
+ case LO_NOKEEPTEMPORARIES:
+ CONFIGGSET(keeptemporaries, false);
+ break;
+ case LO_ONLYSMALLDELETES:
+ CONFIGGSET(onlysmalldeletes, true);
+ break;
+ case LO_NOONLYSMALLDELETES:
+ CONFIGGSET(onlysmalldeletes, false);
+ break;
+ case LO_KEEPDIRECTORIES:
+ CONFIGGSET(keepdirectories, true);
+ break;
+ case LO_NOKEEPDIRECTORIES:
+ CONFIGGSET(keepdirectories, false);
+ break;
+ case LO_NOTHINGISERROR:
+ CONFIGSET(nothingiserror, true);
+ break;
+ case LO_NONOTHINGISERROR:
+ CONFIGSET(nothingiserror, false);
+ break;
+ case LO_NOLISTDOWNLOAD:
+ CONFIGSET(nolistsdownload, true);
+ break;
+ case LO_LISTDOWNLOAD:
+ CONFIGSET(nolistsdownload, false);
+ break;
+ case LO_ASKPASSPHRASE:
+ CONFIGSET(askforpassphrase, true);
+ break;
+ case LO_NOASKPASSPHRASE:
+ CONFIGSET(askforpassphrase, false);
+ break;
+ case LO_GUESSGPGTTY:
+ CONFIGSET(guessgpgtty, true);
+ break;
+ case LO_NOGUESSGPGTTY:
+ CONFIGSET(guessgpgtty, false);
+ break;
+ case LO_SKIPOLD:
+ CONFIGSET(skipold, true);
+ break;
+ case LO_NOSKIPOLD:
+ CONFIGSET(skipold, false);
+ break;
+ case LO_FAST:
+ CONFIGSET(fast, true);
+ break;
+ case LO_NOFAST:
+ CONFIGSET(fast, false);
+ break;
+ case LO_VERBOSEDB:
+ CONFIGSET(verbosedatabase, true);
+ break;
+ case LO_NOVERBOSEDB:
+ CONFIGSET(verbosedatabase, false);
+ break;
+ case LO_EXPORT:
+ setexport(argument);
+ break;
+ case LO_OUTDIR:
+ CONFIGDUP(x_outdir, argument);
+ break;
+ case LO_DISTDIR:
+ CONFIGDUP(x_distdir, argument);
+ break;
+ case LO_DBDIR:
+ CONFIGDUP(x_dbdir, argument);
+ break;
+ case LO_LISTDIR:
+ CONFIGDUP(x_listdir, argument);
+ break;
+ case LO_CONFDIR:
+ CONFIGDUP(x_confdir, argument);
+ break;
+ case LO_LOGDIR:
+ CONFIGDUP(x_logdir, argument);
+ break;
+ case LO_METHODDIR:
+ CONFIGDUP(x_methoddir, argument);
+ break;
+ case LO_MORGUEDIR:
+ CONFIGDUP(x_morguedir, argument);
+ break;
+ case LO_VERSION:
+ fprintf(stderr,
+"%s: This is " PACKAGE " version " VERSION "\n",
+ programname);
+ exit(EXIT_SUCCESS);
+ case LO_WAITFORLOCK:
+ CONFIGSET(waitforlock, parse_number(
+ "--waitforlock",
+ argument, LONG_MAX));
+ break;
+ case LO_SPACECHECK:
+ if (strcasecmp(argument, "none") == 0) {
+ CONFIGSET(spacecheckmode, scm_NONE);
+ } else if (strcasecmp(argument, "full") == 0) {
+ CONFIGSET(spacecheckmode, scm_FULL);
+ } else {
+ fprintf(stderr,
+"Unknown --spacecheck argument: '%s'!\n", argument);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case LO_SAFETYMARGIN:
+ CONFIGSET(reservedotherspace, parse_number(
+ "--safetymargin",
+ argument, LONG_MAX));
+ break;
+ case LO_DBSAFETYMARGIN:
+ CONFIGSET(reserveddbspace, parse_number(
+ "--dbsafetymargin",
+ argument, LONG_MAX));
+ break;
+ case LO_GUNZIP:
+ CONFIGDUP(gunzip, argument);
+ break;
+ case LO_BUNZIP2:
+ CONFIGDUP(bunzip2, argument);
+ break;
+ case LO_UNLZMA:
+ CONFIGDUP(unlzma, argument);
+ break;
+ case LO_UNXZ:
+ CONFIGDUP(unxz, argument);
+ break;
+ case LO_LZIP:
+ CONFIGDUP(lunzip, argument);
+ break;
+ case LO_UNZSTD:
+ CONFIGDUP(unzstd, argument);
+ break;
+ case LO_GNUPGHOME:
+ CONFIGDUP(gnupghome, argument);
+ break;
+ case LO_ENDHOOK:
+ CONFIGDUP(endhook, argument);
+ break;
+ case LO_OUTHOOK:
+ CONFIGDUP(outhook, argument);
+ break;
+ case LO_LISTMAX:
+ i = parse_number("--list-max",
+ argument, INT_MAX);
+ if (i == 0)
+ i = -1;
+ CONFIGSET(listmax, i);
+ break;
+ case LO_LISTSKIP:
+ i = parse_number("--list-skip",
+ argument, INT_MAX);
+ CONFIGSET(listskip, i);
+ break;
+ case LO_LISTFORMAT:
+ if (strcmp(argument, "NONE") == 0) {
+ CONFIGSET(listformat, NULL);
+ } else
+ CONFIGDUP(listformat, argument);
+ break;
+ case LO_RESTRICT_BIN:
+ r = filterlist_cmdline_add_pkg(false,
+ argument);
+ if (RET_WAS_ERROR(r))
+ exit(EXIT_FAILURE);
+ break;
+ case LO_RESTRICT_SRC:
+ r = filterlist_cmdline_add_pkg(true,
+ argument);
+ if (RET_WAS_ERROR(r))
+ exit(EXIT_FAILURE);
+ break;
+ case LO_RESTRICT_FILE_BIN:
+ r = filterlist_cmdline_add_file(false,
+ argument);
+ if (RET_WAS_ERROR(r))
+ exit(EXIT_FAILURE);
+ break;
+ case LO_RESTRICT_FILE_SRC:
+ r = filterlist_cmdline_add_file(true,
+ argument);
+ if (RET_WAS_ERROR(r))
+ exit(EXIT_FAILURE);
+ break;
+ default:
+ fputs(
+"Error parsing arguments!\n", stderr);
+ exit(EXIT_FAILURE);
+ }
+ longoption = 0;
+ break;
+ case 's':
+ verbose--;
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'V':
+ verbose+=5;
+ break;
+ case 'f':
+ fprintf(stderr,
+"Ignoring no longer existing option -f/--force!\n");
+ break;
+ case 'b':
+ CONFIGDUP(x_basedir, argument);
+ break;
+ case 'i':
+ r = set_ignore(argument, true, config_state);
+ if (RET_WAS_ERROR(r)) {
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case 'C':
+ if (x_component != NULL &&
+ strcmp(x_component, argument) != 0) {
+ fprintf(stderr,
+"Multiple '-%c' are not supported!\n", 'C');
+ exit(EXIT_FAILURE);
+ }
+ CONFIGDUP(x_component, argument);
+ break;
+ case 'A':
+ if (x_architecture != NULL &&
+ strcmp(x_architecture, argument) != 0) {
+ fprintf(stderr,
+"Multiple '-%c' are not supported!\n", 'A');
+ exit(EXIT_FAILURE);
+ }
+ CONFIGDUP(x_architecture, argument);
+ break;
+ case 'T':
+ if (x_packagetype != NULL &&
+ strcmp(x_packagetype, argument) != 0) {
+ fprintf(stderr,
+"Multiple '-%c' are not supported!\n", 'T');
+ exit(EXIT_FAILURE);
+ }
+ CONFIGDUP(x_packagetype, argument);
+ break;
+ case 'S':
+ if (x_section != NULL &&
+ strcmp(x_section, argument) != 0) {
+ fprintf(stderr,
+"Multiple '-%c' are not supported!\n", 'S');
+ exit(EXIT_FAILURE);
+ }
+ CONFIGDUP(x_section, argument);
+ break;
+ case 'P':
+ if (x_priority != NULL &&
+ strcmp(x_priority, argument) != 0) {
+ fprintf(stderr,
+"Multiple '-%c' are not supported!\n", 'P');
+ exit(EXIT_FAILURE);
+ }
+ CONFIGDUP(x_priority, argument);
+ break;
+ case '?':
+ /* getopt_long should have already given an error msg */
+ exit(EXIT_FAILURE);
+ default:
+ fprintf(stderr, "Not supported option '-%c'\n", c);
+ exit(EXIT_FAILURE);
+ }
+}
+
/* Set from the signal handler; volatile so the main loop re-reads it. */
static volatile bool was_interrupted = false;
/* Ensures the warning below is printed only once per run. */
static bool interruption_printed = false;

/* Poll whether a termination signal (SIGTERM/SIGABRT/SIGINT/SIGQUIT)
 * has arrived.  On the first positive answer a warning is printed that
 * a second signal will force-stop the program. */
bool interrupted(void) {
	if (!was_interrupted)
		return false;
	if (!interruption_printed) {
		interruption_printed = true;
		fprintf(stderr,
"\n\nInterruption in progress, interrupt again to force-stop it (and risking database corruption!)\n\n");
	}
	return true;
}
+
/* Signal handler: only records that an interrupt happened; interrupted()
 * polls the flag.  Installed with SA_ONESHOT/SA_RESETHAND in main(), so
 * a second signal gets the default (terminating) disposition. */
static void interrupt_signaled(int) /*__attribute__((signal))*/;
static void interrupt_signaled(UNUSED(int s)) {
	was_interrupted = true;
}
+
/* Free all option-owned directory/hook strings and the global string
 * pool before exiting with the given status.  Freeing just before exit
 * is done so valgrind logs stay readable (see comment in main()). */
static void myexit(int) __attribute__((__noreturn__));
static void myexit(int status) {
	free(x_dbdir);
	free(x_distdir);
	free(x_listdir);
	free(x_logdir);
	free(x_confdir);
	free(x_basedir);
	free(x_outdir);
	free(x_methoddir);
	free(x_component);
	free(x_architecture);
	free(x_packagetype);
	free(x_section);
	free(x_priority);
	free(x_morguedir);
	free(gnupghome);
	free(endhook);
	free(outhook);
	pool_free();
	exit(status);
}
+
/* Validate a '+X/' prefix on a configured directory: dir must either
 * not start with '+' at all, or start with "+<c>/" where <c> is one of
 * the characters in 'allowed'.  Anything else is a fatal usage error;
 * 'name' is only used for the error message. */
static void disallow_plus_prefix(const char *dir, const char *name, const char *allowed) {
	char marker;

	if (*dir != '+')
		return;
	marker = dir[1];
	if (marker == '\0' || dir[2] != '/') {
		fprintf(stderr,
"Error: %s starts with +, but does not continue with '+b/'.\n",
			name);
		myexit(EXIT_FAILURE);
	}
	if (strchr(allowed, marker) == NULL) {
		fprintf(stderr, "Error: %s is not allowed to start with '+%c/'.\n"
"(if your directory is named like that, set it to './+%c/')\n",
			name, marker, marker);
		myexit(EXIT_FAILURE);
	}
}
+
/* Expand a leading "+b/", "+o/" or "+c/" in dir into the current
 * basedir, outdir or confdir respectively (restricted to the letters
 * listed in 'allowed').  Absolute and "./"-relative paths are returned
 * unchanged; other relative paths are returned unchanged with a warning.
 * If freedir is true the passed dir is freed when a new string is
 * returned; exits on allocation failure.
 * NOTE(review): dir[3] is only safe because disallow_plus_prefix
 * guarantees dir[2] == '/' for every '+' path that reaches this point. */
static char *expand_plus_prefix(/*@only@*/char *dir, const char *name, const char *allowed, bool freedir) {
	const char *fromdir;
	char *newdir;

	disallow_plus_prefix(dir, name, allowed);

	if (dir[0] == '/' || (dir[0] == '.' && dir[1] == '/'))
		return dir;
	if (dir[0] != '+') {
		fprintf(stderr,
"Warning: %s '%s' does not start with '/', './', or '+'.\n"
"This currently means it is relative to the current working directory,\n"
"but that might change in the future or cause an error instead!\n",
			name, dir);
		return dir;
	}
	if (dir[1] == 'b') {
		fromdir = x_basedir;
	} else if (dir[1] == 'o') {
		fromdir = x_outdir;
	} else if (dir[1] == 'c') {
		fromdir = x_confdir;
	} else {
		/* unreachable: disallow_plus_prefix only lets through
		 * letters from 'allowed', which is a subset of "boc" */
		abort();
		return dir;
	}
	if (dir[3] == '\0')
		newdir = strdup(fromdir);
	else
		newdir = calc_dirconcat(fromdir, dir + 3);
	if (FAILEDTOALLOC(newdir)) {
		(void)fputs("Out of Memory!\n", stderr);
		exit(EXIT_FAILURE);
	}
	if (freedir)
		free(dir);
	return newdir;
}
+
+static inline int callendhook(int status, char *argv[]) {
+ char exitcode[4];
+
+ /* Try to close all open fd but 0,1,2 */
+ closefrom(3);
+
+ if (snprintf(exitcode, 4, "%u", ((unsigned int)status)&255U) > 3)
+ memcpy(exitcode, "255", 4);
+ sethookenvironment(causingfile, NULL, NULL, exitcode);
+ fflush(stdout);
+ fflush(stderr);
+ argv[0] = endhook,
+ (void)execv(endhook, argv);
+ fprintf(stderr, "Error executing '%s': %s\n", endhook,
+ strerror(errno));
+ return EXIT_RET(RET_ERROR);
+}
+
+int main(int argc, char *argv[]) {
+ static struct option longopts[] = {
+ {"delete", no_argument, &longoption, LO_DELETE},
+ {"nodelete", no_argument, &longoption, LO_NODELETE},
+ {"basedir", required_argument, NULL, 'b'},
+ {"ignore", required_argument, NULL, 'i'},
+ {"unignore", required_argument, &longoption, LO_UNIGNORE},
+ {"noignore", required_argument, &longoption, LO_UNIGNORE},
+ {"methoddir", required_argument, &longoption, LO_METHODDIR},
+ {"outdir", required_argument, &longoption, LO_OUTDIR},
+ {"distdir", required_argument, &longoption, LO_DISTDIR},
+ {"dbdir", required_argument, &longoption, LO_DBDIR},
+ {"listdir", required_argument, &longoption, LO_LISTDIR},
+ {"confdir", required_argument, &longoption, LO_CONFDIR},
+ {"logdir", required_argument, &longoption, LO_LOGDIR},
+ {"section", required_argument, NULL, 'S'},
+ {"priority", required_argument, NULL, 'P'},
+ {"component", required_argument, NULL, 'C'},
+ {"architecture", required_argument, NULL, 'A'},
+ {"type", required_argument, NULL, 'T'},
+ {"help", no_argument, NULL, 'h'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"silent", no_argument, NULL, 's'},
+ {"version", no_argument, &longoption, LO_VERSION},
+ {"nothingiserror", no_argument, &longoption, LO_NOTHINGISERROR},
+ {"nolistsdownload", no_argument, &longoption, LO_NOLISTDOWNLOAD},
+ {"keepunreferencedfiles", no_argument, &longoption, LO_KEEPUNREFERENCED},
+ {"keepunusednewfiles", no_argument, &longoption, LO_KEEPUNUSEDNEW},
+ {"keepunneededlists", no_argument, &longoption, LO_KEEPUNNEEDEDLISTS},
+ {"onlysmalldeletes", no_argument, &longoption, LO_ONLYSMALLDELETES},
+ {"keepdirectories", no_argument, &longoption, LO_KEEPDIRECTORIES},
+ {"keeptemporaries", no_argument, &longoption, LO_KEEPTEMPORARIES},
+ {"ask-passphrase", no_argument, &longoption, LO_ASKPASSPHRASE},
+ {"nonothingiserror", no_argument, &longoption, LO_NONOTHINGISERROR},
+ {"nonolistsdownload", no_argument, &longoption, LO_LISTDOWNLOAD},
+ {"listsdownload", no_argument, &longoption, LO_LISTDOWNLOAD},
+ {"nokeepunreferencedfiles", no_argument, &longoption, LO_NOKEEPUNREFERENCED},
+ {"nokeepunusednewfiles", no_argument, &longoption, LO_NOKEEPUNUSEDNEW},
+ {"nokeepunneededlists", no_argument, &longoption, LO_NOKEEPUNNEEDEDLISTS},
+ {"noonlysmalldeletes", no_argument, &longoption, LO_NOONLYSMALLDELETES},
+ {"nokeepdirectories", no_argument, &longoption, LO_NOKEEPDIRECTORIES},
+ {"nokeeptemporaries", no_argument, &longoption, LO_NOKEEPTEMPORARIES},
+ {"noask-passphrase", no_argument, &longoption, LO_NOASKPASSPHRASE},
+ {"guessgpgtty", no_argument, &longoption, LO_GUESSGPGTTY},
+ {"noguessgpgtty", no_argument, &longoption, LO_NOGUESSGPGTTY},
+ {"nonoguessgpgtty", no_argument, &longoption, LO_GUESSGPGTTY},
+ {"fast", no_argument, &longoption, LO_FAST},
+ {"nofast", no_argument, &longoption, LO_NOFAST},
+ {"verbosedb", no_argument, &longoption, LO_VERBOSEDB},
+ {"noverbosedb", no_argument, &longoption, LO_NOVERBOSEDB},
+ {"verbosedatabase", no_argument, &longoption, LO_VERBOSEDB},
+ {"noverbosedatabase", no_argument, &longoption, LO_NOVERBOSEDB},
+ {"skipold", no_argument, &longoption, LO_SKIPOLD},
+ {"noskipold", no_argument, &longoption, LO_NOSKIPOLD},
+ {"nonoskipold", no_argument, &longoption, LO_SKIPOLD},
+ {"force", no_argument, NULL, 'f'},
+ {"export", required_argument, &longoption, LO_EXPORT},
+ {"waitforlock", required_argument, &longoption, LO_WAITFORLOCK},
+ {"checkspace", required_argument, &longoption, LO_SPACECHECK},
+ {"spacecheck", required_argument, &longoption, LO_SPACECHECK},
+ {"safetymargin", required_argument, &longoption, LO_SAFETYMARGIN},
+ {"dbsafetymargin", required_argument, &longoption, LO_DBSAFETYMARGIN},
+ {"gunzip", required_argument, &longoption, LO_GUNZIP},
+ {"bunzip2", required_argument, &longoption, LO_BUNZIP2},
+ {"unlzma", required_argument, &longoption, LO_UNLZMA},
+ {"unxz", required_argument, &longoption, LO_UNXZ},
+ {"lunzip", required_argument, &longoption, LO_LZIP},
+ {"unzstd", required_argument, &longoption, LO_UNZSTD},
+ {"gnupghome", required_argument, &longoption, LO_GNUPGHOME},
+ {"list-format", required_argument, &longoption, LO_LISTFORMAT},
+ {"list-skip", required_argument, &longoption, LO_LISTSKIP},
+ {"list-max", required_argument, &longoption, LO_LISTMAX},
+ {"morguedir", required_argument, &longoption, LO_MORGUEDIR},
+ {"show-percent", no_argument, &longoption, LO_SHOWPERCENT},
+ {"restrict", required_argument, &longoption, LO_RESTRICT_SRC},
+ {"restrict-source", required_argument, &longoption, LO_RESTRICT_SRC},
+ {"restrict-src", required_argument, &longoption, LO_RESTRICT_SRC},
+ {"restrict-binary", required_argument, &longoption, LO_RESTRICT_BIN},
+ {"restrict-file", required_argument, &longoption, LO_RESTRICT_FILE_SRC},
+ {"restrict-file-source", required_argument, &longoption, LO_RESTRICT_FILE_SRC},
+ {"restrict-file-src", required_argument, &longoption, LO_RESTRICT_FILE_SRC},
+ {"restrict-file-binary", required_argument, &longoption, LO_RESTRICT_FILE_BIN},
+ {"endhook", required_argument, &longoption, LO_ENDHOOK},
+ {"outhook", required_argument, &longoption, LO_OUTHOOK},
+ {NULL, 0, NULL, 0}
+ };
+ const struct action *a;
+ retvalue r;
+ int c;
+ struct sigaction sa;
+ char *tempconfdir;
+
+ sigemptyset(&sa.sa_mask);
+#if defined(SA_ONESHOT)
+ sa.sa_flags = SA_ONESHOT;
+#elif defined(SA_RESETHAND)
+ sa.sa_flags = SA_RESETHAND;
+#elif !defined(SPLINT)
+# error "missing argument to sigaction!"
+#endif
+ sa.sa_handler = interrupt_signaled;
+ (void)sigaction(SIGTERM, &sa, NULL);
+ (void)sigaction(SIGABRT, &sa, NULL);
+ (void)sigaction(SIGINT, &sa, NULL);
+ (void)sigaction(SIGQUIT, &sa, NULL);
+
+ (void)signal(SIGPIPE, SIG_IGN);
+
+ programname = argv[0];
+
+ config_state = CONFIG_OWNER_DEFAULT;
+ CONFIGDUP(x_basedir, STD_BASE_DIR);
+ CONFIGDUP(x_confdir, "+b/conf");
+ CONFIGDUP(x_methoddir, STD_METHOD_DIR);
+ CONFIGDUP(x_outdir, "+b/");
+ CONFIGDUP(x_distdir, "+o/dists");
+ CONFIGDUP(x_dbdir, "+b/db");
+ CONFIGDUP(x_logdir, "+b/logs");
+ CONFIGDUP(x_listdir, "+b/lists");
+
+ config_state = CONFIG_OWNER_CMDLINE;
+ if (interrupted())
+ exit(EXIT_RET(RET_ERROR_INTERRUPTED));
+
+ while ((c = getopt_long(argc, argv, "+fVvshb:P:i:A:C:S:T:", longopts, NULL)) != -1) {
+ handle_option(c, optarg);
+ }
+ if (optind >= argc) {
+ fputs(
+"No action given. (see --help for available options and actions)\n", stderr);
+ exit(EXIT_FAILURE);
+ }
+ if (interrupted())
+ exit(EXIT_RET(RET_ERROR_INTERRUPTED));
+
+ /* only for this CONFIG_OWNER_ENVIRONMENT is a bit stupid,
+ * but perhaps it gets more... */
+ config_state = CONFIG_OWNER_ENVIRONMENT;
+ if (getenv("REPREPRO_BASE_DIR") != NULL) {
+ CONFIGDUP(x_basedir, getenv("REPREPRO_BASE_DIR"));
+ }
+ if (getenv("REPREPRO_CONFIG_DIR") != NULL) {
+ CONFIGDUP(x_confdir, getenv("REPREPRO_CONFIG_DIR"));
+ }
+
+ disallow_plus_prefix(x_basedir, "basedir", "");
+ tempconfdir = expand_plus_prefix(x_confdir, "confdir", "b", false);
+
+ config_state = CONFIG_OWNER_FILE;
+ optionsfile_parse(tempconfdir, longopts, handle_option);
+ if (tempconfdir != x_confdir)
+ free(tempconfdir);
+
+ disallow_plus_prefix(x_basedir, "basedir", "");
+ disallow_plus_prefix(x_methoddir, "methoddir", "");
+ x_confdir = expand_plus_prefix(x_confdir, "confdir", "b", true);
+ x_outdir = expand_plus_prefix(x_outdir, "outdir", "bc", true);
+ x_logdir = expand_plus_prefix(x_logdir, "logdir", "boc", true);
+ x_dbdir = expand_plus_prefix(x_dbdir, "dbdir", "boc", true);
+ x_distdir = expand_plus_prefix(x_distdir, "distdir", "boc", true);
+ x_listdir = expand_plus_prefix(x_listdir, "listdir", "boc", true);
+ if (x_morguedir != NULL)
+ x_morguedir = expand_plus_prefix(x_morguedir, "morguedir",
+ "boc", true);
+ if (endhook != NULL) {
+ if (endhook[0] == '+' || endhook[0] == '/' ||
+ (endhook[0] == '.' && endhook[1] == '/')) {
+ endhook = expand_plus_prefix(endhook, "endhook", "boc",
+ true);
+ } else {
+ char *h;
+
+ h = calc_dirconcat(x_confdir, endhook);
+ free(endhook);
+ endhook = h;
+ if (endhook == NULL)
+ exit(EXIT_RET(RET_ERROR_OOM));
+ }
+ }
+ if (outhook != NULL) {
+ if (outhook[0] == '+' || outhook[0] == '/' ||
+ (outhook[0] == '.' && outhook[1] == '/')) {
+ outhook = expand_plus_prefix(outhook, "outhook", "boc",
+ true);
+ } else {
+ char *h;
+
+ h = calc_dirconcat(x_confdir, outhook);
+ free(outhook);
+ outhook = h;
+ if (outhook == NULL)
+ exit(EXIT_RET(RET_ERROR_OOM));
+ }
+ }
+
+ if (guessgpgtty && (getenv("GPG_TTY")==NULL) && isatty(0)) {
+ static char terminalname[1024];
+ ssize_t len;
+
+ len = readlink("/proc/self/fd/0", terminalname, 1023);
+ if (len > 0 && len < 1024) {
+ terminalname[len] = '\0';
+ setenv("GPG_TTY", terminalname, 0);
+ } else if (verbose > 10) {
+ fprintf(stderr,
+"Could not readlink /proc/self/fd/0 (error was %s), not setting GPG_TTY.\n",
+ strerror(errno));
+ }
+ }
+
+ if (delete < D_COPY)
+ delete = D_COPY;
+ if (interrupted())
+ exit(EXIT_RET(RET_ERROR_INTERRUPTED));
+ global.basedir = x_basedir;
+ global.dbdir = x_dbdir;
+ global.outdir = x_outdir;
+ global.confdir = x_confdir;
+ global.distdir = x_distdir;
+ global.logdir = x_logdir;
+ global.methoddir = x_methoddir;
+ global.listdir = x_listdir;
+ global.morguedir = x_morguedir;
+
+ if (gunzip != NULL && gunzip[0] == '+')
+ gunzip = expand_plus_prefix(gunzip, "gunzip", "boc", true);
+ if (bunzip2 != NULL && bunzip2[0] == '+')
+ bunzip2 = expand_plus_prefix(bunzip2, "bunzip2", "boc", true);
+ if (unlzma != NULL && unlzma[0] == '+')
+ unlzma = expand_plus_prefix(unlzma, "unlzma", "boc", true);
+ if (unxz != NULL && unxz[0] == '+')
+ unxz = expand_plus_prefix(unxz, "unxz", "boc", true);
+ if (lunzip != NULL && lunzip[0] == '+')
+ lunzip = expand_plus_prefix(lunzip, "lunzip", "boc", true);
+ if (unzstd != NULL && lunzip[0] == '+')
+ lunzip = expand_plus_prefix(unzstd, "unzstd", "boc", true);
+ uncompressions_check(gunzip, bunzip2, unlzma, unxz, lunzip, unzstd);
+ free(gunzip);
+ free(bunzip2);
+ free(unlzma);
+ free(unxz);
+ free(lunzip);
+ free(unzstd);
+
+ a = all_actions;
+ while (a->name != NULL) {
+ a++;
+ }
+ r = atoms_init(a - all_actions);
+ if (r == RET_ERROR_OOM)
+ (void)fputs("Out of Memory!\n", stderr);
+ if (RET_WAS_ERROR(r))
+ exit(EXIT_RET(r));
+ for (a = all_actions; a->name != NULL ; a++) {
+ atoms_commands[1 + (a - all_actions)] = a->name;
+ }
+
+ if (gnupghome != NULL) {
+ gnupghome = expand_plus_prefix(gnupghome,
+ "gnupghome", "boc", true);
+ if (setenv("GNUPGHOME", gnupghome, 1) != 0) {
+ int e = errno;
+
+ fprintf(stderr,
+"Error %d setting GNUPGHOME to '%s': %s\n",
+ e, gnupghome, strerror(e));
+ myexit(EXIT_FAILURE);
+ }
+ }
+
+ a = all_actions;
+ while (a->name != NULL) {
+ if (strcasecmp(a->name, argv[optind]) == 0) {
+ signature_init(askforpassphrase);
+ r = callaction(1 + (a - all_actions), a,
+ argc-optind, (const char**)argv+optind);
+ /* yeah, freeing all this stuff before exiting is
+ * stupid, but it makes valgrind logs easier
+ * readable */
+ signatures_done();
+ free_known_keys();
+ if (RET_WAS_ERROR(r)) {
+ if (r == RET_ERROR_OOM)
+ (void)fputs("Out of Memory!\n", stderr);
+ else if (verbose >= 0)
+ (void)fputs(
+"There have been errors!\n",
+ stderr);
+ }
+ if (endhook != NULL) {
+ assert (optind > 0);
+ /* only returns upon error: */
+ r = callendhook(EXIT_RET(r), argv + optind - 1);
+ }
+ myexit(EXIT_RET(r));
+ } else
+ a++;
+ }
+
+ fprintf(stderr,
+"Unknown action '%s'. (see --help for available options and actions)\n",
+ argv[optind]);
+ signatures_done();
+ myexit(EXIT_FAILURE);
+}
+
/* Replace the control chunk of the package the cursor currently points
 * at with newcontrol (of length newcontrollen) in the cursor target's
 * package database; returns the result of cursor_replace. */
retvalue package_newcontrol_by_cursor(struct package_cursor *cursor, const char *newcontrol, size_t newcontrollen) {
	return cursor_replace(cursor->target->packages, cursor->cursor,
			newcontrol, newcontrollen);
}
diff --git a/md5.c b/md5.c
new file mode 100644
index 0000000..b9039a1
--- /dev/null
+++ b/md5.c
@@ -0,0 +1,251 @@
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest. This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ *
+ * Changed so as no longer to depend on Colin Plumb's `usual.h' header
+ * definitions; now uses stuff from dpkg's config.h.
+ * - Ian Jackson <ijackson@nyx.cs.du.edu>.
+ * Still in the public domain.
+ *
+ * Changed to no longer need things from dpkg,
+ * and made MD5Transform static...
+ * - Bernhard R. Link <brlink@debian.org>
+ * Still in public domain.
+ */
+
+#include <config.h>
+
+#include <string.h> /* for memcpy() */
+#include <sys/types.h> /* for stupid systems */
+#include <netinet/in.h> /* for ntohl() */
+
+#include "md5.h"
/* forward declaration: the core compression function defined below */
static void
MD5Transform(UWORD32 buf[4], UWORD32 const in[16]);

#ifdef WORDS_BIGENDIAN
/* Convert 'words' little-endian 32-bit words in buf to host byte order,
 * in place.  MD5 defines its input as little-endian, so this is only
 * needed (and only compiled) on big-endian hosts. */
static void
byteSwap(UWORD32 *buf, unsigned words)
{
	md5byte *p = (md5byte *)buf;

	do {
		*buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 |
			((unsigned)p[1] << 8 | p[0]);
		p += 4;
	} while (--words);
}
#else
/* I'm assuming there is only big and little endian, PDP_ENDIAN users
 * will have bad luck... */
#define byteSwap(buf,words)
#endif
+
+/*
+ * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void
+MD5Init(struct MD5Context *ctx)
+{
+ ctx->buf[0] = 0x67452301U;
+ ctx->buf[1] = 0xefcdab89U;
+ ctx->buf[2] = 0x98badcfeU;
+ ctx->buf[3] = 0x10325476U;
+
+ ctx->bytes[0] = 0;
+ ctx->bytes[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
void
MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned int len)
{
	UWORD32 t;

	/* Update byte count */

	t = ctx->bytes[0];
	if ((ctx->bytes[0] = t + len) < t)
		ctx->bytes[1]++;	/* Carry from low to high */

	t = 64 - (t & 0x3f);	/* Space available in ctx->in (at least 1) */
	if (t > len) {
		/* not enough data to fill a 64-byte block: just buffer it */
		memcpy((md5byte *)ctx->in + 64 - t, buf, len);
		return;
	}
	/* First chunk is an odd size */
	memcpy((md5byte *)ctx->in + 64 - t, buf, t);
	byteSwap(ctx->in, 16);
	MD5Transform(ctx->buf, ctx->in);
	buf += t;
	len -= t;

	/* Process data in 64-byte chunks */
	while (len >= 64) {
		memcpy(ctx->in, buf, 64);
		byteSwap(ctx->in, 16);
		MD5Transform(ctx->buf, ctx->in);
		buf += 64;
		len -= 64;
	}

	/* Handle any remaining bytes of data. */
	memcpy(ctx->in, buf, len);
}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
void
MD5Final(md5byte digest[16], struct MD5Context *ctx)
{
	int count = ctx->bytes[0] & 0x3f;	/* Number of bytes in ctx->in */
	md5byte *p = (md5byte *)ctx->in + count;

	/* Set the first char of padding to 0x80. There is always room. */
	*p++ = 0x80;

	/* Bytes of padding needed to make 56 bytes (-8..55) */
	count = 56 - 1 - count;

	if (count < 0) {	/* Padding forces an extra block */
		memset(p, 0, count + 8);
		byteSwap(ctx->in, 16);
		MD5Transform(ctx->buf, ctx->in);
		p = (md5byte *)ctx->in;
		count = 56;
	}
	memset(p, 0, count);
	byteSwap(ctx->in, 14);

	/* Append length in bits and transform */
	ctx->in[14] = ctx->bytes[0] << 3;
	ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29;
	MD5Transform(ctx->buf, ctx->in);

	/* state words are little-endian in the digest */
	byteSwap(ctx->buf, 4);
	memcpy(digest, ctx->buf, 16);
	memset(ctx, 0, sizeof(*ctx));	/* In case it's sensitive */
}
+
+#ifndef ASM_MD5
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f,w,x,y,z,in,s) \
+ (w += f(x,y,z) + in, w = (w<<s | w>>(32-s)) + x)
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
static void
MD5Transform(UWORD32 buf[4], UWORD32 const in[16])
{
	register UWORD32 a, b, c, d;

	a = buf[0];
	b = buf[1];
	c = buf[2];
	d = buf[3];

	/* Round 1: F1, shifts 7/12/17/22, message words in order */
	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);

	/* Round 2: F2, shifts 5/9/14/20, word index 5i+1 mod 16 */
	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);

	/* Round 3: F3, shifts 4/11/16/23, word index 3i+5 mod 16 */
	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);

	/* Round 4: F4, shifts 6/10/15/21, word index 7i mod 16 */
	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);

	/* add this block's result into the running state */
	buf[0] += a;
	buf[1] += b;
	buf[2] += c;
	buf[3] += d;
}
+
+#endif
diff --git a/md5.h b/md5.h
new file mode 100644
index 0000000..f2fa221
--- /dev/null
+++ b/md5.h
@@ -0,0 +1,41 @@
+/*
+ * This is the header file for the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest. This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ *
+ * Changed so as no longer to depend on Colin Plumb's `usual.h'
+ * header definitions; now uses stuff from dpkg's config.h
+ * - Ian Jackson <ijackson@nyx.cs.du.edu>.
+ * Still in the public domain.
+ */
+
#ifndef MD5_H
#define MD5_H

/* size of an MD5 digest in bytes */
#define MD5_DIGEST_SIZE 16

/* historic macro "typedefs" kept from Colin Plumb's original code */
#define md5byte unsigned char
#define UWORD32 unsigned int

struct MD5Context {
	UWORD32 buf[4];		/* current hash state (A, B, C, D) */
	UWORD32 bytes[2];	/* 64-bit byte count: low word, high word */
	UWORD32 in[16];		/* 64-byte input block being accumulated */
};

void MD5Init(/*@out@*/struct MD5Context *context);
void MD5Update(struct MD5Context *context, md5byte const *buf, unsigned int len);
void MD5Final(/*@out@*/unsigned char digest[MD5_DIGEST_SIZE], struct MD5Context *context);

#endif /* !MD5_H */
diff --git a/mprintf.c b/mprintf.c
new file mode 100644
index 0000000..581d9ae
--- /dev/null
+++ b/mprintf.c
@@ -0,0 +1,70 @@
+#include <config.h>
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include "mprintf.h"
+
+// TODO: check for asprintf in configure and
+// write a replacement for such situations.
+
/* printf into a freshly malloc'd string.
 * Returns NULL on formatting or allocation error; caller frees. */
char * mprintf(const char *fmt, ...) {
	char *result;
	int count;
	va_list ap;

	va_start(ap, fmt);
	count = vasprintf(&result, fmt, ap);
	va_end(ap);
	/* return NULL both when count is < 0 and when NULL was returned */
	return (count < 0) ? NULL : result;
}
+
/* va_list variant of mprintf(): vasprintf into a fresh string,
 * returning NULL on any error; caller frees. */
char * vmprintf(const char *fmt, va_list va) {
	char *result;
	int count;

	count = vasprintf(&result, fmt, va);
	/* return NULL both when count is < 0 and when NULL was returned */
	return (count < 0) ? NULL : result;
}
+
#ifndef HAVE_DPRINTF
/* Fallback for systems without dprintf(3): format into a heap buffer
 * via vmprintf() and write() it to fd.
 * Returns the write() result (bytes written) or -1 if formatting or
 * allocation failed.  A short write is not retried. */
int dprintf(int fd, const char *format, ...){
	char *buffer;
	int ret;

	va_list va;

	va_start(va, format);
	buffer = vmprintf(format, va);
	va_end(va);
	if (buffer == NULL)
		return -1;
	ret = write(fd, buffer, strlen(buffer));
	free(buffer);
	return ret;
}
#endif
+
#ifndef HAVE_STRNDUP
/* Fallback strndup(3) for systems lacking it.  Copies at most n
 * characters of str, stopping at the terminating NUL like the POSIX
 * function does (the previous version copied n bytes unconditionally,
 * reading past the end of strings shorter than n).
 * Returns a NUL-terminated malloc'd copy, or NULL on allocation failure. */
char *strndup(const char *str, size_t n) {
	size_t len = 0;
	char *r;

	/* length of str, but never look further than n characters */
	while (len < n && str[len] != '\0')
		len++;
	r = malloc(len + 1);
	if (r == NULL)
		return r;
	memcpy(r, str, len);
	r[len] = '\0';
	return r;
}
#endif
diff --git a/mprintf.h b/mprintf.h
new file mode 100644
index 0000000..4d01be0
--- /dev/null
+++ b/mprintf.h
@@ -0,0 +1,12 @@
#ifndef REPREPRO_MPRINTF
#define REPREPRO_MPRINTF

#include <stdarg.h>

/* This is just an asprintf-wrapper to be more easily used
 * and always returns NULL on error */

/*@null@*/char * mprintf(const char *, ...) __attribute__ ((format (printf, 1, 2)));
/*@null@*/char * vmprintf(const char *, va_list);

#endif
diff --git a/names.c b/names.c
new file mode 100644
index 0000000..1f214de
--- /dev/null
+++ b/names.c
@@ -0,0 +1,150 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <assert.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+
+/* "str1.str2"; NULL on allocation failure. */
+char *calc_addsuffix(const char *str1, const char *str2) {
+	return mprintf("%s.%s", str1, str2);
+}
+
+/* "str1/str2"; NULL on allocation failure. */
+char *calc_dirconcat(const char *str1, const char *str2) {
+	return mprintf("%s/%s", str1, str2);
+}
+
+/* "str1/str2/str3"; NULL on allocation failure. */
+char *calc_dirconcat3(const char *str1, const char *str2, const char *str3) {
+	return mprintf("%s/%s/%s", str1, str2, str3);
+}
+
+/* Create a strlist consisting out of calc_dirconcat'ed entries of the old */
+/* Create a strlist consisting out of calc_dirconcat'ed entries of the old.
+ * Fills *files with "directory/basefilename" for every entry; on any
+ * error *files is released again.  Returns RET_NOTHING for an empty
+ * input list, RET_OK otherwise. */
+retvalue calc_dirconcats(const char *directory, const struct strlist *basefilenames,
+			struct strlist *files) {
+	retvalue r;
+	int i;
+
+	assert (directory != NULL && basefilenames != NULL && files != NULL);
+
+	r = strlist_init_n(basefilenames->count, files);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = RET_NOTHING;
+	for (i = 0 ; i < basefilenames->count ; i++) {
+		char *file;
+
+		file = calc_dirconcat(directory, basefilenames->values[i]);
+		if (FAILEDTOALLOC(file)) {
+			strlist_done(files);
+			return RET_ERROR_OOM;
+		}
+		r = strlist_add(files, file);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(files);
+			return r;
+		}
+	}
+	return r;
+
+}
+
+/* Replace every entry of files with "directory/entry" in place.
+ * NOTE(review): on OOM the list is returned partially rewritten
+ * (entries before the failure already carry the prefix). */
+retvalue calc_inplacedirconcats(const char *directory, struct strlist *files) {
+	int i;
+
+	assert (directory != NULL && files != NULL );
+	for (i = 0 ; i < files->count ; i++) {
+		char *file;
+
+		file = calc_dirconcat(directory, files->values[i]);
+		if (FAILEDTOALLOC(file))
+			return RET_ERROR_OOM;
+		free(files->values[i]);
+		files->values[i] = file;
+	}
+	return RET_OK;
+}
+
+/* Advance *version past the longest prefix that looks like a version:
+ * an optional numeric epoch followed by ':', then alphanumerics and
+ * the characters ".~-+".  ':' is only accepted later on if an epoch was
+ * seen here, or if epochsuppressed says the caller already stripped one. */
+void names_overversion(const char **version, bool epochsuppressed) {
+	const char *n = *version;
+	bool hadepoch = epochsuppressed;
+
+	if (*n < '0' || *n > '9') {
+		/* must at least start with a digit or a letter */
+		if ((*n < 'a' || *n > 'z') && (*n < 'A' || *n > 'Z'))
+			return;
+	} else
+		n++;
+	while (*n >= '0' && *n <= '9')
+		n++;
+	if (*n == ':') {
+		hadepoch = true;
+		n++;
+	}
+	while ((*n >= '0' && *n <= '9') || (*n >= 'a' && *n <= 'z')
+			|| (*n >= 'A' && *n <= 'Z') || *n == '.' || *n == '~'
+			|| *n == '-' || *n == '+' || (hadepoch && *n == ':'))
+		n++;
+	*version = n;
+}
+
+/* "codename sourcename sourceversion"; NULL on allocation failure. */
+char *calc_trackreferee(const char *codename, const char *sourcename, const char *sourceversion) {
+	return mprintf("%s %s %s", codename, sourcename, sourceversion);
+}
+
+/* Build "<name>_<version>_<arch1>+<arch2>+....changes".
+ * Returns NULL on allocation failure; caller frees the result. */
+char *calc_changes_basename(const char *name, const char *version, const struct strlist *architectures) {
+	size_t name_l, version_l, l;
+	int i;
+	char *n, *p;
+
+	name_l = strlen(name);
+	version_l = strlen(version);
+	/* sizeof("__.changes") also counts the terminating NUL byte */
+	l = name_l + version_l + sizeof("__.changes");
+
+	for (i = 0 ; i < architectures->count ; i++) {
+		l += strlen(architectures->values[i]);
+		if (i != 0)
+			l++;	/* '+' separator between architectures */
+	}
+	n = malloc(l);
+	if (FAILEDTOALLOC(n))
+		return n;
+	p = n;
+	memcpy(p, name, name_l); p+=name_l;
+	*(p++) = '_';
+	memcpy(p, version, version_l); p+=version_l;
+	*(p++) = '_';
+	for (i = 0 ; i < architectures->count ; i++) {
+		size_t a_l = strlen(architectures->values[i]);
+		if (i != 0)
+			*(p++) = '+';
+		assert ((size_t)((p+a_l)-n) < l);
+		memcpy(p, architectures->values[i], a_l);
+		p += a_l;
+	}
+	assert ((size_t)(p-n) < l-8);
+	/* 9 bytes: ".changes" plus its terminating NUL */
+	memcpy(p, ".changes", 9); p += 9;
+	assert (*(p-1) == '\0');
+	assert ((size_t)(p-n) == l);
+	return n;
+}
diff --git a/names.h b/names.h
new file mode 100644
index 0000000..f03761a
--- /dev/null
+++ b/names.h
@@ -0,0 +1,38 @@
+#ifndef REPREPRO_NAMES_H
+#define REPREPRO_NAMES_H
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+char *calc_addsuffix(const char *, const char *);
+char *calc_dirconcat(const char *, const char *);
+char *calc_dirconcat3(const char *, const char *, const char *);
+
+char *calc_changes_basename(const char *, const char *, const struct strlist *);
+char *calc_trackreferee(const char *, const char *, const char *);
+#define calc_snapshotbasedir(codename, name) mprintf("%s/%s/snapshots/%s", global.distdir, codename, name)
+
+
+/* Create a strlist consisting out of calc_dirconcat'ed entries of the old */
+retvalue calc_dirconcats(const char *, const struct strlist *, /*@out@*/struct strlist *);
+retvalue calc_inplacedirconcats(const char *, struct strlist *);
+
+/* move over a version number,
+ * if epochsuppressed is true, colons may happen even without epoch there */
+void names_overversion(const char **, bool /*epochsuppressed*/);
+
+/* check for forbidden characters */
+retvalue propersourcename(const char *);
+retvalue properfilenamepart(const char *);
+retvalue properfilename(const char *);
+retvalue properfilenames(const struct strlist *);
+retvalue properpackagename(const char *);
+retvalue properversion(const char *);
+
+/* true iff name ends in suffix and is strictly longer than suffix */
+static inline bool endswith(const char *name, const char *suffix) {
+	size_t ln = strlen(name), ls = strlen(suffix);
+	return ln > ls && strcmp(name + (ln - ls), suffix) == 0;
+}
+
+#endif
diff --git a/needbuild.c b/needbuild.c
new file mode 100644
index 0000000..fa8801a
--- /dev/null
+++ b/needbuild.c
@@ -0,0 +1,303 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009,2012,2013,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <limits.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+
+#include "error.h"
+#include "atoms.h"
+#include "strlist.h"
+#include "chunks.h"
+#include "trackingt.h"
+#include "tracking.h"
+#include "globmatch.h"
+#include "package.h"
+#include "needbuild.h"
+
+/* list all source packages in a distribution that needs buildd action
+
+ For each source package check:
+ - if tracking is enabled and there is a .log or .changes file
+ for the given arch -> SKIP
+ - if there is a binary package for the given architecture -> SKIP
+ - if the package's Architecture field excludes this arch -> SKIP
+ - if the package's Binary field only lists existing ones
+ (i.e. architecture all) -> SKIP
+*/
+
+/* Decide, based on a source package's tracking data (its recorded file
+ * keys and their types), whether this source still needs to be built for
+ * the given architecture.  Prints a "needs build" line and returns RET_OK
+ * if so, RET_NOTHING if something for the architecture already exists. */
+static retvalue tracked_source_needs_build(architecture_t architecture, const char *sourcename, const char *sourceversion, const char *dscfilename, const struct strlist *binary, const struct trackedpackage *tp, bool printarch) {
+	bool found_binary[binary->count];
+	const char *archstring = atoms_architectures[architecture];
+	size_t archstringlen= strlen(archstring);
+	int i;
+
+	memset(found_binary, 0, sizeof(bool)*binary->count);
+	for (i = 0 ; i < tp->filekeys.count ; i++) {
+		enum filetype ft = tp->filetypes[i];
+		const char *fk = tp->filekeys.values[i];
+
+		if (ft == ft_XTRA_DATA)
+			continue;
+		if (ft == ft_ALL_BINARY) {
+			int j;
+
+			if (architecture == architecture_all) {
+				/* found an _all.deb, nothing to do */
+				return RET_NOTHING;
+			}
+
+			/* determine which binary files are arch all
+			   packages: */
+			for (j = 0 ; j < binary->count ; j++) {
+				const char *b = binary->values[j];
+				size_t l = strlen(b);
+
+				/* file key starts with "<binaryname>_" */
+				if (strncmp(fk, b, l) == 0 &&
+				    fk[l] == '_')
+					found_binary[j] = true;
+			}
+			continue;
+		}
+		if (ft == ft_ARCH_BINARY) {
+			/* architecture is the part after the last '_',
+			 * up to the '.' before the file extension */
+			const char *a = strrchr(fk, '_');
+
+			if (a == NULL)
+				continue;
+			a++;
+			if (strncmp(a, archstring, archstringlen) != 0 ||
+			    a[archstringlen] != '.')
+				continue;
+			/* found an .deb with this architecture,
+			   so nothing is to be done */
+			return RET_NOTHING;
+		}
+		if (ft == ft_LOG || ft == ft_BUILDINFO || ft == ft_CHANGES) {
+			/* these may list several architectures joined
+			 * with '+' before the file extension */
+			const char *a = strrchr(fk, '_');
+			const char *e;
+
+			if (a == NULL)
+				continue;
+			a++;
+			while ((e = strchr(a, '+')) != NULL) {
+				if ((size_t)(e-a) != archstringlen) {
+					a = e+1;
+					continue;
+				}
+				if (memcmp(a, archstring, archstringlen) != 0){
+					a = e+1;
+					continue;
+				}
+				/* found something for this architecture */
+				return RET_NOTHING;
+			}
+			e = strchr(a, '.');
+			if (e == NULL)
+				continue;
+			if ((size_t)(e-a) != archstringlen) {
+				a = e+1;
+				continue;
+			}
+			if (memcmp(a, archstring, archstringlen) != 0){
+				a = e+1;
+				continue;
+			}
+			/* found something for this architecture */
+			return RET_NOTHING;
+		}
+	}
+	/* nothing for this architecture was found, check if it has any binary
+	   packages that are lacking: */
+	for (i = 0 ; i < binary->count ; i++) {
+		if (!found_binary[i]) {
+			if (printarch)
+				printf("%s %s %s %s\n",
+					sourcename, sourceversion,
+					dscfilename, archstring);
+			else
+				printf("%s %s %s\n",
+					sourcename, sourceversion,
+					dscfilename);
+			return RET_OK;
+		}
+	}
+	/* all things listed in Binary already exists, nothing to do: */
+	return RET_NOTHING;
+}
+
+/* per-run context handed to check_source_needs_build via package_foreach_c */
+struct needbuild_data { architecture_t architecture;
+	trackingdb tracks;	/* NULL if no tracking data is available */
+	/*@null@*/ const char *glob;	/* optional source-name filter */
+	bool printarch;	/* also print the architecture in output lines */
+};
+
+/* package_foreach_c callback: check one source package and print it if it
+ * still needs building for d->architecture (see file header for the
+ * skip conditions). */
+static retvalue check_source_needs_build(struct package *package, void *data) {
+	struct target *target = package->target;
+	struct needbuild_data *d = data;
+	struct strlist binary, architectures, filekeys;
+	const char *dscfilename = NULL;
+	int i;
+	retvalue r;
+
+	if (d->glob != NULL && !globmatch(package->name, d->glob))
+		return RET_NOTHING;
+
+	r = package_getversion(package);
+	if (!RET_IS_OK(r))
+		return r;
+	/* honour the source's Architecture field: skip if the requested
+	 * architecture cannot match any entry */
+	r = chunk_getwordlist(package->control, "Architecture", &architectures);
+	if (RET_IS_OK(r)) {
+		bool skip = true;
+		const char *req = atoms_architectures[d->architecture];
+		const char *hyphen, *os;
+		size_t osl;
+
+		/* split "os-cpu"; a plain cpu implies os "linux" */
+		hyphen = strchr(req, '-');
+		if (hyphen == NULL) {
+			os = "linux";
+			osl = 5;
+		} else {
+			os = req;
+			osl = hyphen - req;
+		}
+
+		for (i = 0 ; i < architectures.count ; i++) {
+			const char *a = architectures.values[i];
+
+			if (strcmp(a, req) == 0) {
+				skip = false;
+				break;
+			}
+			/* "all" is not part of "any" or "*-any" */
+			if (d->architecture == architecture_all)
+				continue;
+			if (strcmp(a, "any") == 0) {
+				skip = false;
+				break;
+			}
+
+			size_t al = strlen(a);
+
+			/* wildcard entries like "linux-any" */
+			if (al < 4 || memcmp(a + al - 4, "-any", 4) != 0)
+				continue;
+
+			if (al == osl + 4 && memcmp(a, os, osl) == 0) {
+				skip = false;
+				break;
+			}
+		}
+		strlist_done(&architectures);
+		if (skip) {
+			return RET_NOTHING;
+		}
+	}
+	r = chunk_getwordlist(package->control, "Binary", &binary);
+	if (!RET_IS_OK(r)) {
+		return r;
+	}
+	r = target->getfilekeys(package->control, &filekeys);
+	if (!RET_IS_OK(r)) {
+		strlist_done(&binary);
+		return r;
+	}
+	/* locate the .dsc among the source's file keys */
+	for (i = 0 ; i < filekeys.count ; i++) {
+		if (endswith(filekeys.values[i], ".dsc")) {
+			dscfilename = filekeys.values[i];
+			break;
+		}
+	}
+	if (dscfilename == NULL) {
+		fprintf(stderr,
+"Warning: source package '%s' in '%s' without dsc file!\n",
+				package->name, target->identifier);
+		strlist_done(&binary);
+		strlist_done(&filekeys);
+		return RET_NOTHING;
+	}
+
+	if (d->tracks != NULL) {
+		struct trackedpackage *tp;
+
+		r = tracking_get(d->tracks, package->name, package->version, &tp);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&binary);
+			strlist_done(&filekeys);
+			return r;
+		}
+		if (RET_IS_OK(r)) {
+			r = tracked_source_needs_build(
+					d->architecture, package->name,
+					package->version, dscfilename,
+					&binary, tp, d->printarch);
+			trackedpackage_free(tp);
+			strlist_done(&binary);
+			strlist_done(&filekeys);
+			return r;
+		}
+		fprintf(stderr,
+"Warning: %s's tracking data of %s (%s) is out of date. Run retrack to repair!\n",
+				target->distribution->codename,
+				package->name, package->version);
+	}
+	// TODO: implement without tracking
+	strlist_done(&binary);
+	strlist_done(&filekeys);
+	return RET_NOTHING;
+}
+
+
+/* List all source packages of a distribution that still need a build for
+ * the given architecture.  Requires source package tracking to be
+ * enabled for the distribution. */
+retvalue find_needs_build(struct distribution *distribution, architecture_t architecture, const struct atomlist *onlycomponents, const char *glob, bool printarch) {
+	retvalue result, r;
+	struct needbuild_data d;
+
+	d.architecture = architecture;
+	d.glob = glob;
+	d.printarch = printarch;
+
+	if (distribution->tracking == dt_NONE) {
+		fprintf(stderr,
+"ERROR: '%s' has no source package Tracking enabled and\n"
+"build-needing is currently only implemented for distributions where\n"
+"this is enabled.\n"
+"(i.e. you need to add e.g. Tracking: minimal in conf/distribution\n"
+"and run retrack (and repeat running it after every update and pull.)\n",
+			distribution->codename);
+		return RET_ERROR;
+	}
+
+	/* NOTE(review): after the early return above this condition is
+	 * always true, so the else branch below is dead code */
+	if (distribution->tracking != dt_NONE) {
+		r = tracking_initialize(&d.tracks, distribution, true);
+		if (RET_WAS_ERROR(r))
+			return r;
+		if (r == RET_NOTHING)
+			d.tracks = NULL;
+	} else
+		d.tracks = NULL;
+
+	result = package_foreach_c(distribution,
+			onlycomponents, architecture_source, pt_dsc,
+			check_source_needs_build, &d);
+
+	r = tracking_done(d.tracks, distribution);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
diff --git a/needbuild.h b/needbuild.h
new file mode 100644
index 0000000..f8470d9
--- /dev/null
+++ b/needbuild.h
@@ -0,0 +1,16 @@
+#ifndef REPREPRO_NEEDBUILD_H
+#define REPREPRO_NEEDBUILD_H
+
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+
+retvalue find_needs_build(struct distribution *, architecture_t, const struct atomlist *, /*@null@*/const char *glob, bool printarch);
+
+#endif
diff --git a/optionsfile.c b/optionsfile.c
new file mode 100644
index 0000000..e9099a7
--- /dev/null
+++ b/optionsfile.c
@@ -0,0 +1,128 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005,2006 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+
+#include "error.h"
+#include "names.h"
+#include "optionsfile.h"
+
+/* Read <directory>/options and feed each "name [argument]" line through
+ * handle_option, checking names and argument presence against longopts
+ * (getopt_long semantics).  A missing options file is fine; any parse
+ * or read problem terminates the program. */
+void optionsfile_parse(const char *directory, const struct option *longopts, void handle_option(int, const char *)) {
+	FILE *f;
+	char *filename;
+	char buffer[1000];
+	int linenr = 0;
+	const struct option *option;
+
+	filename = calc_dirconcat(directory, "options");
+	if (FAILEDTOALLOC(filename)) {
+		(void)fputs("Out of memory!\n", stderr);
+		exit(EXIT_FAILURE);
+	}
+
+	f = fopen(filename, "r");
+	if (f == NULL) {
+		/* no options file is not an error */
+		free(filename);
+		return;
+	}
+	while (fgets(buffer, 999, f) != NULL) {
+		size_t l;
+		char *optionname, *argument;
+
+		linenr++;
+
+		l = strlen(buffer);
+		if (l == 0 || buffer[l-1] != '\n') {
+			fprintf(stderr,
+"%s:%d: Ignoring too long (or incomplete) line.\n",
+					filename, linenr);
+			/* consume the remainder of the overlong line */
+			do {
+				if (fgets(buffer, 999, f) == NULL)
+					break;
+				l = strlen(buffer);
+			} while (l > 0 && buffer[l-1] != '\n');
+			continue;
+		}
+		/* strip newline and trailing whitespace */
+		do{
+			buffer[l-1] = '\0';
+			l--;
+		} while (l > 0 && xisspace(buffer[l-1]));
+
+		if (l == 0)
+			continue;
+
+		optionname = buffer;
+		while (*optionname != '\0' && xisspace(*optionname))
+			optionname++;
+		assert (*optionname != '\0');
+		if (*optionname == '#' || *optionname == ';')
+			continue;
+		/* split "name argument", NUL-terminating the name */
+		argument = optionname;
+		while (*argument != '\0' && !xisspace(*argument))
+			argument++;
+		while (*argument != '\0' && xisspace(*argument)) {
+			*argument = '\0';
+			argument++;
+		}
+		if (*argument == '\0')
+			argument = NULL;
+		option = longopts;
+		while (option->name != NULL && strcmp(option->name, optionname) != 0)
+			option++;
+		if (option->name == NULL) {
+			fprintf(stderr, "%s:%d: unknown option '%s'!\n",
+					filename, linenr, optionname);
+			exit(EXIT_FAILURE);
+		}
+		if (option->has_arg==no_argument && argument != NULL) {
+			fprintf(stderr,
+"%s:%d: option '%s' has an unexpected argument '%s'!\n",
+					filename, linenr, optionname, argument);
+			exit(EXIT_FAILURE);
+		}
+		if (option->has_arg==required_argument && argument == NULL) {
+			fprintf(stderr,
+"%s:%d: option '%s' is missing an argument!\n",
+					filename, linenr, optionname);
+			exit(EXIT_FAILURE);
+		}
+		if (option->flag == NULL)
+			handle_option(option->val, argument);
+		else {
+			*option->flag = option->val;
+			handle_option(0, argument);
+		}
+	}
+	if (ferror(f) != 0) {
+		/* ferror() only reports a flag, not an error number, so
+		 * (unlike before) do not run its result through strerror() */
+		fprintf(stderr, "%s: error while reading config file!\n",
+				filename);
+		exit(EXIT_FAILURE);
+	}
+	if (fclose(f) != 0) {
+		int e = errno;
+		fprintf(stderr, "%s: error while reading config file: %d=%s\n",
+				filename, e, strerror(e));
+		exit(EXIT_FAILURE);
+	}
+	free(filename);
+}
diff --git a/optionsfile.h b/optionsfile.h
new file mode 100644
index 0000000..4a60914
--- /dev/null
+++ b/optionsfile.h
@@ -0,0 +1,13 @@
+#ifndef REPREPRO_OPTIONSFILE_H
+#define REPREPRO_OPTIONSFILE_H
+
+#include <getopt.h>
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+
+void optionsfile_parse(const char * /*directory*/, const struct option *, void handle_option(int, const char *));
+
+#endif /*REPREPRO_OPTIONSFILE_H*/
diff --git a/outhook.c b/outhook.c
new file mode 100644
index 0000000..2ca6d1f
--- /dev/null
+++ b/outhook.c
@@ -0,0 +1,196 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "error.h"
+#include "filecntl.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "hooks.h"
+#include "outhook.h"
+
+static FILE *outlogfile = NULL;
+static char *outlogfilename = NULL;
+static bool outlognonempty = false;
+
+/* Open a new, uniquely named .outlog file in global.logdir and make it
+ * the current log (module state outlogfile/outlogfilename).
+ * Must not be called while a log is already open. */
+retvalue outhook_start(void) {
+	retvalue r;
+	int fd;
+	char *template;
+
+	assert (outlogfilename == NULL);
+	assert (outlogfile == NULL);
+
+	r = dirs_create(global.logdir);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* "<logdir>/<timestamp>-XXXXXX.outlog"; mkstemps fills the X's,
+	 * 7 = strlen(".outlog") suffix length */
+	template = mprintf("%s/%010llu-XXXXXX.outlog",
+			global.logdir, (unsigned long long)time(NULL));
+	if (FAILEDTOALLOC(template))
+		return RET_ERROR_OOM;
+	fd = mkstemps(template, 7);
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d creating new file in %s: %s\n",
+				e, global.logdir, strerror(e));
+		free(template);
+		return RET_ERRNO(e);
+	}
+	outlogfile = fdopen(fd, "w");
+	if (outlogfile == NULL) {
+		int e = errno;
+		(void)close(fd);
+		fprintf(stderr, "Error %d from fdopen: %s\n",
+				e, strerror(e));
+		free(template);
+		return RET_ERRNO(e);
+	}
+	outlogfilename = template;
+	return RET_OK;
+}
+
+/* Append one tab-separated record "command arg1 [arg2 [arg3]]" to the
+ * current log.  Silently a no-op when no log is open. */
+void outhook_send(const char *command, const char *arg1, const char *arg2, const char *arg3) {
+	assert (command != NULL);
+	assert (arg1 != NULL);
+	assert (arg3 == NULL || arg2 != NULL);
+	if (outlogfile == NULL)
+		return;
+
+	if (arg2 == NULL)
+		fprintf(outlogfile, "%s\t%s\n", command, arg1);
+	else if (arg3 == NULL)
+		fprintf(outlogfile, "%s\t%s\t%s\n", command, arg1, arg2);
+	else
+		fprintf(outlogfile, "%s\t%s\t%s\t%s\n", command,
+				arg1, arg2, arg3);
+	outlognonempty = true;
+}
+
+/* Log a POOLNEW record for a file added to the pool, reconstructing the
+ * pool path "pool/<component>/<prefix>/<source>/<name>" where <prefix>
+ * is "lib<x>" for sources named lib*, else the first source letter. */
+void outhook_sendpool(component_t component, const char *sourcename, const char *name) {
+	assert (name != NULL);
+	if (outlogfile == NULL)
+		return;
+	if (sourcename == NULL || *sourcename == '\0')
+		fprintf(outlogfile, "POOLNEW\t%s\n", name);
+	else if (sourcename[0] == 'l' && sourcename[1] == 'i' &&
+		 sourcename[2] == 'b' && sourcename[3] != '\0')
+		fprintf(outlogfile, "POOLNEW\tpool/%s/lib%c/%s/%s\n",
+				atoms_components[component],
+				sourcename[3], sourcename, name);
+	else
+		fprintf(outlogfile, "POOLNEW\tpool/%s/%c/%s/%s\n",
+				atoms_components[component],
+				sourcename[0], sourcename, name);
+	outlognonempty = true;
+}
+
+/* Fork and exec the out-hook script with the log file name as its only
+ * argument, then wait for it.  RET_OK only on exit status 0. */
+static retvalue callouthook(const char *scriptname, const char *logfilename) {
+	pid_t child;
+
+	child = fork();
+	if (child == 0) {
+		/* Try to close all open fd but 0,1,2 */
+		closefrom(3);
+		sethookenvironment(causingfile, NULL, NULL, NULL);
+		(void)execl(scriptname, scriptname, logfilename, (char*)NULL);
+		{
+			int e = errno;
+			fprintf(stderr, "Error %d executing '%s': %s\n",
+					e, scriptname, strerror(e));
+		}
+		/* child must not fall back into the caller's logic */
+		_exit(255);
+	}
+	if (child < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d forking: %s!\n", e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	while (true) {
+		int status;
+		pid_t pid;
+
+		pid = waitpid(child, &status, 0);
+		if (pid == child) {
+			if (WIFEXITED(status)) {
+				if (WEXITSTATUS(status) == 0) {
+					return RET_OK;
+				}
+				fprintf(stderr,
+"Outhook '%s' '%s' failed with exit code %d!\n",
+						scriptname, logfilename,
+						(int)(WEXITSTATUS(status)));
+			} else if (WIFSIGNALED(status)) {
+				fprintf(stderr,
+"Outhook '%s' '%s' killed by signal %d!\n",
+						scriptname, logfilename,
+						(int)(WTERMSIG(status)));
+			} else {
+				fprintf(stderr,
+"Outhook '%s' '%s' failed!\n",
+						scriptname, logfilename);
+			}
+			return RET_ERROR;
+		} else if (pid == (pid_t)-1) {
+			int e = errno;
+
+			/* retry if the wait was merely interrupted */
+			if (e == EINTR)
+				continue;
+			fprintf(stderr,
+"Error %d calling waitpid on outhook child: %s\n",
+					e, strerror(e));
+			return RET_ERRNO(e);
+		}
+	}
+	/* NOT REACHED */
+}
+
+/* Close the current log and hand it to the hook script.  An empty log
+ * is simply deleted (unlink result intentionally unchecked) and counts
+ * as success.  Always resets the module state. */
+retvalue outhook_call(const char *scriptname) {
+	retvalue result;
+
+	assert (outlogfile != NULL);
+	assert (outlogfilename != NULL);
+
+	if (ferror(outlogfile) != 0) {
+		(void)fclose(outlogfile);
+		fprintf(stderr, "Errors creating '%s'!\n",
+				outlogfilename);
+		result = RET_ERROR;
+	} else if (fclose(outlogfile) != 0) {
+		fprintf(stderr, "Errors creating '%s'!\n",
+				outlogfilename);
+		result = RET_ERROR;
+	} else if (!outlognonempty) {
+		unlink(outlogfilename);
+		result = RET_OK;
+	} else {
+		result = callouthook(scriptname, outlogfilename);
+	}
+	outlogfile = NULL;
+	free(outlogfilename);
+	outlogfilename = NULL;
+	return result;
+}
diff --git a/outhook.h b/outhook.h
new file mode 100644
index 0000000..eb66c15
--- /dev/null
+++ b/outhook.h
@@ -0,0 +1,13 @@
+#ifndef REPREPRO_OUTHOOK_H
+#define REPREPRO_OUTHOOK_H
+
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+retvalue outhook_start(void);
+void outhook_send(const char *, const char *, const char *, const char *);
+void outhook_sendpool(component_t, const char *, const char *);
+retvalue outhook_call(const char *);
+
+#endif
diff --git a/override.c b/override.c
new file mode 100644
index 0000000..0a228b7
--- /dev/null
+++ b/override.c
@@ -0,0 +1,410 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#include <time.h>
+#include <search.h>
+#include "error.h"
+#include "chunks.h"
+#include "sources.h"
+#include "names.h"
+#include "globmatch.h"
+#include "override.h"
+#include "configparser.h"
+
+struct overridedata {
+	/* flat list of (fieldname, value) pairs stored alternately,
+	 * see override_get() */
+	struct strlist fields;
+};
+
+/* an override entry for one exact package name */
+struct overridepackage {
+	char *packagename;
+	struct overridedata data;
+};
+
+/* an override entry for a glob pattern of package names */
+struct overridepattern {
+	struct overridepattern *next;
+	char *pattern;
+	struct overridedata data;
+};
+
+struct overridefile {
+	/* a <search.h> tree root of struct overridepackage */
+	void *packages;
+	/* singly linked list, searched in file order */
+	struct overridepattern *patterns;
+};
+
+#ifdef HAVE_TDESTROY
+/* tdestroy() callback: release one tree node and its payload */
+static void freeoverridepackage(void *n) {
+	struct overridepackage *p = n;
+
+	free(p->packagename);
+	strlist_done(&p->data.fields);
+	free(p);
+}
+#endif
+
+/* Release an overridefile and everything it owns.  NULL is allowed.
+ * NOTE(review): without HAVE_TDESTROY the package tree nodes leak, as
+ * there is no portable way to walk-and-free a <search.h> tree. */
+void override_free(struct overridefile *info) {
+	struct overridepattern *i;
+
+	if (info == NULL)
+		return;
+
+#ifdef HAVE_TDESTROY
+	tdestroy(info->packages, freeoverridepackage);
+#endif
+	/* pop and free each pattern from the linked list; the previous
+	 * 'if (i == NULL) return;' inside the loop was unreachable, as the
+	 * loop condition already guarantees i != NULL */
+	while ((i = info->patterns) != NULL) {
+		strlist_done(&i->data.fields);
+		free(i->pattern);
+		info->patterns = i->next;
+		free(i);
+	}
+	free(info);
+}
+
+/* Fields that override files may never set, because reprepro computes
+ * them itself: identity fields always, plus the file-list/checksum
+ * fields of the respective index type (source vs binary). */
+static bool forbidden_field_name(bool source, const char *field) {
+	if (strcasecmp(field, "Package") == 0)
+		return true;
+	if (strcasecmp(field, "Version") == 0)
+		return true;
+	if (source) {
+		if (strcasecmp(field, "Files") == 0)
+			return true;
+		if (strcasecmp(field, "Directory") == 0)
+			return true;
+		if (strcasecmp(field, "Checksums-Sha256") == 0)
+			return true;
+		if (strcasecmp(field, "Checksums-Sha1") == 0)
+			return true;
+		return false;
+	} else {
+		if (strcasecmp(field, "Filename") == 0)
+			return true;
+		if (strcasecmp(field, "MD5sum") == 0)
+			return true;
+		if (strcasecmp(field, "SHA1") == 0)
+			return true;
+		if (strcasecmp(field, "SHA256") == 0)
+			return true;
+		if (strcasecmp(field, "Size") == 0)
+			return true;
+		return false;
+	}
+}
+
+/* Append one (fieldname, value) pair to data->fields, validating the
+ * field name ('$'-prefixed names are special directives: $Delete,
+ * $Component; unknown ones only produce a warning). */
+static retvalue add_override_field(struct overridedata *data, const char *secondpart, const char *thirdpart, bool source) {
+	retvalue r;
+	char *p;
+
+	if (forbidden_field_name(source, secondpart)) {
+		fprintf(stderr,
+"Error: field '%s' not allowed in override files.\n",
+				secondpart);
+		return RET_ERROR;
+	}
+	if (secondpart[0] == '$') {
+		if (strcasecmp(secondpart, "$Delete") == 0) {
+			/* for $Delete the *value* names the field to drop */
+			if (forbidden_field_name(source, thirdpart)) {
+				fprintf(stderr,
+"Error: field '%s' not allowed in override files (not even as to be deleted).\n",
+						thirdpart);
+				return RET_ERROR;
+			}
+		} else if (strcasecmp(secondpart, "$Component") != 0) {
+			fprintf(stderr,
+"Warning: special override field '%s' unknown and will be ignored\n",
+					secondpart);
+		}
+	}
+	p = strdup(secondpart);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	r = strlist_add(&data->fields, p);
+	if (RET_WAS_ERROR(r))
+		return r;
+	p = strdup(thirdpart);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	r = strlist_add(&data->fields, p);
+	return r;
+}
+
+/* Allocate an overridepackage with a copy of name; fields member is
+ * left zeroed for the caller to initialize.  NULL on OOM. */
+static struct overridepackage *new_package(const char *name) {
+	struct overridepackage *p;
+
+	p = zNEW(struct overridepackage);
+	if (FAILEDTOALLOC(p))
+		return NULL;
+	p->packagename = strdup(name);
+	if (FAILEDTOALLOC(p->packagename)) {
+		free(p);
+		return NULL;
+	}
+	return p;
+}
+
+/* tsearch/tfind ordering: compare overridepackages by package name */
+static int opackage_compare(const void *a, const void *b) {
+	const struct overridepackage *p1 = a, *p2 = b;
+
+	return strcmp(p1->packagename, p2->packagename);
+}
+
+/* Record one override line: firstpart is the package name (or, if it
+ * contains a glob metacharacter, a pattern), secondpart/thirdpart the
+ * field and its value.  Entries for the same name/pattern accumulate. */
+static retvalue add_override(struct overridefile *i, const char *firstpart, const char *secondpart, const char *thirdpart, bool source) {
+	struct overridepackage *pkg, **node;
+	retvalue r;
+	const char *c;
+	struct overridepattern *p, **l;
+
+	/* glob metacharacters make this a pattern rather than a name */
+	c = firstpart;
+	while (*c != '\0' && *c != '*' && *c != '[' && *c != '?')
+		c++;
+	if (*c != '\0') {
+		/* This is a pattern, put into the pattern list */
+		l = &i->patterns;
+		while ((p = *l) != NULL
+		       && strcmp(p->pattern, firstpart) != 0) {
+			l = &p->next;
+		}
+		if (p == NULL) {
+			p = zNEW(struct overridepattern);
+			if (FAILEDTOALLOC(p))
+				return RET_ERROR_OOM;
+			p->pattern = strdup(firstpart);
+			if (FAILEDTOALLOC(p->pattern)) {
+				free(p);
+				return RET_ERROR_OOM;
+			}
+		}
+		r = add_override_field(&p->data,
+				secondpart, thirdpart, source);
+		if (RET_WAS_ERROR(r)) {
+			/* only free p if it was newly allocated here */
+			if (*l != p) {
+				free(p->pattern);
+				free(p);
+			}
+			return r;
+		}
+		*l = p;
+		return RET_OK;
+	}
+
+	pkg = new_package(firstpart);
+	if (FAILEDTOALLOC(pkg))
+		return RET_ERROR_OOM;
+	node = tsearch(pkg, &i->packages, opackage_compare);
+	if (FAILEDTOALLOC(node))
+		return RET_ERROR_OOM;
+	if (*node == pkg) {
+		/* freshly inserted node; NOTE(review): if this init fails
+		 * the node stays in the tree with uninitialized fields */
+		r = strlist_init_n(6, &pkg->data.fields);
+		if (RET_WAS_ERROR(r))
+			return r;
+	} else {
+		/* name already known, reuse the existing node */
+		free(pkg->packagename);
+		free(pkg);
+		pkg = *node;
+	}
+	return add_override_field(&(*node)->data,
+			secondpart, thirdpart, source);
+}
+
+/* Parse an override file into *info.  Each line is
+ * "<package-or-pattern> <field> <value...>", whitespace separated.
+ * filename == NULL yields *info = NULL and RET_OK; an override file
+ * without any usable entry yields RET_NOTHING. */
+retvalue override_read(const char *filename, struct overridefile **info, bool source) {
+	struct overridefile *i;
+	FILE *file;
+	char buffer[1001];
+
+	if (filename == NULL) {
+		*info = NULL;
+		return RET_OK;
+	}
+	char *fn = configfile_expandname(filename, NULL);
+	if (FAILEDTOALLOC(fn))
+		return RET_ERROR_OOM;
+	file = fopen(fn, "r");
+	free(fn);
+
+	if (file == NULL) {
+		int e = errno;
+		fprintf(stderr, "Error %d opening override file '%s': %s\n",
+				e, filename, strerror(e));
+		return RET_ERRNO(e);
+	}
+	i = zNEW(struct overridefile);
+	if (FAILEDTOALLOC(i)) {
+		(void)fclose(file);
+		return RET_ERROR_OOM;
+	}
+
+	while (fgets(buffer, 1000, file) != NULL){
+		retvalue r;
+		const char *firstpart, *secondpart, *thirdpart;
+		char *p;
+		size_t l = strlen(buffer);
+
+		if (buffer[l-1] != '\n') {
+			if (l >= 999) {
+				fprintf(stderr,
+"Too long line in '%s'!\n",
+						filename);
+				override_free(i);
+				(void)fclose(file);
+				return RET_ERROR;
+			}
+			fprintf(stderr, "Missing line terminator in '%s'!\n",
+					filename);
+		} else {
+			l--;
+			buffer[l] = '\0';
+		}
+		/* NOTE(review): buffer[l] is already '\0' here, so this
+		 * loop never runs; buffer[l-1] was presumably intended
+		 * (trailing whitespace is not actually stripped) */
+		while (l>0 && xisspace(buffer[l])) {
+			buffer[l] = '\0';
+			l--;
+		}
+		if (l== 0)
+			continue;
+		/* split into three whitespace-separated parts, in place */
+		p = buffer;
+		while (*p !='\0' && xisspace(*p))
+			*(p++)='\0';
+		firstpart = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		while (*p !='\0' && xisspace(*p))
+			*(p++)='\0';
+		secondpart = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		while (*p !='\0' && xisspace(*p))
+			*(p++)='\0';
+		thirdpart = p;
+		r = add_override(i, firstpart, secondpart, thirdpart, source);
+		if (RET_WAS_ERROR(r)) {
+			override_free(i);
+			(void)fclose(file);
+			return r;
+		}
+	}
+	(void)fclose(file);
+	if (i->packages != NULL || i->patterns != NULL) {
+		*info = i;
+		return RET_OK;
+	} else {
+		override_free(i);
+		*info = NULL;
+		return RET_NOTHING;
+	}
+}
+
+/* Look up the override data for a package: exact-name entries first,
+ * then glob patterns in file order.  NULL if nothing matches. */
+const struct overridedata *override_search(const struct overridefile *overrides, const char *package) {
+	struct overridepackage pkg, **node;
+	struct overridepattern *p;
+
+	if (overrides == NULL)
+		return NULL;
+
+	/* stack key for tfind; only packagename is compared */
+	pkg.packagename = (char*)package;
+	node = tfind(&pkg, &overrides->packages, opackage_compare);
+	if (node != NULL && *node != NULL)
+		return &(*node)->data;
+	for (p = overrides->patterns ; p != NULL ; p = p->next) {
+		if (globmatch(package, p->pattern))
+			return &p->data;
+	}
+	return NULL;
+}
+
+/* Return the value stored for field, or NULL.  fields holds
+ * (name, value) pairs at even/odd indices. */
+const char *override_get(const struct overridedata *override, const char *field) {
+	int i;
+
+	if (override == NULL)
+		return NULL;
+
+	for (i = 0 ; i+1 < override->fields.count ; i+=2) {
+		// TODO: currently case-sensitive. warn if otherwise?
+		if (strcmp(override->fields.values[i], field) == 0)
+			return override->fields.values[i+1];
+	}
+	return NULL;
+}
+
+/* add new fields to otherreplaces, but not "Section", or "Priority".
+ * incorporates otherreplaces, or frees them on error,
+ * returns otherreplaces when nothing was to do, NULL on RET_ERROR_OOM*/
+/* add new fields to otherreplaces, but not "Section", or "Priority".
+ * incorporates otherreplaces, or frees them on error,
+ * returns otherreplaces when nothing was to do, NULL on RET_ERROR_OOM.
+ * '$'-prefixed fields are skipped, except $delete which turns into a
+ * field deletion. */
+struct fieldtoadd *override_addreplacefields(const struct overridedata *override, struct fieldtoadd *otherreplaces) {
+	int i;
+
+	if (override == NULL)
+		return otherreplaces;
+
+	for (i = 0 ; i+1 < override->fields.count ; i+=2) {
+		if (strcmp(override->fields.values[i],
+					SECTION_FIELDNAME) != 0 &&
+		    strcmp(override->fields.values[i],
+					PRIORITY_FIELDNAME) != 0 &&
+		    override->fields.values[i][0] != '$') {
+			otherreplaces = addfield_new(
+				override->fields.values[i],
+				override->fields.values[i+1],
+				otherreplaces);
+			if (otherreplaces == NULL)
+				return NULL;
+		} else if (strcasecmp(override->fields.values[i],
+					"$delete") == 0) {
+			otherreplaces = deletefield_new(
+				override->fields.values[i+1], otherreplaces);
+			if (otherreplaces == NULL)
+				return NULL;
+		}
+	}
+	return otherreplaces;
+
+}
+
+/* Build a fieldtoadd list from ALL non-'$' override fields (plus
+ * $delete entries as deletions).  RET_NOTHING if no field applies. */
+retvalue override_allreplacefields(const struct overridedata *override, struct fieldtoadd **fields_p) {
+	int i;
+	struct fieldtoadd *fields = NULL;
+
+	assert (override != NULL);
+
+	for (i = 0 ; i+1 < override->fields.count ; i+=2) {
+		if (override->fields.values[i][0] != '$') {
+			fields = addfield_new(
+				override->fields.values[i],
+				override->fields.values[i+1],
+				fields);
+			if (FAILEDTOALLOC(fields))
+				return RET_ERROR_OOM;
+		} else if (strcasecmp(override->fields.values[i],
+					"$delete") == 0) {
+			fields = deletefield_new(
+				override->fields.values[i+1], fields);
+			if (FAILEDTOALLOC(fields))
+				return RET_ERROR_OOM;
+		}
+	}
+	if (fields == NULL)
+		return RET_NOTHING;
+	*fields_p = fields;
+	return RET_OK;
+}
diff --git a/override.h b/override.h
new file mode 100644
index 0000000..f97d359
--- /dev/null
+++ b/override.h
@@ -0,0 +1,35 @@
+#ifndef REPREPRO_OVERRIDE_H
+#define REPREPRO_OVERRIDE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_CHUNKS_H
+#include "chunks.h"
+#endif
+
+struct overridefile;
+struct overridedata;
+
+/* to avoid typos */
+#define PRIORITY_FIELDNAME "Priority"
+#define SECTION_FIELDNAME "Section"
+
+void override_free(/*@only@*//*@null@*/struct overridefile *);
+retvalue override_read(const char *filename, /*@out@*/struct overridefile **, bool /*source*/);
+
+/*@null@*//*@dependent@*/const struct overridedata *override_search(/*@null@*/const struct overridefile *, const char * /*package*/);
+/*@null@*//*@dependent@*/const char *override_get(/*@null@*/const struct overridedata *, const char * /*field*/);
+
+/* add new fields to otherreplaces, but not "Section", or "Priority".
+ * incorporates otherreplaces, or frees them on error */
+/*@null@*/struct fieldtoadd *override_addreplacefields(const struct overridedata *, /*@only@*/struct fieldtoadd *);
+
+/* as above, but all fields. and may return NULL if there are no overrides */
+retvalue override_allreplacefields(const struct overridedata *, /*@out@*/struct fieldtoadd **);
+
+#endif
diff --git a/package.h b/package.h
new file mode 100644
index 0000000..7a46a04
--- /dev/null
+++ b/package.h
@@ -0,0 +1,87 @@
+#ifndef REPREPRO_PACKAGE_H
+#define REPREPRO_PACKAGE_H
+
+#include "atoms.h"
+
+struct package {
+ /*@temp@*/ struct target *target;
+ const char *name;
+ const char *control;
+ size_t controllen;
+ /* for the following NULL means not yet extracted: */
+ const char *version;
+ const char *source;
+ const char *sourceversion;
+ architecture_t architecture;
+
+ /* used to keep the memory that might be needed for the above,
+ * only to be used to free once this struct is abandoned */
+ char *pkgchunk, *pkgname, *pkgversion, *pkgsource, *pkgsrcversion;
+};
+struct distribution;
+struct target;
+struct atomlist;
+struct logger;
+struct trackingdata;
+
+typedef retvalue action_each_target(struct target *, void *);
+typedef retvalue action_each_package(struct package *, void *);
+
+/* call <action> for each package of <distribution> */
+retvalue package_foreach(struct distribution *, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, action_each_package, /*@null@*/action_each_target, void *);
+/* same but different ways to restrict it */
+retvalue package_foreach_c(struct distribution *, /*@null@*/const struct atomlist *, architecture_t, packagetype_t, action_each_package, void *);
+
+/* delete every package decider returns RET_OK for */
+retvalue package_remove_each(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, action_each_package /*decider*/, struct trackingdata *, void *);
+
+
+retvalue package_get(struct target *, const char * /*name*/, /*@null@*/ const char */*version*/, /*@out@*/ struct package *);
+
+static inline void package_done(struct package *pkg) {
+ free(pkg->pkgname);
+ free(pkg->pkgchunk);
+ free(pkg->pkgversion);
+ free(pkg->pkgsource);
+ free(pkg->pkgsrcversion);
+ memset(pkg, 0, sizeof(*pkg));
+}
+
+retvalue package_getversion(struct package *);
+retvalue package_getsource(struct package *);
+retvalue package_getarchitecture(struct package *);
+
+static inline char *package_dupversion(struct package *package) {
+ assert (package->version != NULL);
+ if (package->pkgversion == NULL)
+ return strdup(package->version);
+ else {
+ // steal version from package
+ // (caller must ensure it is not freed while still needed)
+ char *v = package->pkgversion;
+ package->pkgversion = NULL;
+ return v;
+ }
+}
+
+struct package_cursor {
+ /*@temp@*/struct target *target;
+ struct cursor *cursor;
+ struct package current;
+ bool close_database;
+};
+
+retvalue package_openiterator(struct target *, bool /*readonly*/, bool /*duplicate*/, /*@out@*/struct package_cursor *);
+retvalue package_openduplicateiterator(struct target *t, const char *name, long long, /*@out@*/struct package_cursor *tc);
+bool package_next(struct package_cursor *);
+retvalue package_closeiterator(struct package_cursor *);
+
+retvalue package_remove(struct package *, /*@null@*/struct logger *, /*@null@*/struct trackingdata *);
+retvalue package_remove_by_cursor(struct package_cursor *, /*@null@*/struct logger *, /*@null@*/struct trackingdata *);
+retvalue package_newcontrol_by_cursor(struct package_cursor *, const char *, size_t);
+
+retvalue package_check(struct package *, void *);
+retvalue package_referenceforsnapshot(struct package *, void *);
+retvalue package_rerunnotifiers(struct package *, void *);
+
+#endif
diff --git a/pool.c b/pool.c
new file mode 100644
index 0000000..c651ae0
--- /dev/null
+++ b/pool.c
@@ -0,0 +1,869 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2008,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <search.h>
+#include <unistd.h>
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "atoms.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "pool.h"
+#include "reference.h"
+#include "files.h"
+#include "sources.h"
+#include "outhook.h"
+
+/* for now save them only in memory. In later times some way to store
+ * them on disk would be nice */
+
+static component_t reserved_components = 0;
+static void **file_changes_per_component = NULL;
+static void *legacy_file_changes = NULL;
+bool pool_havedereferenced = false;
+bool pool_havedeleted = false;
+
+#define pl_ADDED 1
+#define pl_UNREFERENCED 2
+#define pl_DELETED 4
+
+static int legacy_compare(const void *a, const void *b) {
+ const char *v1 = a, *v2 = b;
+ v1++;
+ v2++;
+ return strcmp(v1, v2);
+}
+
+struct source_node {
+ void *file_changes;
+ char sourcename[];
+};
+
+static int source_node_compare(const void *a, const void *b) {
+ const struct source_node *v1 = a, *v2 = b;
+ return strcmp(v1->sourcename, v2->sourcename);
+}
+
+static retvalue split_filekey(const char *filekey, /*@out@*/component_t *component_p, /*@out@*/struct source_node **node_p, /*@out@*/const char **basename_p) {
+ const char *p, *lastp, *source;
+ struct source_node *node;
+ component_t c;
+
+ if (unlikely(memcmp(filekey, "pool/", 5) != 0))
+ return RET_NOTHING;
+ lastp = filekey + 4;
+ filekey = lastp + 1;
+ /* components can include slashes, so look for the first valid component
+ * followed by something looking like a proper directory.
+ * This might misdetect the component, but as it only is used for
+ * the current run it will hopefully always detect the same place
+ * (and all that is important is that it is the same place) */
+ while (true) {
+ p = strchr(lastp + 1, '/');
+ if (unlikely(p == NULL))
+ return RET_NOTHING;
+ lastp = p;
+ c = component_find_l(filekey, (size_t)(p - filekey));
+ if (unlikely(!atom_defined(c)))
+ continue;
+ p++;
+ if (p[0] != '\0' && p[1] == '/' && p[0] != '/' && p[2] == p[0]) {
+ p += 2;
+ if (unlikely(p[0] == 'l' && p[1] == 'i' && p[2] == 'b'))
+ continue;
+ source = p;
+ break;
+ } else if (p[0] == 'l' && p[1] == 'i' && p[2] == 'b' && p[3] != '\0'
+ && p[4] == '/' && p[5] == 'l' && p[6] == 'i'
+ && p[7] == 'b' && p[3] != '/' && p[8] == p[3]) {
+ source = p + 5;
+ break;
+ } else
+ continue;
+ }
+ p = strchr(source, '/');
+ if (unlikely(p == NULL))
+ return RET_NOTHING;
+ node = malloc(sizeof(struct source_node) + (p - source) + 1);
+ if (FAILEDTOALLOC(node))
+ return RET_ERROR_OOM;
+ node->file_changes = NULL;
+ memcpy(node->sourcename, source, p - source);
+ node->sourcename[p - source] = '\0';
+ p++;
+ *basename_p = p;
+ *node_p = node;
+ *component_p = c;
+ return RET_OK;
+}
+
+/* name can be either basename (in a source directory) or a full
+ * filekey (in legacy fallback mode) */
+static retvalue remember_name(void **root_p, const char *name, char mode, char mode_and) {
+ char **p;
+
+ p = tsearch(name - 1, root_p, legacy_compare);
+ if (FAILEDTOALLOC(p))
+ return RET_ERROR_OOM;
+ if (*p == name - 1) {
+ size_t l = strlen(name);
+ *p = malloc(l + 2);
+ if (FAILEDTOALLOC(*p))
+ return RET_ERROR_OOM;
+ **p = mode;
+ memcpy((*p) + 1, name, l + 1);
+ } else {
+ **p &= mode_and;
+ **p |= mode;
+ }
+ return RET_OK;
+}
+
+static retvalue remember_filekey(const char *filekey, char mode, char mode_and) {
+ retvalue r;
+ component_t c;
+ struct source_node *node, **found;
+ const char *basefilename;
+
+ r = split_filekey(filekey, &c, &node, &basefilename);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_OK) {
+ assert (atom_defined(c));
+ if (c > reserved_components) {
+ void ** h;
+
+ assert (c <= components_count());
+
+ h = realloc(file_changes_per_component,
+ sizeof(void*) * (c + 1));
+ if (FAILEDTOALLOC(h))
+ return RET_ERROR_OOM;
+ file_changes_per_component = h;
+ while (reserved_components < c) {
+ h[++reserved_components] = NULL;
+ }
+ }
+ assert (file_changes_per_component != NULL);
+ found = tsearch(node, &file_changes_per_component[c],
+ source_node_compare);
+ if (FAILEDTOALLOC(found))
+ return RET_ERROR_OOM;
+ if (*found != node) {
+ free(node);
+ node = *found;
+ }
+ return remember_name(&node->file_changes, basefilename,
+ mode, mode_and);
+ }
+ fprintf(stderr, "Warning: strange filekey '%s'!\n", filekey);
+ return remember_name(&legacy_file_changes, filekey, mode, mode_and);
+}
+
+retvalue pool_dereferenced(const char *filekey) {
+ pool_havedereferenced = true;
+ return remember_filekey(filekey, pl_UNREFERENCED, 0xFF);
+};
+
+retvalue pool_markadded(const char *filekey) {
+ return remember_filekey(filekey, pl_ADDED, ~pl_DELETED);
+};
+
+/* so much code, just for the case the morguedir is on another partition than
+ * the pool dir... */
+
+static inline retvalue copyfile(const char *source, const char *destination, int outfd, off_t length) {
+ int infd, err;
+ ssize_t readbytes;
+ void *buffer;
+ size_t bufsize = 1024*1024;
+
+ buffer = malloc(bufsize);
+ if (FAILEDTOALLOC(buffer)) {
+ (void)close(outfd);
+ (void)unlink(destination);
+ bufsize = 16*1024;
+ buffer = malloc(bufsize);
+ if (FAILEDTOALLOC(buffer))
+ return RET_ERROR_OOM;
+ }
+
+ infd = open(source, O_RDONLY|O_NOCTTY);
+ if (infd < 0) {
+ int en = errno;
+
+ fprintf(stderr,
+"error %d opening file %s to be copied into the morgue: %s\n",
+ en, source, strerror(en));
+ free(buffer);
+ (void)close(outfd);
+ (void)unlink(destination);
+ return RET_ERRNO(en);
+ }
+ while ((readbytes = read(infd, buffer, bufsize)) > 0) {
+ const char *start = buffer;
+
+ if ((off_t)readbytes > length) {
+ fprintf(stderr,
+"Mismatch of sizes of '%s': file is larger than expected!\n",
+ destination);
+ break;
+ }
+ while (readbytes > 0) {
+ ssize_t written;
+
+ written = write(outfd, start, readbytes);
+ if (written > 0) {
+ assert (written <= readbytes);
+ readbytes -= written;
+ start += written;
+ } else if (written < 0) {
+ int en = errno;
+
+ (void)close(infd);
+ (void)close(outfd);
+ (void)unlink(destination);
+ free(buffer);
+
+ fprintf(stderr,
+"error %d writing to morgue file %s: %s\n",
+ en, destination, strerror(en));
+ return RET_ERRNO(en);
+ }
+ }
+ }
+ free(buffer);
+ if (readbytes == 0) {
+ err = close(infd);
+ if (err != 0)
+ readbytes = -1;
+ infd = -1;
+ }
+ if (readbytes < 0) {
+ int en = errno;
+ fprintf(stderr,
+"error %d reading file %s to be copied into the morgue: %s\n",
+ en, source, strerror(en));
+ if (infd >= 0)
+ (void)close(infd);
+ (void)close(outfd);
+ (void)unlink(destination);
+ return RET_ERRNO(en);
+ }
+ if (infd >= 0)
+ (void)close(infd);
+ err = close(outfd);
+ if (err != 0) {
+ int en = errno;
+
+ fprintf(stderr, "error %d writing to morgue file %s: %s\n",
+ en, destination, strerror(en));
+ (void)unlink(destination);
+ return RET_ERRNO(en);
+ }
+ return RET_OK;
+}
+
+static inline retvalue morgue_name(const char *filekey, char **name_p, int *fd_p) {
+ const char *name = dirs_basename(filekey);
+ char *firsttry = calc_dirconcat(global.morguedir, name);
+ int fd, en, number;
+ retvalue r;
+
+ if (FAILEDTOALLOC(firsttry))
+ return RET_ERROR_OOM;
+
+ fd = open(firsttry, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+ if (fd >= 0) {
+ assert (fd > 2);
+ *name_p = firsttry;
+ *fd_p = fd;
+ return RET_OK;
+ }
+ en = errno;
+ if (en == ENOENT) {
+ r = dirs_make_recursive(global.morguedir);
+ if (RET_WAS_ERROR(r)) {
+ free(firsttry);
+ return r;
+ }
+ /* try again */
+ fd = open(firsttry, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+ if (fd >= 0) {
+ assert (fd > 2);
+ *name_p = firsttry;
+ *fd_p = fd;
+ return RET_OK;
+ }
+ en = errno;
+ }
+ if (en != EEXIST) {
+ fprintf(stderr, "error %d creating morgue-file %s: %s\n",
+ en, firsttry, strerror(en));
+ free(firsttry);
+ return RET_ERRNO(en);
+ }
+ /* file exists, try names with -number appended: */
+ for (number = 1 ; number < 1000 ; number++) {
+ char *try = mprintf("%s-%d", firsttry, number);
+
+ if (FAILEDTOALLOC(try)) {
+ free(firsttry);
+ return RET_ERROR_OOM;
+ }
+ fd = open(try, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+ if (fd >= 0) {
+ assert (fd > 2);
+ free(firsttry);
+ *name_p = try;
+ *fd_p = fd;
+ return RET_OK;
+ }
+ free(try);
+ }
+ free(firsttry);
+ fprintf(stderr, "Could not create a new file '%s' in morguedir '%s'!\n",
+ name, global.morguedir);
+ return RET_ERROR;
+}
+
+/* if file not there, return RET_NOTHING */
+static inline retvalue movefiletomorgue(const char *filekey, const char *filename, bool new) {
+ char *morguefilename = NULL;
+ int err;
+ retvalue r;
+
+ if (!new && global.morguedir != NULL) {
+ int morguefd = -1;
+ struct stat s;
+
+ r = morgue_name(filekey, &morguefilename, &morguefd);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+ err = lstat(filename, &s);
+ if (err != 0) {
+ int en = errno;
+ if (errno == ENOENT) {
+ (void)close(morguefd);
+ (void)unlink(morguefilename);
+ free(morguefilename);
+ return RET_NOTHING;
+ }
+ fprintf(stderr,
+"error %d looking at file %s to be moved into the morgue: %s\n",
+ en, filename, strerror(en));
+ (void)close(morguefd);
+ (void)unlink(morguefilename);
+ free(morguefilename);
+ return RET_ERRNO(en);
+ }
+ if (S_ISLNK(s.st_mode)) {
+ /* no need to copy a symbolic link: */
+ (void)close(morguefd);
+ (void)unlink(morguefilename);
+ free(morguefilename);
+ morguefilename = NULL;
+ } else if (S_ISREG(s.st_mode)) {
+ err = rename(filename, morguefilename);
+ if (err == 0) {
+ (void)close(morguefd);
+ free(morguefilename);
+ return RET_OK;
+ }
+ r = copyfile(filename, morguefilename, morguefd,
+ s.st_size);
+ if (RET_WAS_ERROR(r)) {
+ free(morguefilename);
+ return r;
+ }
+ } else {
+ fprintf(stderr,
+"Strange (non-regular) file '%s' in the pool.\nPlease delete manually!\n",
+ filename);
+ (void)close(morguefd);
+ (void)unlink(morguefilename);
+ free(morguefilename);
+ morguefilename = NULL;
+ return RET_ERROR;
+ }
+ }
+ err = unlink(filename);
+ if (err != 0) {
+ int en = errno;
+ if (errno == ENOENT)
+ return RET_NOTHING;
+ fprintf(stderr, "error %d while unlinking %s: %s\n",
+ en, filename, strerror(en));
+ if (morguefilename != NULL) {
+ (void)unlink(morguefilename);
+ free(morguefilename);
+ }
+ return RET_ERRNO(en);
+ } else {
+ free(morguefilename);
+ return RET_OK;
+ }
+}
+
+/* delete the file and possible parent directories,
+ * if not new and morguedir set, first move/copy there */
+static retvalue deletepoolfile(const char *filekey, bool new) {
+ char *filename;
+ retvalue r;
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+ if (!new)
+ outhook_send("POOLDELETE", filekey, NULL, NULL);
+ filename = files_calcfullfilename(filekey);
+ if (FAILEDTOALLOC(filename))
+ return RET_ERROR_OOM;
+ /* move to morgue or simply delete: */
+ r = movefiletomorgue(filekey, filename, new);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "%s not found, forgetting anyway\n", filename);
+ }
+ if (!RET_IS_OK(r)) {
+ free(filename);
+ return r;
+ }
+ if (!global.keepdirectories) {
+ /* try to delete parent directories, until one gives
+ * errors (hopefully because it still contains files) */
+ size_t fixedpartlen = strlen(global.outdir);
+ char *p;
+ int err, en;
+
+ while ((p = strrchr(filename, '/')) != NULL) {
+ /* do not try to remove parts of the mirrordir */
+ if ((size_t)(p-filename) <= fixedpartlen+1)
+ break;
+ *p ='\0';
+ /* try to rmdir the directory, this will
+ * fail if there are still other files or directories
+ * in it: */
+ err = rmdir(filename);
+ if (err == 0) {
+ if (verbose > 1) {
+ printf(
+"removed now empty directory %s\n",
+ filename);
+ }
+ } else {
+ en = errno;
+ if (en != ENOTEMPTY) {
+ //TODO: check here if only some
+ //other error was first and it
+ //is not empty so we do not have
+ //to remove it anyway...
+ fprintf(stderr,
+"ignoring error %d trying to rmdir %s: %s\n", en, filename, strerror(en));
+ }
+ /* parent directories will contain this one
+ * thus not be empty, in other words:
+ * everything's done */
+ break;
+ }
+ }
+
+ }
+ free(filename);
+ return RET_OK;
+}
+
+
+retvalue pool_delete(const char *filekey) {
+ retvalue r;
+
+ if (verbose >= 1)
+ printf("deleting and forgetting %s\n", filekey);
+
+ r = deletepoolfile(filekey, false);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ return files_remove(filekey);
+}
+
+/* called from files_remove: */
+retvalue pool_markdeleted(const char *filekey) {
+ pool_havedeleted = true;
+ return remember_filekey(filekey, pl_DELETED, ~pl_UNREFERENCED);
+};
+
+/* libc's twalk misses a callback_data pointer, so we need some temporary
+ * global variables: */
+static retvalue result;
+static bool first, onlycount;
+static long woulddelete_count;
+static component_t current_component;
+static const char *sourcename = NULL;
+
+static void removeifunreferenced(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node; const char *filekey;
+ retvalue r;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(char **)nodep;
+ filekey = node + 1;
+ if ((*node & pl_UNREFERENCED) == 0)
+ return;
+ r = references_isused(filekey);
+ if (r != RET_NOTHING)
+ return;
+
+ if (onlycount) {
+ woulddelete_count++;
+ return;
+ }
+
+ if (verbose >= 0 && first) {
+ printf("Deleting files no longer referenced...\n");
+ first = false;
+ }
+ if (verbose >= 1)
+ printf("deleting and forgetting %s\n", filekey);
+ r = deletepoolfile(filekey, (*node & pl_ADDED) != 0);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r)) {
+ r = files_removesilent(filekey);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r))
+ *node &= ~pl_UNREFERENCED;
+ if (RET_IS_OK(r))
+ *node |= pl_DELETED;
+ }
+}
+
+
+static void removeifunreferenced2(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node;
+ char *filekey;
+ retvalue r;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(char **)nodep;
+ if ((*node & pl_UNREFERENCED) == 0)
+ return;
+ filekey = calc_filekey(current_component, sourcename, node + 1);
+ r = references_isused(filekey);
+ if (r != RET_NOTHING) {
+ free(filekey);
+ return;
+ }
+ if (onlycount) {
+ woulddelete_count++;
+ free(filekey);
+ return;
+ }
+ if (verbose >= 0 && first) {
+ printf("Deleting files no longer referenced...\n");
+ first = false;
+ }
+ if (verbose >= 1)
+ printf("deleting and forgetting %s\n", filekey);
+ r = deletepoolfile(filekey, (*node & pl_ADDED) != 0);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r)) {
+ r = files_removesilent(filekey);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r))
+ *node &= ~pl_UNREFERENCED;
+ if (RET_IS_OK(r))
+ *node |= pl_DELETED;
+ }
+ RET_UPDATE(result, r);
+ free(filekey);
+}
+
+static void removeunreferenced_from_component(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ struct source_node *node;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(struct source_node **)nodep;
+ sourcename = node->sourcename;
+ twalk(node->file_changes, removeifunreferenced2);
+}
+
+retvalue pool_removeunreferenced(bool delete) {
+ component_t c;
+
+ if (!delete && verbose <= 0)
+ return RET_NOTHING;
+
+ result = RET_NOTHING;
+ first = true;
+ onlycount = !delete;
+ woulddelete_count = 0;
+ for (c = 1 ; c <= reserved_components ; c++) {
+ assert (file_changes_per_component != NULL);
+ current_component = c;
+ twalk(file_changes_per_component[c],
+ removeunreferenced_from_component);
+ }
+ twalk(legacy_file_changes, removeifunreferenced);
+ if (interrupted())
+ result = RET_ERROR_INTERRUPTED;
+ if (!delete && woulddelete_count > 0) {
+ printf(
+"%lu files lost their last reference.\n"
+"(dumpunreferenced lists such files, use deleteunreferenced to delete them.)\n",
+ woulddelete_count);
+ }
+ return result;
+}
+
+static void removeunusednew(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node; const char *filekey;
+ retvalue r;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(char **)nodep;
+ filekey = node + 1;
+ /* only look at newly added and not already deleted */
+ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED)
+ return;
+ r = references_isused(filekey);
+ if (r != RET_NOTHING)
+ return;
+
+ if (onlycount) {
+ woulddelete_count++;
+ return;
+ }
+
+ if (verbose >= 0 && first) {
+ printf(
+"Deleting files just added to the pool but not used.\n"
+"(to avoid use --keepunusednewfiles next time)\n");
+ first = false;
+ }
+ if (verbose >= 1)
+ printf("deleting and forgetting %s\n", filekey);
+ r = deletepoolfile(filekey, true);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r)) {
+ r = files_removesilent(filekey);
+ RET_UPDATE(result, r);
+ /* don't remove pl_ADDED here, otherwise the hook
+ * script will be told to remove something not added */
+ if (!RET_WAS_ERROR(r))
+ *node &= ~pl_UNREFERENCED;
+ if (RET_IS_OK(r))
+ *node |= pl_DELETED;
+ }
+}
+
+
+static void removeunusednew2(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node;
+ char *filekey;
+ retvalue r;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(char **)nodep;
+ /* only look at newly added and not already deleted */
+ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED)
+ return;
+ filekey = calc_filekey(current_component, sourcename, node + 1);
+ r = references_isused(filekey);
+ if (r != RET_NOTHING) {
+ free(filekey);
+ return;
+ }
+ if (onlycount) {
+ woulddelete_count++;
+ free(filekey);
+ return;
+ }
+ if (verbose >= 0 && first) {
+ printf(
+"Deleting files just added to the pool but not used.\n"
+"(to avoid use --keepunusednewfiles next time)\n");
+ first = false;
+ }
+ if (verbose >= 1)
+ printf("deleting and forgetting %s\n", filekey);
+ r = deletepoolfile(filekey, true);
+ RET_UPDATE(result, r);
+ if (!RET_WAS_ERROR(r)) {
+ r = files_removesilent(filekey);
+ RET_UPDATE(result, r);
+ /* don't remove pl_ADDED here, otherwise the hook
+ * script will be told to remove something not added */
+ if (!RET_WAS_ERROR(r))
+ *node &= ~pl_UNREFERENCED;
+ if (RET_IS_OK(r))
+ *node |= pl_DELETED;
+ }
+ RET_UPDATE(result, r);
+ free(filekey);
+}
+
+static void removeunusednew_from_component(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ struct source_node *node;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ if (interrupted())
+ return;
+
+ node = *(struct source_node **)nodep;
+ sourcename = node->sourcename;
+ twalk(node->file_changes, removeunusednew2);
+}
+
+void pool_tidyadded(bool delete) {
+ component_t c;
+
+ if (!delete && verbose < 0)
+ return;
+
+ result = RET_NOTHING;
+ first = true;
+ onlycount = !delete;
+ woulddelete_count = 0;
+ for (c = 1 ; c <= reserved_components ; c++) {
+ assert (file_changes_per_component != NULL);
+ current_component = c;
+ twalk(file_changes_per_component[c],
+ removeunusednew_from_component);
+ }
+	// this should not really happen at all, but better safe than sorry:
+ twalk(legacy_file_changes, removeunusednew);
+ if (!delete && woulddelete_count > 0) {
+ printf(
+"%lu files were added but not used.\n"
+"The next deleteunreferenced call will delete them.\n",
+ woulddelete_count);
+ }
+ return;
+
+}
+
+static void reportnewlegacyfiles(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ node = *(char **)nodep;
+ /* only look at newly added and not already deleted */
+ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED)
+ return;
+ outhook_sendpool(atom_unknown, NULL, node + 1);
+}
+
+
+static void reportnewproperfiles(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ char *node;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ node = *(char **)nodep;
+ /* only look at newly added and not already deleted */
+ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED)
+ return;
+ outhook_sendpool(current_component, sourcename, node + 1);
+}
+
+static void reportnewfiles(const void *nodep, const VISIT which, UNUSED(const int depth)) {
+ struct source_node *node;
+
+ if (which != leaf && which != postorder)
+ return;
+
+ node = *(struct source_node **)nodep;
+ sourcename = node->sourcename;
+ twalk(node->file_changes, reportnewproperfiles);
+}
+
+void pool_sendnewfiles(void) {
+ component_t c;
+
+ for (c = 1 ; c <= reserved_components ; c++) {
+ assert (file_changes_per_component != NULL);
+ current_component = c;
+ twalk(file_changes_per_component[c],
+ reportnewfiles);
+ }
+ twalk(legacy_file_changes, reportnewlegacyfiles);
+ return;
+
+}
+
+#ifdef HAVE_TDESTROY
+static void sourcename_free(void *n) {
+ struct source_node *node = n;
+
+ tdestroy(node->file_changes, free);
+ free(node);
+}
+#endif
+
+void pool_free(void) {
+#ifdef HAVE_TDESTROY
+ component_t c;
+
+ for (c = 1 ; c <= reserved_components ; c++) {
+ tdestroy(file_changes_per_component[c], sourcename_free);
+ }
+ reserved_components = 0;
+ free(file_changes_per_component);
+ file_changes_per_component = NULL;
+ tdestroy(legacy_file_changes, free);
+ legacy_file_changes = NULL;
+#endif
+}
diff --git a/pool.h b/pool.h
new file mode 100644
index 0000000..a346c48
--- /dev/null
+++ b/pool.h
@@ -0,0 +1,33 @@
+#ifndef REPREPRO_POOL_H
+#define REPREPRO_POOL_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+extern bool pool_havedereferenced;
+
+/* called from references.c to note the file lost a reference */
+retvalue pool_dereferenced(const char *);
+/* called from files.c to note the file was added or forgotten */
+retvalue pool_markadded(const char *);
+retvalue pool_markdeleted(const char *);
+
+/* Remove all files that lost their last reference, or only count them */
+retvalue pool_removeunreferenced(bool /*delete*/);
+
+/* Delete all added files that are not used, or only count them */
+void pool_tidyadded(bool deletenew);
+
+/* delete and forget a single file */
+retvalue pool_delete(const char *);
+
+/* notify outhook of new files */
+void pool_sendnewfiles(void);
+
+/* free all memory, to make valgrind happier */
+void pool_free(void);
+#endif
diff --git a/printlistformat.c b/printlistformat.c
new file mode 100644
index 0000000..b1442bb
--- /dev/null
+++ b/printlistformat.c
@@ -0,0 +1,232 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+
+#include "error.h"
+#include "atoms.h"
+#include "chunks.h"
+#include "target.h"
+#include "distribution.h"
+#include "dirs.h"
+#include "package.h"
+#include "printlistformat.h"
+
+retvalue listformat_print(const char *listformat, struct package *package) {
+ struct target *target = package->target;
+ retvalue r;
+ const char *p, *q;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: listformat_print(package={name: %s, version: %s, pkgname: %s}) called.\n",
+ package->name, package->version, package->pkgname);
+
+ if (listformat == NULL) {
+
+ r = package_getversion(package);
+ if (RET_IS_OK(r)) {
+ printf( "%s: %s %s\n",
+ target->identifier, package->name,
+ package->version);
+ } else {
+ printf("Could not retrieve version from %s in %s\n",
+ package->name, target->identifier);
+ }
+ return r;
+ }
+ /* try to produce the same output dpkg-query --show produces: */
+ for (p = listformat ; *p != '\0' ; p++) {
+ long length;
+ char *value;
+ const char *v;
+
+ if (*p == '\\') {
+ p++;
+ if (*p == '\0')
+ break;
+ switch (*p) {
+ case 'n':
+ putchar('\n');
+ break;
+ case 't':
+ putchar('\t');
+ break;
+ case 'r':
+ putchar('\r');
+ break;
+ /* extension \0 produces zero byte
+ * (useful for xargs -0) */
+ case '0':
+ putchar('\0');
+ break;
+ default:
+ putchar(*p);
+ }
+ continue;
+ }
+ if (*p != '$' || p[1] != '{') {
+ putchar(*p);
+ continue;
+ }
+ p++;
+		/* substitute variable */
+ q = p;
+ while (*q != '\0' && *q != '}' && *q != ';')
+ q++;
+ if (*q == '\0' || q == p) {
+ putchar('$');
+ putchar('{');
+ continue;
+ }
+ if (q - p == 12 && strncasecmp(p, "{$identifier", 12) == 0) {
+ value = NULL;
+ v = target->identifier;
+ } else if ( (q - p == 10 && strncasecmp(p, "{$basename", 10) == 0)
+ || (q - p == 14 && strncasecmp(p, "{$fullfilename", 14) == 0)
+ || (q - p == 9 && strncasecmp(p, "{$filekey", 9) == 0)) {
+ struct strlist filekeys;
+ r = target->getfilekeys(package->control, &filekeys);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r) && filekeys.count > 0) {
+ if (q - p == 9) { /* filekey */
+ value = filekeys.values[0];
+ filekeys.values[0] = NULL;
+ v = value;
+ } else if (q - p == 10) { /* basename */
+ value = filekeys.values[0];
+ filekeys.values[0] = NULL;
+ v = dirs_basename(value);;
+ } else { /* fullfilename */
+ value = calc_dirconcat(global.basedir,
+ filekeys.values[0]);
+ if (FAILEDTOALLOC(value))
+ return RET_ERROR_OOM;
+ v = value;
+ }
+ strlist_done(&filekeys);
+ } else {
+ value = NULL;
+ v = "";
+ }
+ } else if (q - p == 6 && strncasecmp(p, "{$type", 6) == 0) {
+ value = NULL;
+ v = atoms_packagetypes[target->packagetype];
+ } else if (q - p == 10 &&
+ strncasecmp(p, "{$codename", 10) == 0) {
+ value = NULL;
+ v = target->distribution->codename;
+ } else if (q - p == 14 &&
+ strncasecmp(p, "{$architecture", 14) == 0) {
+ value = NULL;
+ v = atoms_architectures[target->architecture];
+ } else if (q - p == 11 &&
+ strncasecmp(p, "{$component", 11) == 0) {
+ value = NULL;
+ v = atoms_components[target->component];
+ } else if (q - p == 8 && strncasecmp(p, "{$source", 8) == 0) {
+ r = package_getsource(package);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r)) {
+ value = NULL;
+ v = package->source;
+ } else {
+ value = NULL;
+ v = "";
+ }
+ } else if (q - p == 15 && strncasecmp(p, "{$sourceversion", 15) == 0) {
+ r = package_getsource(package);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r)) {
+ value = NULL;
+ v = package->sourceversion;
+ } else {
+ value = NULL;
+ v = "";
+ }
+ } else if (q - p == 8 && strncasecmp(p, "{package", 8) == 0) {
+ value = NULL;
+ v = package->name;
+ } else {
+ char *variable = strndup(p + 1, q - (p + 1));
+ if (FAILEDTOALLOC(variable))
+ return RET_ERROR_OOM;
+ r = chunk_getwholedata(package->control,
+ variable, &value);
+ free(variable);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r)) {
+ v = value;
+ while (*v != '\0' && xisspace(*v))
+ v++;
+ } else {
+ value = NULL;
+ v = "";
+ }
+ }
+ if (*q == ';') {
+			/* dpkg-query allows octal and hexadecimal,
+ * so we do, too */
+ length = strtol(q + 1, (char**)&p, 0);
+ if (*p != '}') {
+ free(value);
+ putchar('$');
+ putchar('{');
+ continue;
+ }
+ } else {
+ p = q;
+ length = 0;
+ }
+ /* as in dpkg-query, length 0 means unlimited */
+ if (length == 0) {
+ fputs(v, stdout);
+ } else {
+ long value_length = strlen(v);
+
+ if (length < 0) {
+ length = -length;
+ while (value_length < length) {
+ putchar(' ');
+ length--;
+ }
+ }
+ if (value_length > length) {
+ fwrite(v, length, 1, stdout);
+ length = 0;
+ } else if (value_length > 0) {
+ fwrite(v, value_length, 1, stdout);
+ length -= value_length;
+ }
+ while (length-- > 0)
+ putchar(' ');
+ }
+ free(value);
+ }
+ return RET_OK;
+}
+
diff --git a/printlistformat.h b/printlistformat.h
new file mode 100644
index 0000000..80a33d1
--- /dev/null
+++ b/printlistformat.h
@@ -0,0 +1,7 @@
#ifndef REPREPRO_PRINTLISTFORMAT
#define REPREPRO_PRINTLISTFORMAT

struct package;
/* print information about a package to stdout, expanding a
 * dpkg-query-style format string (first argument) */
retvalue listformat_print(const char *, struct package *);

#endif
diff --git a/pull.c b/pull.c
new file mode 100644
index 0000000..3c5673d
--- /dev/null
+++ b/pull.c
@@ -0,0 +1,1114 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "pull.h"
+#include "upgradelist.h"
+#include "distribution.h"
+#include "tracking.h"
+#include "termdecide.h"
+#include "filterlist.h"
+#include "log.h"
+#include "configparser.h"
+#include "package.h"
+
+/***************************************************************************
+ * step one: *
+ * parse CONFDIR/pull to get pull information saved in *
+ * pull_rule structs *
+ **************************************************************************/
+
/* the data of one pull rule, i.e. one upstream part to pull from;
 * some fields can be NULL or empty */
struct pull_rule {
	struct pull_rule *next;
	//e.g. "Name: woody"
	char *name;
	//e.g. "From: woody"
	char *from;
	//e.g. "Architectures: i386 sparc mips" (not set means all)
	struct atomlist architectures_from;
	struct atomlist architectures_into;
	bool architectures_set;
	//e.g. "Components: main contrib" (not set means all)
	struct atomlist components;
	bool components_set;
	//e.g. "UDebComponents: main" // (not set means all)
	struct atomlist udebcomponents;
	bool udebcomponents_set;
	// We don't have equivalents for ddebs yet since we don't know
	// what the Debian archive layout is going to look like
	// NULL means no condition
	/*@null@*/term *includecondition;
	struct filterlist filterlist;
	struct filterlist filtersrclist;
	/*----only set after _addsourcedistribution----*/
	/*@NULL@*/ struct distribution *distribution;
	bool used;
};
+
+static void pull_rule_free(/*@only@*/struct pull_rule *pull) {
+ if (pull == NULL)
+ return;
+ free(pull->name);
+ free(pull->from);
+ atomlist_done(&pull->architectures_from);
+ atomlist_done(&pull->architectures_into);
+ atomlist_done(&pull->components);
+ atomlist_done(&pull->udebcomponents);
+ term_free(pull->includecondition);
+ filterlist_release(&pull->filterlist);
+ filterlist_release(&pull->filtersrclist);
+ free(pull);
+}
+
+void pull_freerules(struct pull_rule *p) {
+ while (p != NULL) {
+ struct pull_rule *rule;
+
+ rule = p;
+ p = rule->next;
+ pull_rule_free(rule);
+ }
+}
+
/* configparser boilerplate: generate the linked-list constructor and
 * the per-field setter functions referenced by pullconfigfields below */
CFlinkedlistinit(pull_rule)
CFvalueSETPROC(pull_rule, name)
CFvalueSETPROC(pull_rule, from)
CFatomlistSETPROC(pull_rule, components, at_component)
CFatomlistSETPROC(pull_rule, udebcomponents, at_component)
CFfilterlistSETPROC(pull_rule, filterlist)
CFfilterlistSETPROC(pull_rule, filtersrclist)
CFtermSETPROC(pull_rule, includecondition)
+
/* custom setter for the "Architectures" field: config_getsplitatoms
 * fills two parallel lists (source and destination architecture for
 * each entry) */
CFUSETPROC(pull_rule, architectures) {
	CFSETPROCVAR(pull_rule, this);
	retvalue r;

	this->architectures_set = true;
	r = config_getsplitatoms(iter, "Architectures",
			at_architecture,
			&this->architectures_from,
			&this->architectures_into);
	if (r == RET_NOTHING) {
		/* an empty field is legal but almost certainly a
		 * mistake, so warn instead of silently accepting */
		fprintf(stderr,
"Warning parsing %s, line %u: an empty Architectures field\n"
"causes the whole rule to do nothing.\n",
			config_filename(iter),
			config_markerline(iter));
	}
	return r;
}
+
/* the fields a stanza in the pulls config file may contain;
 * CFr marks required fields, CF optional ones */
static const struct configfield pullconfigfields[] = {
	CFr("Name", pull_rule, name),
	CFr("From", pull_rule, from),
	CF("Architectures", pull_rule, architectures),
	CF("Components", pull_rule, components),
	CF("UDebComponents", pull_rule, udebcomponents),
	CF("FilterFormula", pull_rule, includecondition),
	CF("FilterSrcList", pull_rule, filtersrclist),
	CF("FilterList", pull_rule, filterlist)
};
+
/* Parse the "pulls" configuration file into a linked list of
 * pull_rule structs.  A missing or empty file is not an error:
 * *rules is set to NULL and RET_OK returned. */
retvalue pull_getrules(struct pull_rule **rules) {
	struct pull_rule *pull = NULL;
	retvalue r;

	r = configfile_parse("pulls", IGNORABLE(unknownfield),
			configparser_pull_rule_init, linkedlistfinish,
			"pull rule",
			pullconfigfields, ARRAYCOUNT(pullconfigfields), &pull);
	if (RET_IS_OK(r))
		*rules = pull;
	else if (r == RET_NOTHING) {
		assert (pull == NULL);
		*rules = NULL;
		r = RET_OK;
	} else {
		// TODO special handle unknownfield
		pull_freerules(pull);
	}
	return r;
}
+
+/***************************************************************************
+ * step two: *
+ * create pull_distribution structs to hold all additional information for *
+ * a distribution *
+ **************************************************************************/
+
+struct pull_target;
+static void pull_freetargets(struct pull_target *targets);
+
/* per-distribution pull state: the targets to update plus, in the
 * flexible array rules[], one entry per element of
 * distribution->pulls (NULL for the magic "-" delete rule) */
struct pull_distribution {
	struct pull_distribution *next;
	/*@dependant@*/struct distribution *distribution;
	struct pull_target *targets;
	/*@dependant@*/struct pull_rule *rules[];
};
+
+void pull_freedistributions(struct pull_distribution *d) {
+ while (d != NULL) {
+ struct pull_distribution *next;
+
+ next = d->next;
+ pull_freetargets(d->targets);
+ free(d);
+ d = next;
+ }
+}
+
+static retvalue pull_initdistribution(struct pull_distribution **pp,
+ struct distribution *distribution,
+ struct pull_rule *rules) {
+ struct pull_distribution *p;
+ int i;
+
+ assert(distribution != NULL);
+ if (distribution->pulls.count == 0)
+ return RET_NOTHING;
+
+ p = malloc(sizeof(struct pull_distribution)+
+ sizeof(struct pull_rules *)*distribution->pulls.count);
+ if (FAILEDTOALLOC(p))
+ return RET_ERROR_OOM;
+ p->next = NULL;
+ p->distribution = distribution;
+ p->targets = NULL;
+ for (i = 0 ; i < distribution->pulls.count ; i++) {
+ const char *name = distribution->pulls.values[i];
+ if (strcmp(name, "-") == 0) {
+ p->rules[i] = NULL;
+ } else {
+ struct pull_rule *rule = rules;
+ while (rule && strcmp(rule->name, name) != 0)
+ rule = rule->next;
+ if (rule == NULL) {
+ fprintf(stderr,
+"Error: Unknown pull rule '%s' in distribution '%s'!\n",
+ name, distribution->codename);
+ free(p);
+ return RET_ERROR_MISSING;
+ }
+ p->rules[i] = rule;
+ rule->used = true;
+ }
+ }
+ *pp = p;
+ return RET_OK;
+}
+
+static retvalue pull_init(struct pull_distribution **pulls,
+ struct pull_rule *rules,
+ struct distribution *distributions) {
+ struct pull_distribution *p = NULL, **pp = &p;
+ struct distribution *d;
+ retvalue r;
+
+ for (d = distributions ; d != NULL ; d = d->next) {
+ if (!d->selected)
+ continue;
+ r = pull_initdistribution(pp, d, rules);
+ if (RET_WAS_ERROR(r)) {
+ pull_freedistributions(p);
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ assert (*pp != NULL);
+ pp = &(*pp)->next;
+ }
+ }
+ *pulls = p;
+ return RET_OK;
+}
+
+/***************************************************************************
+ * step three: *
+ * load the config of the distributions mentioned in the rules *
+ **************************************************************************/
+
+static retvalue pull_loadsourcedistributions(struct distribution *alldistributions, struct pull_rule *rules) {
+ struct pull_rule *rule;
+ struct distribution *d;
+
+ for (rule = rules ; rule != NULL ; rule = rule->next) {
+ if (rule->used && rule->distribution == NULL) {
+ for (d = alldistributions ; d != NULL ; d = d->next) {
+ if (strcmp(d->codename, rule->from) == 0) {
+ rule->distribution = d;
+ break;
+ }
+ }
+ if (d == NULL) {
+ fprintf(stderr,
+"Error: Unknown distribution '%s' referenced in pull rule '%s'\n",
+ rule->from, rule->name);
+ return RET_ERROR_MISSING;
+ }
+ }
+ }
+ return RET_OK;
+}
+
+/***************************************************************************
+ * step four: *
+ * create pull_targets and pull_sources *
+ **************************************************************************/
+
/* one source a target can pull from; rule == NULL (and source ==
 * NULL) marks a delete rule created from the "-" pseudo rule */
struct pull_source {
	struct pull_source *next;
	/* NULL, if this is a delete rule */
	struct target *source;
	struct pull_rule *rule;
};
/* one target of the pulling distribution, its list of sources, and
 * (while computing) the resulting upgrade list */
struct pull_target {
	/*@null@*/struct pull_target *next;
	/*@null@*/struct pull_source *sources;
	/*@dependent@*/struct target *target;
	/*@null@*/struct upgradelist *upgradelist;
};
+
+static void pull_freetargets(struct pull_target *targets) {
+ while (targets != NULL) {
+ struct pull_target *target = targets;
+ targets = target->next;
+ while (target->sources != NULL) {
+ struct pull_source *source = target->sources;
+ target->sources = source->next;
+ free(source);
+ }
+ free(target);
+ }
+}
+
/* Append to the list tail *s one pull_source for every architecture
 * pair of the rule whose destination matches this target.  When the
 * rule does not restrict architectures or components, those of the
 * source distribution are used instead.
 * Returns RET_NOTHING if the target's component is not wanted. */
static retvalue pull_createsource(struct pull_rule *rule,
		struct target *target,
		struct pull_source ***s) {
	const struct atomlist *c;
	const struct atomlist *a_from, *a_into;
	int ai;

	assert (rule != NULL);
	assert (rule->distribution != NULL);

	if (rule->architectures_set) {
		a_from = &rule->architectures_from;
		a_into = &rule->architectures_into;
	} else {
		a_from = &rule->distribution->architectures;
		a_into = &rule->distribution->architectures;
	}
	if (target->packagetype == pt_udeb) {
		if (rule->udebcomponents_set)
			c = &rule->udebcomponents;
		else
			c = &rule->distribution->udebcomponents;
	} else {
		if (rule->components_set)
			c = &rule->components;
		else
			c = &rule->distribution->components;
	}

	if (!atomlist_in(c, target->component))
		return RET_NOTHING;

	/* a_from and a_into are parallel lists: entry ai pulls from
	 * architecture a_from[ai] into a_into[ai] */
	for (ai = 0 ; ai < a_into->count ; ai++) {
		struct pull_source *source;

		if (a_into->atoms[ai] != target->architecture)
			continue;

		source = NEW(struct pull_source);
		if (FAILEDTOALLOC(source))
			return RET_ERROR_OOM;

		source->next = NULL;
		source->rule = rule;
		source->source = distribution_getpart(rule->distribution,
				target->component,
				a_from->atoms[ai],
				target->packagetype);
		**s = source;
		*s = &source->next;
	}
	return RET_OK;
}
+
+static retvalue pull_createdelete(struct pull_source ***s) {
+ struct pull_source *source;
+
+ source = NEW(struct pull_source);
+ if (FAILEDTOALLOC(source))
+ return RET_ERROR_OOM;
+
+ source->next = NULL;
+ source->rule = NULL;
+ source->source = NULL;
+ **s = source;
+ *s = &source->next;
+ return RET_OK;
+}
+
/* Create the pull_target for one target of pd and populate its
 * source list, one entry per applicable pull rule, in rule order. */
static retvalue generatepulltarget(struct pull_distribution *pd, struct target *target) {
	struct pull_source **s;
	struct pull_target *pt;
	retvalue r;
	int i;

	pt = NEW(struct pull_target);
	if (FAILEDTOALLOC(pt))
		return RET_ERROR_OOM;
	pt->target = target;
	pt->next = pd->targets;
	pt->upgradelist = NULL;
	pt->sources = NULL;
	s = &pt->sources;
	/* link pt in before filling it, so that on error the partially
	 * built list is still freed via pull_freetargets */
	pd->targets = pt;

	for (i = 0 ; i < pd->distribution->pulls.count ; i++) {
		struct pull_rule *rule = pd->rules[i];

		/* NULL is the "-" pseudo rule: delete everything */
		if (rule == NULL)
			r = pull_createdelete(&s);
		else
			r = pull_createsource(rule, target, &s);
		if (RET_WAS_ERROR(r))
			return r;
	}

	return RET_OK;
}
+
+static retvalue pull_generatetargets(struct pull_distribution *pull_distributions, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) {
+ struct pull_distribution *pd;
+ struct target *target;
+ retvalue r;
+
+ for (pd = pull_distributions ; pd != NULL ; pd = pd->next) {
+ for (target = pd->distribution->targets ; target != NULL ;
+ target = target->next) {
+
+ if (!target_matches(target, components, architectures, packagetypes))
+ continue;
+
+ r = generatepulltarget(pd, target);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ return RET_OK;
+}
+
+/***************************************************************************
+ * Some checking to be able to warn against typos *
+ **************************************************************************/
+
/* Allocate one bool per list entry, pre-marking duplicate atoms as
 * already found so each atom is warned about at most once.
 * Returns NULL on allocation failure (callers then skip the check). */
static bool *preparefoundlist(const struct atomlist *list) {
	bool *found;
	int i, j;

	found = nzNEW(list->count, bool);
	if (FAILEDTOALLOC(found))
		return found;
	for (i = 0 ; i < list->count ; i++) {
		if (found[i])
			continue;
		/* mark later occurrences of the same atom */
		for (j = i + 1 ; j < list->count ; j++)
			if (list->atoms[i] == list->atoms[j])
				found[j] = true;
	}
	return found;
}
+
+
+static inline void markasused(const struct strlist *pulls, const char *rulename, const struct atomlist *needed, const struct atomlist *have, bool *found) {
+ int i, j, o;
+
+ for (i = 0 ; i < pulls->count ; i++) {
+ if (strcmp(pulls->values[i], rulename) != 0)
+ continue;
+ for (j = 0 ; j < have->count ; j++) {
+ o = atomlist_ofs(needed, have->atoms[j]);
+ if (o >= 0)
+ found[o] = true;
+ }
+ }
+}
+
/* Warn about every architecture the rule names that no distribution
 * using the rule actually has.  Purely advisory; allocation failure
 * silently skips the check.  (Nearly identical to the component and
 * udeb-component variants below; kept separate because the warning
 * texts differ.) */
static void checkifarchitectureisused(const struct atomlist *architectures, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) {
	bool *found;
	const struct distribution *d;
	int i;

	assert (rule != NULL);
	if (architectures->count == 0)
		return;
	found = preparefoundlist(architectures);
	if (found == NULL)
		return;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		markasused(&d->pulls, rule->name,
				architectures, &d->architectures,
				found);
	}
	for (i = 0 ; i < architectures->count ; i++) {
		if (found[i])
			continue;
		fprintf(stderr,
"Warning: pull rule '%s' wants to %s architecture '%s',\n"
"but no distribution using this has such an architecture.\n"
"(This will simply be ignored and is not even checked when using --fast).\n",
			rule->name, action,
			atoms_architectures[architectures->atoms[i]]);
	}
	free(found);
	return;
}
+
/* Warn about every component the rule names that no distribution
 * using the rule actually has (see checkifarchitectureisused). */
static void checkifcomponentisused(const struct atomlist *components, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) {
	bool *found;
	const struct distribution *d;
	int i;

	assert (rule != NULL);
	if (components->count == 0)
		return;
	found = preparefoundlist(components);
	if (found == NULL)
		return;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		markasused(&d->pulls, rule->name,
				components, &d->components,
				found);
	}
	for (i = 0 ; i < components->count ; i++) {
		if (found[i])
			continue;
		fprintf(stderr,
"Warning: pull rule '%s' wants to %s component '%s',\n"
"but no distribution using this has such an component.\n"
"(This will simply be ignored and is not even checked when using --fast).\n",
			rule->name, action,
			atoms_components[components->atoms[i]]);
	}
	free(found);
	return;
}
+
/* Warn about every udeb component the rule names that no distribution
 * using the rule actually has (see checkifarchitectureisused). */
static void checkifudebcomponentisused(const struct atomlist *udebcomponents, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) {
	bool *found;
	const struct distribution *d;
	int i;

	assert (rule != NULL);
	if (udebcomponents->count == 0)
		return;
	found = preparefoundlist(udebcomponents);
	if (found == NULL)
		return;
	for (d = alldistributions ; d != NULL ; d = d->next) {
		markasused(&d->pulls, rule->name,
				udebcomponents, &d->udebcomponents,
				found);
	}
	for (i = 0 ; i < udebcomponents->count ; i++) {
		if (found[i])
			continue;
		fprintf(stderr,
"Warning: pull rule '%s' wants to %s udeb component '%s',\n"
"but no distribution using this has such an udeb component.\n"
"(This will simply be ignored and is not even checked when using --fast).\n",
			rule->name, action,
			atoms_components[udebcomponents->atoms[i]]);
	}
	free(found);
	return;
}
+
/* Warn about every atom of 'needed' that is not part of 'have',
 * i.e. something the rule requests that its source distribution does
 * not offer.  'atoms' translates atom numbers back to names. */
static void checksubset(const struct atomlist *needed, const struct atomlist *have, const char *rulename, const char *from, const char *what, const char **atoms) {
	int i, j;

	for (i = 0 ; i < needed->count ; i++) {
		atom_t value = needed->atoms[i];

		/* skip values already seen, so duplicates are only
		 * reported once */
		for (j = 0 ; j < i ; j++) {
			if (value == needed->atoms[j])
				break;
		}
		if (j < i)
			continue;

		if (!atomlist_in(have, value)) {
			fprintf(stderr,
"Warning: pull rule '%s' wants to get something from %s '%s',\n"
"but there is no such %s in distribution '%s'.\n"
"(This will simply be ignored and is not even checked when using --fast).\n",
				rulename, what,
				atoms[value], what, from);
		}
	}
}
+
+static void searchunused(const struct distribution *alldistributions, const struct pull_rule *rule) {
+ if (rule->distribution != NULL) {
+ // TODO: move this part of the checks into parsing?
+ checksubset(&rule->architectures_from,
+ &rule->distribution->architectures,
+ rule->name, rule->from, "architecture",
+ atoms_architectures);
+ checksubset(&rule->components,
+ &rule->distribution->components,
+ rule->name, rule->from, "component",
+ atoms_components);
+ checksubset(&rule->udebcomponents,
+ &rule->distribution->udebcomponents,
+ rule->name, rule->from, "udeb component",
+ atoms_components);
+ }
+
+ if (rule->distribution == NULL) {
+ assert (strcmp(rule->from, "*") == 0);
+ checkifarchitectureisused(&rule->architectures_from,
+ alldistributions, rule, "get something from");
+ /* no need to check component and udebcomponent, as those
+ * are the same with the others */
+ }
+ checkifarchitectureisused(&rule->architectures_into,
+ alldistributions, rule, "put something into");
+ checkifcomponentisused(&rule->components,
+ alldistributions, rule, "put something into");
+ checkifudebcomponentisused(&rule->udebcomponents,
+ alldistributions, rule, "put something into");
+}
+
+static void pull_searchunused(const struct distribution *alldistributions, struct pull_rule *pull_rules) {
+ struct pull_rule *rule;
+
+ for (rule = pull_rules ; rule != NULL ; rule = rule->next) {
+ if (!rule->used)
+ continue;
+
+ searchunused(alldistributions, rule);
+ }
+}
+
+/***************************************************************************
+ * combination of the steps two, three and four *
+ **************************************************************************/
+
/* Combine steps two to four: build the pull_distribution list,
 * resolve source distributions, optionally run the typo checks
 * (skipped with --fast), and precompute the pull targets.
 * On success *pd owns the resulting list. */
retvalue pull_prepare(struct distribution *alldistributions, struct pull_rule *rules, bool fast, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct pull_distribution **pd) {
	struct pull_distribution *pulls;
	retvalue r;

	r = pull_init(&pulls, rules, alldistributions);
	if (RET_WAS_ERROR(r))
		return r;

	r = pull_loadsourcedistributions(alldistributions, rules);
	if (RET_WAS_ERROR(r)) {
		pull_freedistributions(pulls);
		return r;
	}
	if (!fast)
		pull_searchunused(alldistributions, rules);

	r = pull_generatetargets(pulls, components, architectures, types);
	if (RET_WAS_ERROR(r)) {
		pull_freedistributions(pulls);
		return r;
	}
	*pd = pulls;
	return RET_OK;
}
+
+/***************************************************************************
+ * step five: *
+ * decide what gets pulled *
+ **************************************************************************/
+
/* Decide whether a candidate package from a pull source should be
 * taken.  Filters are evaluated in order, later ones able to
 * override earlier decisions:
 *  1. the rule's FilterList/FilterSrcList,
 *  2. the global command-line source filter,
 *  3. the global command-line binary filter (non-dsc targets only),
 *  4. the rule's FilterFormula (last, as it is the most expensive).
 * privdata is the pull_rule this source was created from. */
static upgrade_decision ud_decide_by_rule(void *privdata, struct target *target, struct package *new, /*@null@*/const char *old_version) {
	struct pull_rule *rule = privdata;
	upgrade_decision decision = UD_UPGRADE;
	retvalue r;
	struct filterlist *fl;
	const char *n, *v;
	bool cmdline_still_undecided;

	if (target->packagetype == pt_dsc) {
		/* for source packages, name==source and
		 * version==sourceversion */
		assert (strcmp(new->name, new->source) == 0);
		assert (strcmp(new->version, new->sourceversion) == 0);
		if (rule->filtersrclist.set)
			fl = &rule->filtersrclist;
		else
			fl = &rule->filterlist;
		n = new->name;
		v = new->version;
	} else {
		if (rule->filterlist.set) {
			fl = &rule->filterlist;
			n = new->name;
			v = new->version;
		} else {
			/* no binary filter list: fall back to
			 * filtering by source package */
			fl = &rule->filtersrclist;
			n = new->source;
			v = new->sourceversion;
		}
	}

	switch (filterlist_find(n, v, fl)) {
		case flt_deinstall:
		case flt_purge:
			return UD_NO;
		case flt_warning:
			return UD_LOUDNO;
		case flt_supersede:
			decision = UD_SUPERSEDE;
			break;
		case flt_hold:
			decision = UD_HOLD;
			break;
		case flt_error:
			/* cannot yet be handled! */
			fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", new->name);
			return UD_ERROR;
		case flt_upgradeonly:
			if (old_version == NULL)
				return UD_NO;
			break;
		case flt_install:
			break;
		case flt_unchanged:
		case flt_auto_hold:
			/* not allowed in per-rule filter lists */
			assert (false);
			break;
	}

	/* second stage: the command-line source filter */
	cmdline_still_undecided = false;
	switch (filterlist_find(new->source, new->sourceversion,
				&cmdline_src_filter)) {
		case flt_deinstall:
		case flt_purge:
			return UD_NO;
		case flt_warning:
			return UD_LOUDNO;
		case flt_auto_hold:
			/* hold, unless the binary filter below
			 * decides otherwise */
			cmdline_still_undecided = true;
			decision = UD_HOLD;
			break;
		case flt_hold:
			decision = UD_HOLD;
			break;
		case flt_supersede:
			decision = UD_SUPERSEDE;
			break;
		case flt_error:
			/* cannot yet be handled! */
			fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", new->name);
			return UD_ERROR;
		case flt_upgradeonly:
			if (old_version == NULL)
				return UD_NO;
			break;
		case flt_install:
			decision = UD_UPGRADE;
			break;
		case flt_unchanged:
			cmdline_still_undecided = true;
			break;
	}


	/* third stage: the command-line binary filter (binaries only) */
	if (target->packagetype != pt_dsc) {
		switch (filterlist_find(new->name, new->version,
					&cmdline_bin_filter)) {
			case flt_deinstall:
			case flt_purge:
				return UD_NO;
			case flt_warning:
				return UD_LOUDNO;
			case flt_hold:
				decision = UD_HOLD;
				break;
			case flt_supersede:
				decision = UD_SUPERSEDE;
				break;
			case flt_error:
				/* cannot yet be handled! */
				fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", new->name);
				return UD_ERROR;
			case flt_upgradeonly:
				if (old_version == NULL)
					return UD_NO;
				break;
			case flt_install:
				decision = UD_UPGRADE;
				break;
			case flt_unchanged:
				break;
			case flt_auto_hold:
				/* hold only if it was not in the src-filter */
				if (cmdline_still_undecided)
					decision = UD_HOLD;
				break;
		}
	} else if (cmdline_bin_filter.defaulttype == flt_auto_hold) {
		/* sources have no binary filter entry: apply the
		 * default auto-hold if still undecided */
		if (cmdline_still_undecided)
			decision = UD_HOLD;
	}

	/* formula tested last as it is the most expensive */
	if (rule->includecondition != NULL) {
		r = term_decidepackage(rule->includecondition, new, target);
		if (RET_WAS_ERROR(r))
			return UD_ERROR;
		if (r == RET_NOTHING) {
			return UD_NO;
		}
	}

	return decision;
}
+
/* Compute the upgrade list of one pull target by letting every
 * source (in rule order) mark packages; a NULL rule marks everything
 * for deletion.  'out' may be NULL to suppress progress output. */
static inline retvalue pull_searchformissing(/*@null@*/FILE *out, struct pull_target *p) {
	struct pull_source *source;
	retvalue result, r;

	if (verbose > 2 && out != NULL)
		fprintf(out, "  pulling into '%s'\n", p->target->identifier);
	assert(p->upgradelist == NULL);
	r = upgradelist_initialize(&p->upgradelist, p->target);
	if (RET_WAS_ERROR(r))
		return r;

	result = RET_NOTHING;

	for (source=p->sources ; source != NULL ; source=source->next) {

		if (source->rule == NULL) {
			/* the "-" pseudo rule */
			if (verbose > 4 && out != NULL)
				fprintf(out,
"  marking everything to be deleted\n");
			r = upgradelist_deleteall(p->upgradelist);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				return result;
			continue;
		}

		if (verbose > 4 && out != NULL)
			fprintf(out, "  looking what to get from '%s'\n",
					source->source->identifier);
		r = upgradelist_pull(p->upgradelist, source->source,
				ud_decide_by_rule, source->rule, source);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			return result;
	}

	return result;
}
+
+static retvalue pull_search(/*@null@*/FILE *out, struct pull_distribution *d) {
+ retvalue result, r;
+ struct pull_target *u;
+
+ result = RET_NOTHING;
+ for (u=d->targets ; u != NULL ; u=u->next) {
+ r = pull_searchformissing(out, u);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ }
+ return result;
+}
+
/* Return true if any target of this distribution would delete "too
 * much" (--onlysmalldeletes guard).  In that case the distribution
 * is marked omitted and all its upgrade lists are discarded. */
static bool pull_isbigdelete(struct pull_distribution *d) {
	struct pull_target *u, *v;

	for (u = d->targets ; u != NULL ; u=u->next) {
		if (upgradelist_isbigdelete(u->upgradelist)) {
			d->distribution->omitted = true;
			for (v = d->targets ; v != NULL ; v = v->next) {
				upgradelist_free(v->upgradelist);
				v->upgradelist = NULL;
			}
			return true;
		}
	}
	return false;
}
+
+
+static void pull_from_callback(void *privdata, const char **rule_p, const char **from_p) {
+ struct pull_source *source = privdata;
+
+ *rule_p = source->rule->name;
+ *from_p = source->rule->from;
+}
+
/* Apply the computed upgrade lists of all targets of one
 * distribution, freeing each list afterwards, then retrack if
 * tracking is enabled for the distribution. */
static retvalue pull_install(struct pull_distribution *distribution) {
	retvalue result, r;
	struct pull_target *u;
	struct distribution *d = distribution->distribution;

	assert (logger_isprepared(d->logger));

	result = RET_NOTHING;
	for (u=distribution->targets ; u != NULL ; u=u->next) {
		r = upgradelist_install(u->upgradelist, d->logger,
				false, pull_from_callback);
		/* record errors in the distribution, too, so it is
		 * not exported as if everything went fine */
		RET_UPDATE(d->status, r);
		RET_UPDATE(result, r);
		upgradelist_free(u->upgradelist);
		u->upgradelist = NULL;
		if (RET_WAS_ERROR(r))
			break;
	}
	if (RET_IS_OK(result) && d->tracking != dt_NONE) {
		r = tracking_retrack(d, false);
		RET_ENDUPDATE(result, r);
	}
	return result;
}
+
/* upgradelist_dump callback: print a human-readable line describing
 * what would happen to one package (checkpull output).
 * NOTE(review): newversion == oldversion compares pointers — the
 * upgrade list apparently passes the identical pointer to signal
 * "kept"; confirm against upgradelist.c. */
static void pull_dumppackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) {
	struct pull_source *source = privdata;

	if (newversion == NULL) {
		if (oldversion != NULL && bestcandidate != NULL) {
			printf("'%s': '%s' will be deleted"
					" (best new: '%s')\n",
					packagename, oldversion, bestcandidate);
		} else if (oldversion != NULL) {
			printf("'%s': '%s' will be deleted"
					" (no longer available or superseded)\n",
					packagename, oldversion);
		} else {
			printf("'%s': will NOT be added as '%s'\n",
					packagename, bestcandidate);
		}
	} else if (newversion == oldversion) {
		/* package is kept at its current version */
		if (bestcandidate != NULL) {
			if (verbose > 1)
				printf("'%s': '%s' will be kept"
						" (best new: '%s')\n",
						packagename, oldversion,
						bestcandidate);
		} else {
			if (verbose > 0)
				printf("'%s': '%s' will be kept"
						" (unavailable for reload)\n",
						packagename, oldversion);
		}
	} else {
		const char *via = source->rule->name;

		assert (newfilekeys != NULL);
		assert (newcontrol != NULL);
		if (oldversion != NULL)
			(void)printf("'%s': '%s' will be upgraded"
					" to '%s' (from '%s'):\n files needed: ",
					packagename, oldversion,
					newversion, via);
		else
			(void)printf("'%s': newly installed"
					" as '%s' (from '%s'):\n files needed: ",
					packagename, newversion, via);
		(void)strlist_fprint(stdout, newfilekeys);
		if (verbose > 2)
			(void)printf("\n installing as: '%s'\n",
					newcontrol);
		else
			(void)putchar('\n');
	}
}
+
+static void pull_dump(struct pull_distribution *distribution) {
+ struct pull_target *u;
+
+ for (u=distribution->targets ; u != NULL ; u=u->next) {
+ if (u->upgradelist == NULL)
+ continue;
+ printf("Updates needed for '%s':\n", u->target->identifier);
+ upgradelist_dump(u->upgradelist, pull_dumppackage);
+ upgradelist_free(u->upgradelist);
+ u->upgradelist = NULL;
+ }
+}
+
/* upgradelist_dump callback: machine-parsable variant of
 * pull_dumppackage (dumppull output: delete/keep/update/add lines).
 * NOTE(review): newversion == oldversion is a pointer comparison,
 * apparently the upgrade list's "kept" signal — confirm against
 * upgradelist.c. */
static void pull_dumplistpackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) {
	struct pull_source *source = privdata;

	if (newversion == NULL) {
		if (oldversion == NULL)
			return;
		printf("delete '%s' '%s'\n", packagename, oldversion);
	} else if (newversion == oldversion) {
		if (bestcandidate != NULL)
			printf("keep '%s' '%s' '%s'\n", packagename,
					oldversion, bestcandidate);
		else
			printf("keep '%s' '%s' unavailable\n", packagename,
					oldversion);
	} else {
		const char *via = source->rule->name;

		assert (newfilekeys != NULL);
		assert (newcontrol != NULL);
		if (oldversion != NULL)
			(void)printf("update '%s' '%s' '%s' '%s'\n",
					packagename, oldversion,
					newversion, via);
		else
			(void)printf("add '%s' - '%s' '%s'\n",
					packagename, newversion, via);
	}
}
+
+static void pull_dumplist(struct pull_distribution *distribution) {
+ struct pull_target *u;
+
+ for (u=distribution->targets ; u != NULL ; u=u->next) {
+ if (u->upgradelist == NULL)
+ continue;
+ printf("Updates needed for '%s':\n", u->target->identifier);
+ upgradelist_dump(u->upgradelist, pull_dumplistpackage);
+ upgradelist_free(u->upgradelist);
+ u->upgradelist = NULL;
+ }
+}
+
/* The actual "pull" action: compute the upgrade lists of all
 * distributions, then install them (honouring --onlysmalldeletes).
 * All upgrade lists are freed on every path. */
retvalue pull_update(struct pull_distribution *distributions) {
	retvalue result, r;
	struct pull_distribution *d;

	for (d=distributions ; d != NULL ; d=d->next) {
		r = distribution_prepareforwriting(d->distribution);
		if (RET_WAS_ERROR(r))
			return r;
		r = distribution_loadalloverrides(d->distribution);
		if (RET_WAS_ERROR(r))
			return r;
	}

	if (verbose >= 0)
		printf("Calculating packages to pull...\n");

	result = RET_NOTHING;

	for (d=distributions ; d != NULL ; d=d->next) {
		r = pull_search(stdout, d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		// TODO: make already here sure the files are ready?
	}
	if (RET_WAS_ERROR(result)) {
		/* discard all partially computed upgrade lists */
		for (d=distributions ; d != NULL ; d=d->next) {
			struct pull_target *u;
			for (u=d->targets ; u != NULL ; u=u->next) {
				upgradelist_free(u->upgradelist);
				u->upgradelist = NULL;
			}
		}
		return result;
	}
	if (verbose >= 0)
		printf("Installing (and possibly deleting) packages...\n");

	for (d=distributions ; d != NULL ; d=d->next) {
		if (global.onlysmalldeletes) {
			if (pull_isbigdelete(d)) {
				fprintf(stderr,
"Not processing '%s' because of --onlysmalldeletes\n",
					d->distribution->codename);
				continue;
			}
		}
		r = pull_install(d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	/* let spawned log notificators finish before returning */
	logger_wait();

	return result;
}
+
+retvalue pull_checkupdate(struct pull_distribution *distributions) {
+ struct pull_distribution *d;
+ retvalue result, r;
+
+ for (d=distributions ; d != NULL ; d=d->next) {
+ r = distribution_loadalloverrides(d->distribution);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ if (verbose >= 0)
+ fprintf(stderr, "Calculating packages to get...\n");
+
+ result = RET_NOTHING;
+
+ for (d=distributions ; d != NULL ; d=d->next) {
+ r = pull_search(stderr, d);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ pull_dump(d);
+ }
+
+ return result;
+}
+
+retvalue pull_dumpupdate(struct pull_distribution *distributions) {
+ struct pull_distribution *d;
+ retvalue result, r;
+
+ for (d=distributions ; d != NULL ; d=d->next) {
+ r = distribution_loadalloverrides(d->distribution);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ result = RET_NOTHING;
+
+ for (d=distributions ; d != NULL ; d=d->next) {
+ r = pull_search(NULL, d);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r))
+ break;
+ pull_dumplist(d);
+ }
+
+ return result;
+}
diff --git a/pull.h b/pull.h
new file mode 100644
index 0000000..e19b41d
--- /dev/null
+++ b/pull.h
@@ -0,0 +1,31 @@
+#ifndef REPREPRO_PULLS_H
+#define REPREPRO_PULLS_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_RELEASE_H
+#include "release.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+
+struct pull_rule;
+struct pull_distribution;
+
+retvalue pull_getrules(/*@out@*/struct pull_rule **);
+
+void pull_freerules(/*@only@*/struct pull_rule *p);
+void pull_freedistributions(/*@only@*/struct pull_distribution *p);
+
+retvalue pull_prepare(struct distribution *, struct pull_rule *, bool fast, /*@null@*/const struct atomlist */*components*/,/*@null@*/const struct atomlist */*architectures*/,/*@null@*/const struct atomlist */*packagetypes*/, /*@out@*/struct pull_distribution **);
+retvalue pull_update(struct pull_distribution *);
+retvalue pull_checkupdate(struct pull_distribution *);
+retvalue pull_dumpupdate(struct pull_distribution *);
+
+#endif
diff --git a/readtextfile.c b/readtextfile.c
new file mode 100644
index 0000000..ca74689
--- /dev/null
+++ b/readtextfile.c
@@ -0,0 +1,140 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include "error.h"
+#include "names.h"
+#include "chunks.h"
+#include "readtextfile.h"
+
+/* This file supplies code to read a text file (.changes, .dsc, Release, ...)
+ * into a chunk, warning if it is too long or if it contains binary data */
+
+/* Return true (after printing a diagnostic naming <source>) if <buffer>
+ * contains a byte that marks the data as binary rather than text:
+ * any control character below ' ' except tab, newline and carriage
+ * return. */
+static bool isbinarydata(const char *buffer, size_t len, const char *source) {
+	size_t i;
+	unsigned char c;
+
+	for (i = 0 ; i < len ; i++) {
+		/* cast so the comparison is well-defined even where
+		 * plain char is signed */
+		c = (unsigned char)buffer[i];
+		if (c < ' ' && c != '\t' && c != '\n' && c != '\r') {
+			fprintf(stderr,
+"Unexpected binary character \\%03hho in %s\n",
+				c, source);
+			return true;
+		}
+	}
+	return false;
+}
+
+/* Read all of file descriptor <fd> into a newly malloc'd, '\0'-terminated
+ * buffer returned in *data (length, excluding the terminator, stored in
+ * *len if len is not NULL).  Fails if the content looks binary (see
+ * isbinarydata) or grows past roughly 10 MB.  <source> is used only in
+ * error messages.  On success the caller owns *data. */
+retvalue readtextfilefd(int fd, const char *source, char **data, size_t *len) {
+	size_t buffersize = 102400, readdata = 0;
+	ssize_t readbytes;
+	char *buffer, *h;
+
+	buffer = malloc(buffersize);
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+	errno = 0;
+	while ((readbytes = read(fd, buffer + readdata, buffersize-readdata))
+			> 0) {
+
+		/* text files are normally small, so it does not hurt to check
+		 * the whole of them always */
+		if (isbinarydata(buffer + readdata, (size_t)readbytes, source)) {
+			free(buffer);
+			return RET_ERROR;
+		}
+		readdata += readbytes;
+		assert (readdata <= buffersize);
+		/* keep at least 1 KB spare so the next read() has room */
+		if (readdata + 1024 >= buffersize) {
+			if (buffersize >= 10*1024*1024) {
+				fprintf(stderr, "Ridiculously large %s\n", source);
+				free(buffer);
+				return RET_ERROR;
+			}
+			buffersize += 51200;
+			h = realloc(buffer, buffersize);
+			if (FAILEDTOALLOC(h)) {
+				free(buffer);
+				return RET_ERROR_OOM;
+			}
+			buffer = h;
+		}
+	}
+	if (readbytes < 0) {
+		int e = errno;
+		free(buffer);
+		fprintf(stderr, "Error reading %s: %s\n", source,
+				strerror(e));
+		return RET_ERRNO(e);
+	}
+	/* shrink to the needed size; a failed shrink is only fatal when the
+	 * old buffer really has no room left for the '\0' terminator
+	 * (the loop above guarantees readdata < buffersize, so normally
+	 * the old buffer is kept and used as-is) */
+	h = realloc(buffer, readdata + 1);
+	if (h == NULL) {
+#ifdef SPLINT
+		h = NULL;
+#endif
+		if (readdata >= buffersize) {
+			free(buffer);
+			return RET_ERROR_OOM;
+		}
+	} else
+		buffer = h;
+	buffer[readdata] = '\0';
+	*data = buffer;
+	if (len != NULL)
+		*len = readdata;
+	return RET_OK;
+}
+
+/* Open the text file <source> and read it fully via readtextfilefd.
+ * <sourcetoshow> is the name used in diagnostics (may differ from the
+ * actual path).  On success the caller owns *data; *len (if non-NULL)
+ * receives the length without the '\0' terminator. */
+retvalue readtextfile(const char *source, const char *sourcetoshow, char **data, size_t *len) {
+	int fd; char *buffer; size_t bufferlen;
+	retvalue r;
+	int ret;
+
+	fd = open(source, O_RDONLY|O_NOCTTY);
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error opening '%s': %s\n",
+				sourcetoshow, strerror(e));
+		return RET_ERRNO(e);
+	}
+	r = readtextfilefd(fd, sourcetoshow, &buffer, &bufferlen);
+	if (!RET_IS_OK(r)) {
+		(void)close(fd);
+		return r;
+	}
+	/* close() is checked: a delayed read error can surface here */
+	ret = close(fd);
+	if (ret != 0) {
+		int e = errno;
+		free(buffer);
+		fprintf(stderr, "Error reading %s: %s\n", sourcetoshow,
+				strerror(e));
+		return RET_ERRNO(e);
+	}
+	*data = buffer;
+	if (len != NULL)
+		*len = bufferlen;
+	return RET_OK;
+}
diff --git a/readtextfile.h b/readtextfile.h
new file mode 100644
index 0000000..bc91e80
--- /dev/null
+++ b/readtextfile.h
@@ -0,0 +1,16 @@
+#ifndef REPREPRO_READTEXTFILE
+#define REPREPRO_READTEXTFILE
+
+/* Guarded includes (project convention): warn if the including .c file
+ * did not already include these headers itself. */
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening here?"
+#endif
+#ifndef REPREPRO_GLOBALS_H
+#include "globals.h"
+#warning "What's happening here?"
+#endif
+
+retvalue readtextfilefd(int, const char *, /*@out@*/char **, /*@null@*//*@out@*/size_t *);
+retvalue readtextfile(const char *, const char *, /*@out@*/char **, /*@null@*//*@out@*/size_t *);
+
+#endif
diff --git a/reference.c b/reference.c
new file mode 100644
index 0000000..dc28b9e
--- /dev/null
+++ b/reference.c
@@ -0,0 +1,231 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "error.h"
+#include "strlist.h"
+#include "names.h"
+#include "dirs.h"
+#include "database_p.h"
+#include "pool.h"
+#include "reference.h"
+
+retvalue references_isused( const char *what) {
+ return table_gettemprecord(rdb_references, what, NULL, NULL);
+}
+
+retvalue references_check(const char *referee, const struct strlist *filekeys) {
+ int i;
+ retvalue result, r;
+
+ result = RET_NOTHING;
+ for (i = 0 ; i < filekeys->count ; i++) {
+ r = table_checkrecord(rdb_references,
+ filekeys->values[i], referee);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Missing reference to '%s' by '%s'\n",
+ filekeys->values[i], referee);
+ r = RET_ERROR;
+ }
+ RET_UPDATE(result, r);
+ }
+ return result;
+}
+
+/* add an reference to a file for an identifier. multiple calls */
+retvalue references_increment(const char *needed, const char *neededby) {
+ retvalue r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: references_insert(needed=%s, neededby=%s) called.\n",
+ needed, neededby);
+
+ r = table_addrecord(rdb_references, needed,
+ neededby, strlen(neededby), false);
+ if (RET_IS_OK(r) && verbose > 8)
+ printf("Adding reference to '%s' by '%s'\n", needed, neededby);
+ return r;
+}
+
+/* remove reference for a file from a given reference */
+retvalue references_decrement(const char *needed, const char *neededby) {
+ retvalue r;
+
+ r = table_removerecord(rdb_references, needed, neededby);
+ if (r == RET_NOTHING)
+ return r;
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error while trying to removing reference to '%s' by '%s'\n",
+ needed, neededby);
+ return r;
+ }
+ if (verbose > 8)
+ fprintf(stderr, "Removed reference to '%s' by '%s'\n",
+ needed, neededby);
+ if (RET_IS_OK(r)) {
+ retvalue r2;
+ r2 = pool_dereferenced(needed);
+ RET_UPDATE(r, r2);
+ }
+ return r;
+}
+
+/* Add an reference by <identifier> for the given <files>,
+ * excluding <exclude>, if it is nonNULL. */
+retvalue references_insert(const char *identifier,
+ const struct strlist *files, const struct strlist *exclude) {
+ retvalue result, r;
+ int i;
+
+ if (verbose >= 15) {
+ fprintf(stderr, "trace: references_insert(identifier=%s, files=[", identifier);
+ for (i = 0 ; i < files->count ; i++) {
+ fprintf(stderr, "%s%s", i == 0 ? "" : ", ", files->values[i]);
+ }
+ fprintf(stderr, "], exclude=%s", exclude == NULL ? "NULL" : "[");
+ if (exclude != NULL) {
+ for (i = 0 ; i < exclude->count ; i++) {
+ fprintf(stderr, "%s%s", i == 0 ? "" : ", ", exclude->values[i]);
+ }
+ }
+ fprintf(stderr, "%s) called.\n", exclude == NULL ? "" : "]");
+ }
+
+ result = RET_NOTHING;
+
+ for (i = 0 ; i < files->count ; i++) {
+ const char *filename = files->values[i];
+
+ if (exclude == NULL || !strlist_in(exclude, filename)) {
+ r = references_increment(filename, identifier);
+ RET_UPDATE(result, r);
+ }
+ }
+ return result;
+}
+
+/* add possible already existing references */
+retvalue references_add(const char *identifier, const struct strlist *files) {
+ int i;
+ retvalue r;
+
+ for (i = 0 ; i < files->count ; i++) {
+ const char *filekey = files->values[i];
+ r = table_addrecord(rdb_references, filekey,
+ identifier, strlen(identifier), true);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_OK;
+}
+
+/* Remove reference by <identifier> for the given <oldfiles>,
+ * excluding <exclude>, if it is nonNULL. */
+retvalue references_delete(const char *identifier, const struct strlist *files, const struct strlist *exclude) {
+ retvalue result, r;
+ int i;
+
+ assert (files != NULL);
+
+ result = RET_NOTHING;
+
+ for (i = 0 ; i < files->count ; i++) {
+ const char *filekey = files->values[i];
+
+ if (exclude == NULL || !strlist_in(exclude, filekey)) {
+ r = references_decrement(filekey, identifier);
+ RET_UPDATE(result, r);
+ }
+ }
+ return result;
+
+}
+
+/* remove all references from a given identifier */
+retvalue references_remove(const char *neededby) {
+ struct cursor *cursor;
+ retvalue result, r;
+ const char *found_to, *found_by;
+ size_t datalen, l;
+
+ r = table_newglobalcursor(rdb_references, true, &cursor);
+ if (!RET_IS_OK(r))
+ return r;
+
+ l = strlen(neededby);
+
+ result = RET_NOTHING;
+ while (cursor_nexttempdata(rdb_references, cursor,
+ &found_to, &found_by, &datalen)) {
+
+ if (datalen >= l && strncmp(found_by, neededby, l) == 0 &&
+ (found_by[l] == '\0' || found_by[l] == ' ')) {
+ if (verbose > 8)
+ fprintf(stderr,
+"Removing reference to '%s' by '%s'\n",
+ found_to, neededby);
+ r = cursor_delete(rdb_references, cursor,
+ found_to, NULL);
+ RET_UPDATE(result, r);
+ if (RET_IS_OK(r)) {
+ r = pool_dereferenced(found_to);
+ RET_ENDUPDATE(result, r);
+ }
+ }
+ }
+ r = cursor_close(rdb_references, cursor);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+/* dump all references to stdout */
+retvalue references_dump(void) {
+ struct cursor *cursor;
+ retvalue result, r;
+ const char *found_to, *found_by;
+
+ r = table_newglobalcursor(rdb_references, true, &cursor);
+ if (!RET_IS_OK(r))
+ return r;
+
+ result = RET_OK;
+ while (cursor_nexttempdata(rdb_references, cursor,
+ &found_to, &found_by, NULL)) {
+ if (fputs(found_by, stdout) == EOF ||
+ putchar(' ') == EOF ||
+ puts(found_to) == EOF) {
+ result = RET_ERROR;
+ break;
+ }
+ result = RET_OK;
+ if (interrupted()) {
+ result = RET_ERROR_INTERRUPTED;
+ break;
+ }
+ }
+ r = cursor_close(rdb_references, cursor);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
diff --git a/reference.h b/reference.h
new file mode 100644
index 0000000..e9bbe7e
--- /dev/null
+++ b/reference.h
@@ -0,0 +1,50 @@
+#ifndef REPREPRO_REFERENCE_H
+#define REPREPRO_REFERENCE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's happening?"
+#endif
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#warning "What's happening?"
+#endif
+
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+struct references;
+
+/* remove all references from a given identifier */
+retvalue references_remove(const char *neededby);
+
+/* Add an reference by <identifier> for the given <files>,
+ * excluding <exclude>, if it is nonNULL. */
+retvalue references_insert(const char *, const struct strlist *, const struct strlist * /*exclude*/);
+
+/* Add an reference by <identifier> for the given <files>,
+ * do not error out if reference already exists */
+retvalue references_add(const char *, const struct strlist *);
+
+/* Remove reference by <identifier> for the given <oldfiles>,
+ * excluding <exclude>, if it is nonNULL. */
+retvalue references_delete(const char *, const struct strlist *, /*@null@*/const struct strlist * /*exclude*/);
+
+/* add an reference to a file for an identifier. */
+retvalue references_increment(const char * /*needed*/, const char * /*needey*/);
+
+/* delete reference to a file for an identifier */
+retvalue references_decrement(const char * /*needed*/, const char * /*needey*/);
+
+/* check if an item is needed, returns RET_NOTHING if not */
+retvalue references_isused(const char *);
+
+/* check if a reference is found as expected */
+retvalue references_check(const char * /*referee*/, const struct strlist */*what*/);
+
+/* output all references to stdout */
+retvalue references_dump(void);
+
+#endif
diff --git a/release.c b/release.c
new file mode 100644
index 0000000..473c5e6
--- /dev/null
+++ b/release.c
@@ -0,0 +1,1900 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007,2009,2012,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <time.h>
+#include <zlib.h>
+#ifdef HAVE_LIBBZ2
+#include <bzlib.h>
+#endif
+#ifdef HAVE_LIBLZMA
+#include <lzma.h>
+#endif
+#define CHECKSUMS_CONTEXT visible
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "filecntl.h"
+#include "chunks.h"
+#include "checksums.h"
+#include "dirs.h"
+#include "names.h"
+#include "signature.h"
+#include "distribution.h"
+#include "outhook.h"
+#include "release.h"
+
+#define INPUT_BUFFER_SIZE 1024
+#define GZBUFSIZE 40960
+#define BZBUFSIZE 40960
+// TODO: what is the correct value here:
+#define XZBUFSIZE 40960
+
+struct release {
+ /* The base-directory of the distribution we are exporting */
+ char *dirofdist;
+ /* anything new yet added */
+ bool new;
+ /* NULL if no snapshot */
+ /*@null@*/char *snapshotname;
+ /* specific overrides for fakeprefixes or snapshots: */
+ /*@null@*/char *fakesuite;
+ /*@null@*/char *fakecodename;
+ /*@null@*/const char *fakecomponentprefix;
+ size_t fakecomponentprefixlen;
+ /* the files yet for the list */
+ struct release_entry {
+ struct release_entry *next;
+ char *relativefilename;
+ struct checksums *checksums;
+ char *fullfinalfilename;
+ char *fulltemporaryfilename;
+ char *symlinktarget;
+ /* name chks NULL NULL NULL: add old filename or virtual file
+ * name chks file file NULL: rename new file and publish
+ * name NULL file file NULL: rename new file
+ * name NULL file NULL NULL: delete if done
+ * name NULL file NULL file: create symlink */
+ } *files;
+ /* the Release file in preperation
+ * (only valid between _prepare and _finish) */
+ struct signedfile *signedfile;
+ /* the cache database for old files */
+ struct table *cachedb;
+};
+
+static void release_freeentry(struct release_entry *e) {
+ free(e->relativefilename);
+ checksums_free(e->checksums);
+ free(e->fullfinalfilename);
+ if (!global.keeptemporaries && e->fulltemporaryfilename != NULL)
+ (void)unlink(e->fulltemporaryfilename);
+ free(e->fulltemporaryfilename);
+ free(e->symlinktarget);
+ free(e);
+}
+
+void release_free(struct release *release) {
+ struct release_entry *e;
+
+ free(release->snapshotname);
+ free(release->dirofdist);
+ free(release->fakesuite);
+ free(release->fakecodename);
+ while ((e = release->files) != NULL) {
+ release->files = e->next;
+ release_freeentry(e);
+ }
+ if (release->signedfile != NULL)
+ signedfile_free(release->signedfile);
+ if (release->cachedb != NULL) {
+ table_close(release->cachedb);
+ }
+ free(release);
+}
+
+const char *release_dirofdist(struct release *release) {
+ return release->dirofdist;
+}
+
+static retvalue newreleaseentry(struct release *release, /*@only@*/ char *relativefilename,
+ /*@only@*/ struct checksums *checksums,
+ /*@only@*/ /*@null@*/ char *fullfinalfilename,
+ /*@only@*/ /*@null@*/ char *fulltemporaryfilename,
+ /*@only@*/ /*@null@*/ char *symlinktarget) {
+ struct release_entry *n, *p;
+
+ /* everything has a relative name */
+ assert (relativefilename != NULL);
+ /* it's either something to do or to publish */
+ assert (fullfinalfilename != NULL || checksums != NULL);
+ /* if there is something temporary, it has a final place */
+ assert (fulltemporaryfilename == NULL || fullfinalfilename != NULL);
+ /* a symlink cannot be published (Yet?) */
+ assert (symlinktarget == NULL || checksums == NULL);
+ /* cannot place a file and a symlink */
+ assert (symlinktarget == NULL || fulltemporaryfilename == NULL);
+ /* something to publish cannot be a file deletion */
+ assert (checksums == NULL
+ || fullfinalfilename == NULL
+ || fulltemporaryfilename != NULL
+ || symlinktarget != NULL);
+ n = NEW(struct release_entry);
+ if (FAILEDTOALLOC(n)) {
+ checksums_free(checksums);
+ free(fullfinalfilename);
+ free(fulltemporaryfilename);
+ free(symlinktarget);
+ return RET_ERROR_OOM;
+ }
+ n->next = NULL;
+ n->relativefilename = relativefilename;
+ n->checksums = checksums;
+ n->fullfinalfilename = fullfinalfilename;
+ n->fulltemporaryfilename = fulltemporaryfilename;
+ n->symlinktarget = symlinktarget;
+ if (release->files == NULL)
+ release->files = n;
+ else {
+ p = release->files;
+ while (p->next != NULL)
+ p = p->next;
+ p->next = n;
+ }
+ return RET_OK;
+}
+
+retvalue release_init(struct release **release, const char *codename, const char *suite, const char *fakecomponentprefix) {
+ struct release *n;
+ size_t len, suitelen, codenamelen;
+ retvalue r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: release_init(codename=%s, suite=%s, fakecomponentprefix=%s) called.\n",
+ codename, suite, fakecomponentprefix);
+
+ n = zNEW(struct release);
+ if (FAILEDTOALLOC(n))
+ return RET_ERROR_OOM;
+ n->dirofdist = calc_dirconcat(global.distdir, codename);
+ if (FAILEDTOALLOC(n->dirofdist)) {
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ if (fakecomponentprefix != NULL) {
+ len = strlen(fakecomponentprefix);
+ codenamelen = strlen(codename);
+
+ n->fakecomponentprefix = fakecomponentprefix;
+ n->fakecomponentprefixlen = len;
+ if (codenamelen > len &&
+ codename[codenamelen - len - 1] == '/' &&
+ memcmp(codename + (codenamelen - len),
+ fakecomponentprefix, len) == 0) {
+ n->fakecodename = strndup(codename,
+ codenamelen - len - 1);
+ if (FAILEDTOALLOC(n->fakecodename)) {
+ free(n->dirofdist);
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ }
+ if (suite != NULL && (suitelen = strlen(suite)) > len &&
+ suite[suitelen - len - 1] == '/' &&
+ memcmp(suite + (suitelen - len),
+ fakecomponentprefix, len) == 0) {
+ n->fakesuite = strndup(suite,
+ suitelen - len - 1);
+ if (FAILEDTOALLOC(n->fakesuite)) {
+ free(n->fakecodename);
+ free(n->dirofdist);
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ }
+ }
+ r = database_openreleasecache(codename, &n->cachedb);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ n->cachedb = NULL;
+ free(n->fakecodename);
+ free(n->fakesuite);
+ free(n->dirofdist);
+ free(n);
+ return r;
+ }
+ *release = n;
+ return RET_OK;
+}
+
+retvalue release_initsnapshot(const char *codename, const char *name, struct release **release) {
+ struct release *n;
+
+ n = zNEW(struct release);
+ if (FAILEDTOALLOC(n))
+ return RET_ERROR_OOM;
+ n->dirofdist = calc_snapshotbasedir(codename, name);
+ if (FAILEDTOALLOC(n->dirofdist)) {
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ /* apt only removes the last /... part but we create two,
+ * so stop it generating warnings by faking a suite */
+ n->fakesuite = mprintf("%s/snapshots/%s", codename, name);
+ if (FAILEDTOALLOC(n->fakesuite)) {
+ free(n->dirofdist);
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ n->fakecodename = NULL;
+ n->fakecomponentprefix = NULL;
+ n->fakecomponentprefixlen = 0;
+ n->cachedb = NULL;
+ n->snapshotname = strdup(name);
+ if (n->snapshotname == NULL) {
+ free(n->fakesuite);
+ free(n->dirofdist);
+ free(n);
+ return RET_ERROR_OOM;
+ }
+ *release = n;
+ return RET_OK;
+}
+
+retvalue release_adddel(struct release *release, /*@only@*/char *reltmpfile) {
+ char *filename;
+
+ filename = calc_dirconcat(release->dirofdist, reltmpfile);
+ if (FAILEDTOALLOC(filename)) {
+ free(reltmpfile);
+ return RET_ERROR_OOM;
+ }
+ return newreleaseentry(release, reltmpfile, NULL, filename, NULL, NULL);
+}
+
+retvalue release_addnew(struct release *release, /*@only@*/char *reltmpfile, /*@only@*/char *relfilename) {
+ retvalue r;
+ char *filename, *finalfilename;
+ struct checksums *checksums;
+
+ filename = calc_dirconcat(release->dirofdist, reltmpfile);
+ if (FAILEDTOALLOC(filename)) {
+ free(reltmpfile);
+ free(relfilename);
+ return RET_ERROR_OOM;
+ }
+ free(reltmpfile);
+ r = checksums_read(filename, &checksums);
+ if (!RET_IS_OK(r)) {
+ free(relfilename);
+ free(filename);
+ return r;
+ }
+ finalfilename = calc_dirconcat(release->dirofdist, relfilename);
+ if (FAILEDTOALLOC(finalfilename)) {
+ free(relfilename);
+ free(filename);
+ checksums_free(checksums);
+ return RET_ERROR_OOM;
+ }
+ release->new = true;
+ return newreleaseentry(release, relfilename,
+ checksums, finalfilename, filename, NULL);
+}
+
+retvalue release_addsilentnew(struct release *release, /*@only@*/char *reltmpfile, /*@only@*/char *relfilename) {
+ char *filename, *finalfilename;
+
+ filename = calc_dirconcat(release->dirofdist, reltmpfile);
+ if (FAILEDTOALLOC(filename)) {
+ free(reltmpfile);
+ free(relfilename);
+ return RET_ERROR_OOM;
+ }
+ free(reltmpfile);
+ finalfilename = calc_dirconcat(release->dirofdist, relfilename);
+ if (FAILEDTOALLOC(finalfilename)) {
+ free(relfilename);
+ free(filename);
+ return RET_ERROR_OOM;
+ }
+ release->new = true;
+ return newreleaseentry(release, relfilename,
+ NULL, finalfilename, filename, NULL);
+}
+
+/* Schedule the already existing file <relfilename> (relative to the
+ * distribution directory) to be listed in the Release file; its
+ * checksums are read from disk.  Takes ownership of <relfilename>. */
+retvalue release_addold(struct release *release, /*@only@*/char *relfilename) {
+	retvalue r;
+	char *filename;
+	struct checksums *checksums;
+
+	filename = calc_dirconcat(release->dirofdist, relfilename);
+	if (FAILEDTOALLOC(filename)) {
+		/* fixed: free the owned argument, not the just-failed
+		 * (NULL) allocation result, which leaked relfilename */
+		free(relfilename);
+		return RET_ERROR_OOM;
+	}
+	r = checksums_read(filename, &checksums);
+	free(filename);
+	if (!RET_IS_OK(r)) {
+		free(relfilename);
+		return r;
+	}
+	return newreleaseentry(release, relfilename,
+			checksums, NULL, NULL, NULL);
+}
+
+static retvalue release_addsymlink(struct release *release, /*@only@*/char *relfilename, /*@only@*/ char *symlinktarget) {
+ char *fullfilename;
+
+ fullfilename = calc_dirconcat(release->dirofdist, relfilename);
+ if (FAILEDTOALLOC(fullfilename)) {
+ free(symlinktarget);
+ free(relfilename);
+ return RET_ERROR_OOM;
+ }
+ release->new = true;
+ return newreleaseentry(release, relfilename, NULL,
+ fullfilename, NULL, symlinktarget);
+}
+
+/* Return a newly allocated file name for <name> in compression <ic>
+ * (appending ".gz"/".bz2"/".xz" as appropriate), or NULL on allocation
+ * failure.  The default branch is unreachable for valid <ic> values;
+ * the assert documents that. */
+static char *calc_compressedname(const char *name, enum indexcompression ic) {
+	switch (ic) {
+		case ic_uncompressed:
+			return strdup(name);
+		case ic_gzip:
+			return calc_addsuffix(name, "gz");
+#ifdef HAVE_LIBBZ2
+		case ic_bzip2:
+			return calc_addsuffix(name, "bz2");
+#endif
+#ifdef HAVE_LIBLZMA
+		case ic_xz:
+			return calc_addsuffix(name, "xz");
+#endif
+		default:
+			assert ("Huh?" == NULL);
+			return NULL;
+	}
+}
+
+static retvalue release_usecached(struct release *release,
+ const char *relfilename,
+ compressionset compressions) {
+ retvalue result, r;
+ enum indexcompression ic;
+ char *filename[ic_count];
+ struct checksums *checksums[ic_count];
+
+ memset(filename, 0, sizeof(filename));
+ memset(checksums, 0, sizeof(checksums));
+ result = RET_OK;
+
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ if (ic != ic_uncompressed &&
+ (compressions & IC_FLAG(ic)) == 0)
+ continue;
+ filename[ic] = calc_compressedname(relfilename, ic);
+ if (FAILEDTOALLOC(filename[ic])) {
+ result = RET_ERROR_OOM;
+ break;
+ }
+ }
+ if (RET_IS_OK(result)) {
+ /* first look if the there are actual files, in case
+ * the cache still lists them but they got lost */
+
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ char *fullfilename;
+
+ if ((compressions & IC_FLAG(ic)) == 0)
+ continue;
+ assert (filename[ic] != NULL);
+ fullfilename = calc_dirconcat(release->dirofdist,
+ filename[ic]);
+ if (FAILEDTOALLOC(fullfilename)) {
+ result = RET_ERROR_OOM;
+ break;
+ }
+ if (!isregularfile(fullfilename)) {
+ free(fullfilename);
+ result = RET_NOTHING;
+ break;
+ }
+ free(fullfilename);
+ }
+ }
+ if (RET_IS_OK(result) && release->cachedb == NULL)
+ result = RET_NOTHING;
+ if (!RET_IS_OK(result)) {
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++)
+ free(filename[ic]);
+ return result;
+ }
+
+ /* now that the files are there look into the cache
+ * what checksums they have. */
+
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ char *combinedchecksum;
+
+ if (filename[ic] == NULL)
+ continue;
+ r = table_getrecord(release->cachedb, false, filename[ic],
+ &combinedchecksum, NULL);
+ if (!RET_IS_OK(r)) {
+ result = r;
+ break;
+ }
+ r = checksums_parse(&checksums[ic], combinedchecksum);
+ // TODO: handle malformed checksums better?
+ free(combinedchecksum);
+ if (!RET_IS_OK(r)) {
+ result = r;
+ break;
+ }
+ }
+ /* some files might not yet have some type of checksum available,
+ * so calculate them (checking the other checksums match...): */
+ if (RET_IS_OK(result)) {
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ char *fullfilename;
+ if (filename[ic] == NULL)
+ continue;
+ fullfilename = calc_dirconcat(release->dirofdist,
+ filename[ic]);
+ if (FAILEDTOALLOC(fullfilename))
+ r = RET_ERROR_OOM;
+ else
+ r = checksums_complete(&checksums[ic],
+ fullfilename);
+ if (r == RET_ERROR_WRONG_MD5) {
+ fprintf(stderr,
+"WARNING: '%s' is different from recorded checksums.\n"
+"(This was only caught because some new checksum type was not yet available.)\n"
+"Triggering recreation of that file.\n", fullfilename);
+ r = RET_NOTHING;
+ }
+ free(fullfilename);
+ if (!RET_IS_OK(r)) {
+ result = r;
+ break;
+ }
+ }
+ }
+ if (!RET_IS_OK(result)) {
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ if (filename[ic] == NULL)
+ continue;
+ free(filename[ic]);
+ checksums_free(checksums[ic]);
+ }
+ return result;
+ }
+ /* everything found, commit it: */
+ result = RET_OK;
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ if (filename[ic] == NULL)
+ continue;
+ r = newreleaseentry(release, filename[ic],
+ checksums[ic],
+ NULL, NULL, NULL);
+ RET_UPDATE(result, r);
+ }
+ return result;
+}
+
+
+struct filetorelease {
+ retvalue state;
+ struct openfile {
+ int fd;
+ struct checksumscontext context;
+ char *relativefilename;
+ char *fullfinalfilename;
+ char *fulltemporaryfilename;
+ char *symlinkas;
+ } f[ic_count];
+ /* input buffer, to checksum/compress data at once */
+ unsigned char *buffer; size_t waiting_bytes;
+ /* output buffer for gzip compression */
+ unsigned char *gzoutputbuffer; size_t gz_waiting_bytes;
+ z_stream gzstream;
+#ifdef HAVE_LIBBZ2
+ /* output buffer for bzip2 compression */
+ char *bzoutputbuffer; size_t bz_waiting_bytes;
+ bz_stream bzstream;
+#endif
+#ifdef HAVE_LIBLZMA
+ /* output buffer for bzip2 compression */
+ unsigned char *xzoutputbuffer; size_t xz_waiting_bytes;
+ lzma_stream xzstream;
+#endif
+};
+
+void release_abortfile(struct filetorelease *file) {
+ enum indexcompression i;
+
+ for (i = ic_uncompressed ; i < ic_count ; i++) {
+ if (file->f[i].fd >= 0) {
+ (void)close(file->f[i].fd);
+ if (file->f[i].fulltemporaryfilename != NULL)
+ (void)unlink(file->f[i].fulltemporaryfilename);
+ }
+ free(file->f[i].relativefilename);
+ free(file->f[i].fullfinalfilename);
+ free(file->f[i].fulltemporaryfilename);
+ free(file->f[i].symlinkas);
+ }
+ free(file->buffer);
+ free(file->gzoutputbuffer);
+ if (file->gzstream.next_out != NULL) {
+ (void)deflateEnd(&file->gzstream);
+ }
+#ifdef HAVE_LIBBZ2
+ free(file->bzoutputbuffer);
+ if (file->bzstream.next_out != NULL) {
+ (void)BZ2_bzCompressEnd(&file->bzstream);
+ }
+#endif
+#ifdef HAVE_LIBLZMA
+ if (file->xzoutputbuffer != NULL) {
+ free(file->xzoutputbuffer);
+ lzma_end(&file->xzstream);
+ }
+#endif
+}
+
+bool release_oldexists(struct filetorelease *file) {
+ enum indexcompression ic;
+ bool hadanything = false;
+
+ for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+ char *f = file->f[ic].fullfinalfilename;
+
+ if (f != NULL) {
+ if (isregularfile(f))
+ hadanything = true;
+ else
+ return false;
+ }
+ }
+ return hadanything;
+}
+
+static retvalue openfile(const char *dirofdist, struct openfile *f) {
+
+ f->fullfinalfilename = calc_dirconcat(dirofdist, f->relativefilename);
+ if (FAILEDTOALLOC(f->fullfinalfilename))
+ return RET_ERROR_OOM;
+ f->fulltemporaryfilename = calc_addsuffix(f->fullfinalfilename, "new");
+ if (FAILEDTOALLOC(f->fulltemporaryfilename))
+ return RET_ERROR_OOM;
+ (void)unlink(f->fulltemporaryfilename);
+ f->fd = open(f->fulltemporaryfilename,
+ O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+ if (f->fd < 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d opening file %s for writing: %s\n",
+ e, f->fulltemporaryfilename, strerror(e));
+ return RET_ERRNO(e);
+ }
+ return RET_OK;
+}
+
+/* Feed <data> into the file's checksum context and, if the file is
+ * actually being written (fd >= 0), write all <len> bytes to the fd,
+ * handling partial writes.  Returns RET_NOTHING when only checksumming
+ * (fd < 0), RET_OK on full write, RET_ERRNO on write failure.
+ * NOTE(review): EAGAIN is retried in a tight loop — fine for the
+ * blocking fds opened by openfile(), would spin on a nonblocking fd. */
+static retvalue writetofile(struct openfile *file, const unsigned char *data, size_t len) {
+
+	checksumscontext_update(&file->context, data, len);
+
+	if (file->fd < 0)
+		return RET_NOTHING;
+
+	while (len > 0) {
+		ssize_t written = write(file->fd, data, len);
+		if (written >= 0) {
+			len -= written;
+			data += written;
+		} else {
+			int e = errno;
+			if (e == EAGAIN || e == EINTR)
+				continue;
+			fprintf(stderr, "Error %d writing to %s: %s\n",
+					e, file->fullfinalfilename,
+					strerror(e));
+			return RET_ERRNO(e);
+		}
+	}
+	return RET_OK;
+}
+
+static retvalue initgzcompression(struct filetorelease *f) {
+ int zret;
+
+ if ((zlibCompileFlags() & (1<<17)) !=0) {
+ fprintf(stderr, "libz compiled without .gz supporting code\n");
+ return RET_ERROR;
+ }
+ f->gzoutputbuffer = malloc(GZBUFSIZE);
+ if (FAILEDTOALLOC(f->gzoutputbuffer))
+ return RET_ERROR_OOM;
+ f->gzstream.next_in = NULL;
+ f->gzstream.avail_in = 0;
+ f->gzstream.next_out = f->gzoutputbuffer;
+ f->gzstream.avail_out = GZBUFSIZE;
+ f->gzstream.zalloc = NULL;
+ f->gzstream.zfree = NULL;
+ f->gzstream.opaque = NULL;
+ zret = deflateInit2(&f->gzstream,
+ /* Level: 0-9 or Z_DEFAULT_COMPRESSION: */
+ Z_DEFAULT_COMPRESSION,
+ /* only possibility yet: */
+ Z_DEFLATED,
+ /* +16 to generate gzip header */
+ 16 + MAX_WBITS,
+ /* how much memory to use 1-9 */
+ 8,
+ /* default or Z_FILTERED or Z_HUFFMAN_ONLY or Z_RLE */
+ Z_DEFAULT_STRATEGY
+ );
+ f->gz_waiting_bytes = GZBUFSIZE - f->gzstream.avail_out;
+ if (zret == Z_MEM_ERROR)
+ return RET_ERROR_OOM;
+ if (zret != Z_OK) {
+ if (f->gzstream.msg == NULL) {
+ fprintf(stderr, "Error from zlib's deflateInit2: "
+ "unknown(%d)\n", zret);
+ } else {
+ fprintf(stderr, "Error from zlib's deflateInit2: %s\n",
+ f->gzstream.msg);
+ }
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
+#ifdef HAVE_LIBBZ2
+
+static retvalue initbzcompression(struct filetorelease *f) {
+ int bzret;
+
+ f->bzoutputbuffer = malloc(BZBUFSIZE);
+ if (FAILEDTOALLOC(f->bzoutputbuffer))
+ return RET_ERROR_OOM;
+ f->bzstream.next_in = NULL;
+ f->bzstream.avail_in = 0;
+ f->bzstream.next_out = f->bzoutputbuffer;
+ f->bzstream.avail_out = BZBUFSIZE;
+ f->bzstream.bzalloc = NULL;
+ f->bzstream.bzfree = NULL;
+ f->bzstream.opaque = NULL;
+ bzret = BZ2_bzCompressInit(&f->bzstream,
+ /* blocksize (1-9) */
+ 9,
+ /* verbosity */
+ 0,
+ /* workFaktor (1-250, 0 = default(30)) */
+ 0
+ );
+ if (bzret == BZ_MEM_ERROR)
+ return RET_ERROR_OOM;
+ if (bzret != BZ_OK) {
+ fprintf(stderr, "Error from libbz2's bzCompressInit: "
+ "%d\n", bzret);
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+#endif
+
+#ifdef HAVE_LIBLZMA
+
+/* Initialize liblzma encoder state on f so a .xz variant of the index
+ * can be produced via writexz()/finishxz().
+ * Returns RET_ERROR_OOM on allocation failure, RET_ERROR otherwise. */
+static retvalue initxzcompression(struct filetorelease *f) {
+	lzma_ret lret;
+
+	f->xzoutputbuffer = malloc(XZBUFSIZE);
+	if (FAILEDTOALLOC(f->xzoutputbuffer))
+		return RET_ERROR_OOM;
+	/* LZMA_STREAM_INIT equivalent: lzma requires a zeroed stream */
+	memset(&f->xzstream, 0, sizeof(f->xzstream));
+	/* preset 9, CRC64 integrity check (the xz command's default) */
+	lret = lzma_easy_encoder(&f->xzstream, 9, LZMA_CHECK_CRC64);
+	if (lret == LZMA_MEM_ERROR)
+		return RET_ERROR_OOM;
+	if (lret != LZMA_OK) {
+		fprintf(stderr, "Error from liblzma's lzma_easy_encoder: "
+				"%d\n", lret);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+#endif
+
+
+/* filename suffix for each compression variant,
+ * indexed by enum indexcompression */
+static const char * const ics[ic_count] = { "", ".gz"
+#ifdef HAVE_LIBBZ2
+	, ".bz2"
+#endif
+#ifdef HAVE_LIBLZMA
+	, ".xz"
+#endif
+};
+
+/* Fill in the relative filename (and, if symlinkas is given, the
+ * symlink name) for compression variant ic, each with the proper
+ * suffix appended.  Returns RET_ERROR_OOM on allocation failure. */
+static inline retvalue setfilename(struct filetorelease *n, const char *relfilename, /*@null@*/const char *symlinkas, enum indexcompression ic) {
+	n->f[ic].relativefilename = mprintf("%s%s", relfilename, ics[ic]);
+	if (FAILEDTOALLOC(n->f[ic].relativefilename))
+		return RET_ERROR_OOM;
+	if (symlinkas == NULL)
+		return RET_OK;
+	/* symlink creation fails horrible if the symlink is not in the base
+	 * directory */
+	assert (strchr(symlinkas, '/') == NULL);
+	n->f[ic].symlinkas = mprintf("%s%s", symlinkas, ics[ic]);
+	if (FAILEDTOALLOC(n->f[ic].symlinkas))
+		return RET_ERROR_OOM;
+	return RET_OK;
+}
+
+/* Warn (once per run, unless --ignore=oldfile) about a file in the
+ * dists directory that looks like a leftover from an earlier export.
+ * Best-effort: allocation failure silently skips the warning. */
+static inline void warnfilename(struct release *release, const char *relfilename, enum indexcompression ic) {
+	char *fullfilename;
+
+	if (IGNORABLE(oldfile))
+		return;
+
+	fullfilename = mprintf("%s/%s%s", release->dirofdist,
+			relfilename, ics[ic]);
+	if (FAILEDTOALLOC(fullfilename))
+		return;
+	if (isanyfile(fullfilename)) {
+		fprintf(stderr, "Possibly left over file '%s'.\n",
+				fullfilename);
+		/* print the hint only once per program run */
+		if (!ignored[IGN_oldfile]) {
+			fputs("You might want to delete it or use --ignore=oldfile to no longer get this message.\n", stderr);
+			ignored[IGN_oldfile] = true;
+		}
+	}
+	free(fullfilename);
+}
+
+/* Begin writing an index file (and its compressed variants selected in
+ * compressions) into the dists directory of release.
+ * If usecache is set and the cached checksums show nothing changed,
+ * returns RET_NOTHING and no file is created.  On success *file is the
+ * new writer handle; on error everything allocated so far is released
+ * via release_abortfile(). */
+static retvalue startfile(struct release *release, const char *filename, /*@null@*/const char *symlinkas, compressionset compressions, bool usecache, struct filetorelease **file) {
+	struct filetorelease *n;
+	enum indexcompression i;
+
+	if (usecache) {
+		retvalue r = release_usecached(release, filename, compressions);
+		if (r != RET_NOTHING) {
+			/* RET_OK from the cache means "already up to date",
+			 * which callers see as RET_NOTHING */
+			if (RET_IS_OK(r))
+				return RET_NOTHING;
+			return r;
+		}
+	}
+
+	n = zNEW(struct filetorelease);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->buffer = malloc(INPUT_BUFFER_SIZE);
+	if (FAILEDTOALLOC(n->buffer)) {
+		release_abortfile(n);
+		return RET_ERROR_OOM;
+	}
+	/* mark all fds invalid so release_abortfile only closes real ones */
+	for (i = ic_uncompressed ; i < ic_count ; i ++) {
+		n->f[i].fd = -1;
+	}
+	if ((compressions & IC_FLAG(ic_uncompressed)) != 0) {
+		retvalue r;
+
+		r = setfilename(n, filename, symlinkas, ic_uncompressed);
+		if (!RET_WAS_ERROR(r))
+			r = openfile(release->dirofdist, &n->f[ic_uncompressed]);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+	} else {
+		/* the uncompressed file always shows up in Release */
+		n->f[ic_uncompressed].relativefilename = strdup(filename);
+		if (FAILEDTOALLOC(n->f[ic_uncompressed].relativefilename)) {
+			release_abortfile(n);
+			return RET_ERROR_OOM;
+		}
+	}
+
+	if ((compressions & IC_FLAG(ic_gzip)) != 0) {
+		retvalue r;
+
+		r = setfilename(n, filename, symlinkas, ic_gzip);
+		if (!RET_WAS_ERROR(r))
+			r = openfile(release->dirofdist, &n->f[ic_gzip]);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+		checksumscontext_init(&n->f[ic_gzip].context);
+		r = initgzcompression(n);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+	}
+#ifdef HAVE_LIBBZ2
+	if ((compressions & IC_FLAG(ic_bzip2)) != 0) {
+		retvalue r;
+		r = setfilename(n, filename, symlinkas, ic_bzip2);
+		if (!RET_WAS_ERROR(r))
+			r = openfile(release->dirofdist, &n->f[ic_bzip2]);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+		checksumscontext_init(&n->f[ic_bzip2].context);
+		r = initbzcompression(n);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+	}
+#endif
+#ifdef HAVE_LIBLZMA
+	if ((compressions & IC_FLAG(ic_xz)) != 0) {
+		retvalue r;
+		r = setfilename(n, filename, symlinkas, ic_xz);
+		if (!RET_WAS_ERROR(r))
+			r = openfile(release->dirofdist, &n->f[ic_xz]);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+		checksumscontext_init(&n->f[ic_xz].context);
+		r = initxzcompression(n);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(n);
+			return r;
+		}
+	}
+#endif
+	/* checksums of the uncompressed data are always calculated,
+	 * even when no uncompressed file is written */
+	checksumscontext_init(&n->f[ic_uncompressed].context);
+	*file = n;
+	return RET_OK;
+}
+
+/* public wrapper around startfile() without symlink creation */
+retvalue release_startfile(struct release *release, const char *filename, compressionset compressions, bool usecache, struct filetorelease **file) {
+	return startfile(release, filename, NULL, compressions, usecache, file);
+}
+
+/* public wrapper around startfile() that also creates symlinkas
+ * (a name in the base directory) pointing at each written variant */
+retvalue release_startlinkedfile(struct release *release, const char *filename, const char *symlinkas, compressionset compressions, bool usecache, struct filetorelease **file) {
+	return startfile(release, filename, symlinkas, compressions, usecache, file);
+}
+
+/* Warn about possibly left-over files for every compression variant
+ * that is selected in the compressions bit set. */
+void release_warnoldfileorlink(struct release *release, const char *filename, compressionset compressions) {
+	enum indexcompression ic;
+
+	for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
+		if ((compressions & IC_FLAG(ic)) == 0)
+			continue;
+		warnfilename(release, filename, ic);
+	}
+}
+
+/* Compute the relative path that a symlink created at linkname must
+ * contain to point at target (both paths relative to the same base
+ * directory), i.e. "../" for every directory component unique to
+ * linkname, followed by the part of target below the common prefix.
+ * Returns a newly malloc'd string, or NULL on allocation failure. */
+static inline char *calc_relative_path(const char *target, const char *linkname) {
+	size_t t_len, l_len, common_len, len;
+	const char *t, *l;
+	int depth;
+	char *n, *p;
+
+	t_len = strlen(target);
+	l_len = strlen(linkname);
+
+	/* find the longest common directory prefix (up to and
+	 * including its trailing '/') */
+	t = target; l = linkname; common_len = 0;
+	while (*t == *l && *t != '\0') {
+		if (*t == '/')
+			common_len = (t - target) + 1;
+		t++;
+		l++;
+	}
+	/* count the directory components of linkname below the
+	 * common prefix; each needs one "../" */
+	depth = 0;
+	while (*l != '\0') {
+		if (*l++ == '/')
+			depth++;
+	}
+	assert (common_len <= t_len && common_len <= l_len &&
+			memcmp(target, linkname, common_len) == 0);
+	len = 3 * depth + t_len - common_len;
+
+	n = malloc(len + 1);
+	if (FAILEDTOALLOC(n))
+		return NULL;
+	p = n;
+	while (depth > 0) {
+		memcpy(p, "../", 3);
+		p += 3;
+		/* bugfix: depth was never decremented, so any link one or
+		 * more directories deep looped forever writing past the
+		 * end of the buffer */
+		depth--;
+	}
+	/* copy the remaining target part including the '\0' terminator */
+	memcpy(p, target + common_len, 1 + t_len - common_len);
+	p += t_len - common_len;
+	assert ((size_t)(p-n) == len);
+	return n;
+}
+
+/* Register one finished output file in the release's file list:
+ * extract its checksums, create the recorded symlink (if any) and
+ * hand the names over to newreleaseentry().  Returns RET_NOTHING for
+ * a slot that was never used. */
+static retvalue releasefile(struct release *release, struct openfile *f) {
+	struct checksums *checksums;
+	retvalue r;
+
+	if (f->relativefilename == NULL) {
+		assert (f->fullfinalfilename == NULL);
+		assert (f->fulltemporaryfilename == NULL);
+		return RET_NOTHING;
+	}
+	/* either a real file was written (both names set) or it was
+	 * only a virtual entry (both NULL) */
+	assert((f->fullfinalfilename == NULL
+			&& f->fulltemporaryfilename == NULL)
+		|| (f->fullfinalfilename != NULL
+			&& f->fulltemporaryfilename != NULL));
+
+	r = checksums_from_context(&checksums, &f->context);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (f->symlinkas) {
+		char *symlinktarget = calc_relative_path(f->relativefilename,
+				f->symlinkas);
+		if (FAILEDTOALLOC(symlinktarget))
+			return RET_ERROR_OOM;
+		/* NOTE(review): presumably release_addsymlink takes
+		 * ownership of symlinktarget; checksums appears to leak
+		 * on this error path — confirm against callees */
+		r = release_addsymlink(release, f->symlinkas,
+				symlinktarget);
+		f->symlinkas = NULL;
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+
+	/* newreleaseentry takes over the names and the checksums */
+	r = newreleaseentry(release, f->relativefilename, checksums,
+			f->fullfinalfilename,
+			f->fulltemporaryfilename,
+			NULL);
+	f->relativefilename = NULL;
+	f->fullfinalfilename = NULL;
+	f->fulltemporaryfilename = NULL;
+	return r;
+}
+
+/* Compress one full input buffer into the .gz output, flushing the
+ * compressed bytes to disk whenever the output buffer is half full. */
+static retvalue writegz(struct filetorelease *f) {
+	int zret;
+
+	assert (f->f[ic_gzip].fd >= 0);
+
+	f->gzstream.next_in = f->buffer;
+	f->gzstream.avail_in = INPUT_BUFFER_SIZE;
+
+	do {
+		f->gzstream.next_out = f->gzoutputbuffer + f->gz_waiting_bytes;
+		f->gzstream.avail_out = GZBUFSIZE - f->gz_waiting_bytes;
+
+		zret = deflate(&f->gzstream, Z_NO_FLUSH);
+		f->gz_waiting_bytes = GZBUFSIZE - f->gzstream.avail_out;
+
+		if ((zret == Z_OK && f->gz_waiting_bytes >= GZBUFSIZE / 2)
+				|| zret == Z_BUF_ERROR) {
+			retvalue r;
+			/* if there is nothing to write, better break
+			 * to avoid an infinite loop */
+			if (f->gz_waiting_bytes == 0)
+				break;
+			r = writetofile(&f->f[ic_gzip],
+					f->gzoutputbuffer, f->gz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->gz_waiting_bytes = 0;
+		}
+		/* as we start with some data to process, Z_BUF_ERROR
+		 * should only happen when no output is possible, as that
+		 * gets possible again it should finally produce more output
+		 * and return Z_OK and always terminate. Hopefully... */
+	} while (zret == Z_BUF_ERROR
+			|| (zret == Z_OK && f->gzstream.avail_in != 0));
+
+	f->gzstream.next_in = NULL;
+	f->gzstream.avail_in = 0;
+
+	if (zret != Z_OK) {
+		if (f->gzstream.msg == NULL) {
+			fprintf(stderr, "Error from zlib's deflate: "
+					"unknown(%d)\n", zret);
+		} else {
+			fprintf(stderr, "Error from zlib's deflate: %s\n",
+					f->gzstream.msg);
+		}
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Compress the final (partial) input buffer, flush everything with
+ * Z_FINISH until Z_STREAM_END, and tear down the deflate state.
+ * (The gz output buffer itself is freed later by the caller.) */
+static retvalue finishgz(struct filetorelease *f) {
+	int zret;
+
+	assert (f->f[ic_gzip].fd >= 0);
+
+	f->gzstream.next_in = f->buffer;
+	f->gzstream.avail_in = f->waiting_bytes;
+
+	do {
+		f->gzstream.next_out = f->gzoutputbuffer + f->gz_waiting_bytes;
+		f->gzstream.avail_out = GZBUFSIZE - f->gz_waiting_bytes;
+
+		zret = deflate(&f->gzstream, Z_FINISH);
+		f->gz_waiting_bytes = GZBUFSIZE - f->gzstream.avail_out;
+
+		if (zret == Z_OK || zret == Z_STREAM_END
+				|| zret == Z_BUF_ERROR) {
+			retvalue r;
+			if (f->gz_waiting_bytes == 0) {
+				/* no output although not yet finished
+				 * means something is stuck */
+				if (zret != Z_STREAM_END) {
+					fprintf(stderr,
+"Unexpected buffer error after deflate (%d)\n", zret);
+					return RET_ERROR;
+				}
+				break;
+			}
+			r = writetofile(&f->f[ic_gzip],
+					f->gzoutputbuffer, f->gz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->gz_waiting_bytes = 0;
+		}
+		/* see above */
+	} while (zret == Z_BUF_ERROR || zret == Z_OK);
+
+	if (zret != Z_STREAM_END) {
+		if (f->gzstream.msg == NULL) {
+			fprintf(stderr, "Error from zlib's deflate: "
+					"unknown(%d)\n", zret);
+		} else {
+			fprintf(stderr, "Error from zlib's deflate: %s\n",
+					f->gzstream.msg);
+		}
+		return RET_ERROR;
+	}
+
+	zret = deflateEnd(&f->gzstream);
+	/* to avoid deflateEnd called again */
+	f->gzstream.next_out = NULL;
+	if (zret != Z_OK) {
+		if (f->gzstream.msg == NULL) {
+			fprintf(stderr, "Error from zlib's deflateEnd: "
+					"unknown(%d)\n", zret);
+		} else {
+			fprintf(stderr, "Error from zlib's deflateEnd: %s\n",
+					f->gzstream.msg);
+		}
+		return RET_ERROR;
+	}
+
+	return RET_OK;
+}
+
+#ifdef HAVE_LIBBZ2
+
+/* Compress one full input buffer into the .bz2 output, flushing the
+ * compressed bytes to disk whenever the output buffer is half full. */
+static retvalue writebz(struct filetorelease *f) {
+	int bzret;
+
+	assert (f->f[ic_bzip2].fd >= 0);
+
+	/* libbz2's next_in is char*, our buffer unsigned char* */
+	f->bzstream.next_in = (char*)f->buffer;
+	f->bzstream.avail_in = INPUT_BUFFER_SIZE;
+
+	do {
+		f->bzstream.next_out = f->bzoutputbuffer + f->bz_waiting_bytes;
+		f->bzstream.avail_out = BZBUFSIZE - f->bz_waiting_bytes;
+
+		bzret = BZ2_bzCompress(&f->bzstream, BZ_RUN);
+		f->bz_waiting_bytes = BZBUFSIZE - f->bzstream.avail_out;
+
+		if (bzret == BZ_RUN_OK &&
+				f->bz_waiting_bytes >= BZBUFSIZE / 2) {
+			retvalue r;
+			assert (f->bz_waiting_bytes > 0);
+			r = writetofile(&f->f[ic_bzip2],
+					(const unsigned char *)f->bzoutputbuffer,
+					f->bz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->bz_waiting_bytes = 0;
+		}
+	} while (bzret == BZ_RUN_OK && f->bzstream.avail_in != 0);
+
+	f->bzstream.next_in = NULL;
+	f->bzstream.avail_in = 0;
+
+	if (bzret != BZ_RUN_OK) {
+		fprintf(stderr, "Error from libbz2's bzCompress: "
+				"%d\n", bzret);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Compress the final (partial) input buffer with BZ_FINISH until
+ * BZ_STREAM_END and tear down the libbz2 state.
+ * (The bz output buffer itself is freed later by the caller.) */
+static retvalue finishbz(struct filetorelease *f) {
+	int bzret;
+
+	assert (f->f[ic_bzip2].fd >= 0);
+
+	f->bzstream.next_in = (char*)f->buffer;
+	f->bzstream.avail_in = f->waiting_bytes;
+
+	do {
+		f->bzstream.next_out = f->bzoutputbuffer + f->bz_waiting_bytes;
+		f->bzstream.avail_out = BZBUFSIZE - f->bz_waiting_bytes;
+
+		bzret = BZ2_bzCompress(&f->bzstream, BZ_FINISH);
+		f->bz_waiting_bytes = BZBUFSIZE - f->bzstream.avail_out;
+
+		/* BZ_RUN_OK most likely is not possible here, but BZ_FINISH_OK
+		 * is returned when it cannot be finished in one step.
+		 * but better safe than sorry... */
+		if ((bzret == BZ_RUN_OK || bzret == BZ_FINISH_OK
+					|| bzret == BZ_STREAM_END)
+				&& f->bz_waiting_bytes > 0) {
+			retvalue r;
+			r = writetofile(&f->f[ic_bzip2],
+					(const unsigned char*)f->bzoutputbuffer,
+					f->bz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->bz_waiting_bytes = 0;
+		}
+	} while (bzret == BZ_RUN_OK || bzret == BZ_FINISH_OK);
+
+	if (bzret != BZ_STREAM_END) {
+		fprintf(stderr, "Error from bzlib's bzCompress: "
+				"%d\n", bzret);
+		return RET_ERROR;
+	}
+
+	bzret = BZ2_bzCompressEnd(&f->bzstream);
+	/* to avoid bzCompressEnd called again */
+	f->bzstream.next_out = NULL;
+	if (bzret != BZ_OK) {
+		fprintf(stderr, "Error from libbz2's bzCompressEnd: "
+				"%d\n", bzret);
+		return RET_ERROR;
+	}
+
+	return RET_OK;
+}
+#endif
+
+#ifdef HAVE_LIBLZMA
+
+/* Compress one full input buffer into the .xz output, flushing the
+ * compressed bytes to disk whenever the output buffer is half full. */
+static retvalue writexz(struct filetorelease *f) {
+	lzma_ret xzret;
+
+	assert (f->f[ic_xz].fd >= 0);
+
+	f->xzstream.next_in = f->buffer;
+	f->xzstream.avail_in = INPUT_BUFFER_SIZE;
+
+	do {
+		f->xzstream.next_out = f->xzoutputbuffer + f->xz_waiting_bytes;
+		f->xzstream.avail_out = XZBUFSIZE - f->xz_waiting_bytes;
+
+		xzret = lzma_code(&f->xzstream, LZMA_RUN);
+		f->xz_waiting_bytes = XZBUFSIZE - f->xzstream.avail_out;
+
+		if (xzret == LZMA_OK &&
+				f->xz_waiting_bytes >= XZBUFSIZE / 2) {
+			retvalue r;
+			assert (f->xz_waiting_bytes > 0);
+			r = writetofile(&f->f[ic_xz],
+					(const unsigned char *)f->xzoutputbuffer,
+					f->xz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->xz_waiting_bytes = 0;
+		}
+	} while (xzret == LZMA_OK && f->xzstream.avail_in != 0);
+
+	f->xzstream.next_in = NULL;
+	f->xzstream.avail_in = 0;
+
+	if (xzret != LZMA_OK) {
+		fprintf(stderr, "Error from liblzma's lzma_code: "
+				"%d\n", xzret);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Compress the final (partial) input buffer with LZMA_FINISH until
+ * LZMA_STREAM_END, then free the encoder state and the output buffer
+ * (unlike the gz/bz2 variants, which leave their buffer to the caller;
+ * release_finishfile asserts xzoutputbuffer is NULL afterwards). */
+static retvalue finishxz(struct filetorelease *f) {
+	lzma_ret xzret;
+
+	assert (f->f[ic_xz].fd >= 0);
+
+	f->xzstream.next_in = f->buffer;
+	f->xzstream.avail_in = f->waiting_bytes;
+
+	do {
+		f->xzstream.next_out = f->xzoutputbuffer + f->xz_waiting_bytes;
+		f->xzstream.avail_out = XZBUFSIZE - f->xz_waiting_bytes;
+
+		xzret = lzma_code(&f->xzstream, LZMA_FINISH);
+		f->xz_waiting_bytes = XZBUFSIZE - f->xzstream.avail_out;
+
+		if ((xzret == LZMA_OK || xzret == LZMA_STREAM_END)
+				&& f->xz_waiting_bytes > 0) {
+			retvalue r;
+			r = writetofile(&f->f[ic_xz],
+					(const unsigned char*)f->xzoutputbuffer,
+					f->xz_waiting_bytes);
+			assert (r != RET_NOTHING);
+			if (RET_WAS_ERROR(r))
+				return r;
+			f->xz_waiting_bytes = 0;
+		}
+	} while (xzret == LZMA_OK);
+
+	if (xzret != LZMA_STREAM_END) {
+		fprintf(stderr, "Error from liblzma's lzma_code: "
+				"%d\n", xzret);
+		return RET_ERROR;
+	}
+	assert (f->xz_waiting_bytes == 0);
+
+	lzma_end(&f->xzstream);
+	free(f->xzoutputbuffer);
+	f->xzoutputbuffer = NULL;
+
+	return RET_OK;
+}
+#endif
+
+/* Finish writing an index file: flush the remaining input through every
+ * active compressor, close all file descriptors, register the resulting
+ * files via releasefile() and free the writer handle.
+ * On any error the whole writer is aborted and temp files removed. */
+retvalue release_finishfile(struct release *release, struct filetorelease *file) {
+	retvalue result, r;
+	enum indexcompression i;
+
+	/* errors cached by release_writedata surface here */
+	if (RET_WAS_ERROR(file->state)) {
+		r = file->state;
+		release_abortfile(file);
+		return r;
+	}
+
+	r = writetofile(&file->f[ic_uncompressed],
+			file->buffer, file->waiting_bytes);
+	if (RET_WAS_ERROR(r)) {
+		release_abortfile(file);
+		return r;
+	}
+	if (file->f[ic_uncompressed].fd >= 0) {
+		if (close(file->f[ic_uncompressed].fd) != 0) {
+			int e = errno;
+			file->f[ic_uncompressed].fd = -1;
+			release_abortfile(file);
+			return RET_ERRNO(e);
+		}
+		file->f[ic_uncompressed].fd = -1;
+	}
+	if (file->f[ic_gzip].fd >= 0) {
+		r = finishgz(file);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(file);
+			return r;
+		}
+		if (close(file->f[ic_gzip].fd) != 0) {
+			int e = errno;
+			file->f[ic_gzip].fd = -1;
+			release_abortfile(file);
+			return RET_ERRNO(e);
+		}
+		file->f[ic_gzip].fd = -1;
+	}
+#ifdef HAVE_LIBBZ2
+	if (file->f[ic_bzip2].fd >= 0) {
+		r = finishbz(file);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(file);
+			return r;
+		}
+		if (close(file->f[ic_bzip2].fd) != 0) {
+			int e = errno;
+			file->f[ic_bzip2].fd = -1;
+			release_abortfile(file);
+			return RET_ERRNO(e);
+		}
+		file->f[ic_bzip2].fd = -1;
+	}
+#endif
+#ifdef HAVE_LIBLZMA
+	if (file->f[ic_xz].fd >= 0) {
+		r = finishxz(file);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(file);
+			return r;
+		}
+		if (close(file->f[ic_xz].fd) != 0) {
+			int e = errno;
+			file->f[ic_xz].fd = -1;
+			release_abortfile(file);
+			return RET_ERRNO(e);
+		}
+		file->f[ic_xz].fd = -1;
+	}
+#endif
+	/* something was (re)written, the Release file must be redone */
+	release->new = true;
+	result = RET_OK;
+
+	for (i = ic_uncompressed ; i < ic_count ; i++) {
+		r = releasefile(release, &file->f[i]);
+		if (RET_WAS_ERROR(r)) {
+			release_abortfile(file);
+			return r;
+		}
+		RET_UPDATE(result, r);
+	}
+	free(file->buffer);
+	free(file->gzoutputbuffer);
+#ifdef HAVE_LIBBZ2
+	free(file->bzoutputbuffer);
+#endif
+#ifdef HAVE_LIBLZMA
+	/* finishxz already freed this */
+	assert(file->xzoutputbuffer == NULL);
+#endif
+	free(file);
+	return result;
+}
+
+/* Push one completely filled input buffer through the uncompressed
+ * writer (also updates the checksums) and every active compressor.
+ * Errors are accumulated into file->state for release_finishfile. */
+static retvalue release_processbuffer(struct filetorelease *file) {
+	retvalue result, r;
+
+	result = RET_OK;
+	assert (file->waiting_bytes == INPUT_BUFFER_SIZE);
+
+	/* always call this - even if there is no uncompressed file
+	 * to generate - so that checksums are calculated */
+	r = writetofile(&file->f[ic_uncompressed],
+			file->buffer, INPUT_BUFFER_SIZE);
+	RET_UPDATE(result, r);
+
+	if (file->f[ic_gzip].relativefilename != NULL) {
+		r = writegz(file);
+		RET_UPDATE(result, r);
+	}
+	RET_UPDATE(file->state, result);
+#ifdef HAVE_LIBBZ2
+	if (file->f[ic_bzip2].relativefilename != NULL) {
+		r = writebz(file);
+		RET_UPDATE(result, r);
+	}
+	RET_UPDATE(file->state, result);
+#endif
+#ifdef HAVE_LIBLZMA
+	if (file->f[ic_xz].relativefilename != NULL) {
+		r = writexz(file);
+		RET_UPDATE(result, r);
+	}
+	RET_UPDATE(file->state, result);
+#endif
+	return result;
+}
+
+/* Append len bytes of data to the index file being generated.
+ * Data is collected in an INPUT_BUFFER_SIZE buffer and compressed
+ * in whole-buffer chunks; errors are also cached in file->state. */
+retvalue release_writedata(struct filetorelease *file, const char *data, size_t len) {
+	retvalue result, r;
+	size_t free_bytes;
+
+	result = RET_OK;
+	/* move stuff into buffer, so stuff is not processed byte by byte */
+	free_bytes = INPUT_BUFFER_SIZE - file->waiting_bytes;
+	if (len < free_bytes) {
+		/* fits without filling the buffer completely: just store */
+		memcpy(file->buffer + file->waiting_bytes, data, len);
+		file->waiting_bytes += len;
+		assert (file->waiting_bytes < INPUT_BUFFER_SIZE);
+		return RET_OK;
+	}
+	/* fill the buffer up and process it */
+	memcpy(file->buffer + file->waiting_bytes, data, free_bytes);
+	len -= free_bytes;
+	data += free_bytes;
+	file->waiting_bytes += free_bytes;
+	r = release_processbuffer(file);
+	RET_UPDATE(result, r);
+	while (len >= INPUT_BUFFER_SIZE) {
+		/* hopefully this does not happen too often, as all this
+		 * copying is quite slow... */
+		memcpy(file->buffer, data, INPUT_BUFFER_SIZE);
+		len -= INPUT_BUFFER_SIZE;
+		data += INPUT_BUFFER_SIZE;
+		r = release_processbuffer(file);
+		RET_UPDATE(result, r);
+	}
+	/* keep the remainder for the next call or release_finishfile */
+	memcpy(file->buffer, data, len);
+	file->waiting_bytes = len;
+	assert (file->waiting_bytes < INPUT_BUFFER_SIZE);
+	return result;
+}
+
+/* Generate a "Release"-file for arbitrary directory */
+retvalue release_directorydescription(struct release *release, const struct distribution *distribution, const struct target *target, const char *releasename, bool onlyifneeded) {
+ retvalue r;
+ struct filetorelease *f;
+ char *relfilename;
+
+ relfilename = calc_dirconcat(target->relativedirectory, releasename);
+ if (FAILEDTOALLOC(relfilename))
+ return RET_ERROR_OOM;
+ r = startfile(release, relfilename, NULL,
+ IC_FLAG(ic_uncompressed), onlyifneeded, &f);
+ free(relfilename);
+ if (RET_WAS_ERROR(r) || r == RET_NOTHING)
+ return r;
+
+#define release_writeheader(name, data) \
+ if (data != NULL) { \
+ (void)release_writestring(f, name ": "); \
+ (void)release_writestring(f, data); \
+ (void)release_writestring(f, "\n"); \
+ }
+
+ release_writeheader("Archive", distribution->suite);
+ release_writeheader("Version", distribution->version);
+ release_writeheader("Component",
+ atoms_components[target->component]);
+ release_writeheader("Origin", distribution->origin);
+ release_writeheader("Label", distribution->label);
+ release_writeheader("Architecture",
+ atoms_architectures[target->architecture]);
+ release_writeheader("NotAutomatic", distribution->notautomatic);
+ release_writeheader("ButAutomaticUpgrades",
+ distribution->butautomaticupgrades);
+ release_writeheader("Description", distribution->description);
+#undef release_writeheader
+ r = release_finishfile(release, f);
+ return r;
+}
+
+/* Persist the checksums of all exported files into the cache database,
+ * so a later export with usecache can skip regenerating them.
+ * Entries without checksums (deleted files) are only removed. */
+static retvalue storechecksums(struct release *release) {
+	struct release_entry *file;
+	retvalue result, r;
+	const char *combinedchecksum;
+	/* size including trailing '\0' character: */
+	size_t len;
+
+	result = RET_OK;
+
+	for (file = release->files ; file != NULL ; file = file->next) {
+
+		assert (file->relativefilename != NULL);
+
+		/* drop any stale cache entry first */
+		r = table_deleterecord(release->cachedb,
+				file->relativefilename, true);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		if (file->checksums == NULL)
+			continue;
+
+		r = checksums_getcombined(file->checksums, &combinedchecksum, &len);
+		RET_UPDATE(result, r);
+		if (!RET_IS_OK(r))
+			continue;
+
+		r = table_adduniqsizedrecord(release->cachedb,
+			file->relativefilename, combinedchecksum, len+1,
+			false, false);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* Does component name cn need the fake component prefix prepended
+ * in the Release file?  Only if a prefix is configured and cn does
+ * not already start with "<prefix>/". */
+static inline bool componentneedsfake(const char *cn, const struct release *release) {
+	if (release->fakecomponentprefix == NULL)
+		return false;
+	return strncmp(cn, release->fakecomponentprefix,
+				release->fakecomponentprefixlen) != 0
+		|| cn[release->fakecomponentprefixlen] != '/';
+}
+
+
+/* Create and append a release_entry for one of the top-level files
+ * (Release, InRelease, Release.gpg) with its final name and a ".new"
+ * temporary name prepared.  Returns NULL on allocation failure. */
+static struct release_entry *newspecialreleaseentry(struct release *release, const char *relativefilename) {
+	struct release_entry *n, *p;
+
+	assert (relativefilename != NULL);
+	n = zNEW(struct release_entry);
+	if (FAILEDTOALLOC(n))
+		return NULL;
+	n->relativefilename = strdup(relativefilename);
+	n->fullfinalfilename = calc_dirconcat(release->dirofdist,
+			relativefilename);
+	if (!FAILEDTOALLOC(n->fullfinalfilename))
+		n->fulltemporaryfilename = mprintf("%s.new",
+				n->fullfinalfilename);
+	/* all three allocations are checked together here */
+	if (FAILEDTOALLOC(n->relativefilename)
+			|| FAILEDTOALLOC(n->fullfinalfilename)
+			|| FAILEDTOALLOC(n->fulltemporaryfilename)) {
+		release_freeentry(n);
+		return NULL;
+	}
+	/* append at the end of the singly-linked list */
+	if (release->files == NULL)
+		release->files = n;
+	else {
+		p = release->files;
+		while (p->next != NULL)
+			p = p->next;
+		p->next = n;
+	}
+	return n;
+}
+/* Drop a special entry (InRelease/Release.gpg) from the list again if
+ * no new file was generated for it and there is no old file that would
+ * need deleting — otherwise keeping it would emit a bogus action. */
+static void omitunusedspecialreleaseentry(struct release *release, struct release_entry *e) {
+	struct release_entry **p;
+
+	if (e->fulltemporaryfilename != NULL)
+		/* new file available, nothing to omit */
+		return;
+	if (isregularfile(e->fullfinalfilename))
+		/* this will be deleted, everything fine */
+		return;
+	/* unlink e from the list */
+	p = &release->files;
+	while (*p != NULL && *p != e)
+		p = &(*p)->next;
+	if (*p != e) {
+		/* should not happen: e must be in the list */
+		assert (*p == e);
+		return;
+	}
+	*p = e->next;
+	release_freeentry(e);
+}
+
+/* Generate a main "Release" file for a distribution */
+/* Generate a main "Release" file for a distribution: write all the
+ * header fields and the per-hash file lists into a to-be-signed file,
+ * then create Release(.new), InRelease(.new) and Release.gpg(.new)
+ * via signedfile_create.  Returns RET_NOTHING when onlyifneeded is
+ * set and nothing changed since the last export. */
+retvalue release_prepare(struct release *release, struct distribution *distribution, bool onlyifneeded) {
+	size_t s;
+	retvalue r;
+	char buffer[100], untilbuffer[100];
+	time_t t;
+	struct tm *gmt;
+	struct release_entry *file;
+	enum checksumtype cs;
+	int i;
+	static const char * const release_checksum_headers[cs_hashCOUNT] =
+		{ "MD5Sum:\n", "SHA1:\n", "SHA256:\n", "SHA512:\n" };
+	struct release_entry *plainentry, *signedentry, *detachedentry;
+
+	// TODO: check for existence of Release file here first?
+	if (onlyifneeded && !release->new) {
+		return RET_NOTHING;
+	}
+
+	(void)time(&t);
+	gmt = gmtime(&t);
+	if (FAILEDTOALLOC(gmt))
+		return RET_ERROR_OOM;
+	/* "Date:" field, RFC-2822 style in UTC */
+	s=strftime(buffer, 99, "%a, %d %b %Y %H:%M:%S UTC", gmt);
+	if (s == 0 || s >= 99) {
+		fprintf(stderr, "strftime is doing strange things...\n");
+		return RET_ERROR;
+	}
+	if (distribution->validfor > 0) {
+		/* "Valid-Until:" is Date plus the configured offset */
+		t += distribution->validfor;
+		gmt = gmtime(&t);
+		if (FAILEDTOALLOC(gmt))
+			return RET_ERROR_OOM;
+		s=strftime(untilbuffer, 99, "%a, %d %b %Y %H:%M:%S UTC", gmt);
+		if (s == 0 || s >= 99) {
+			fprintf(stderr,
+"strftime is doing strange things...\n");
+			return RET_ERROR;
+		}
+	}
+	plainentry = newspecialreleaseentry(release, "Release");
+	if (FAILEDTOALLOC(plainentry))
+		return RET_ERROR_OOM;
+	signedentry = newspecialreleaseentry(release, "InRelease");
+	if (FAILEDTOALLOC(signedentry))
+		return RET_ERROR_OOM;
+	detachedentry = newspecialreleaseentry(release, "Release.gpg");
+	/* bugfix: this used to re-check signedentry, so a failed
+	 * allocation of detachedentry went unnoticed */
+	if (FAILEDTOALLOC(detachedentry))
+		return RET_ERROR_OOM;
+	r = signature_startsignedfile(&release->signedfile);
+	if (RET_WAS_ERROR(r))
+		return r;
+/* write errors are collected inside signedfile and checked
+ * by signedfile_create below */
+#define writestring(s) signedfile_write(release->signedfile, s, strlen(s))
+#define writechar(c) {char __c = c ; signedfile_write(release->signedfile, &__c, 1); }
+
+	if (distribution->origin != NULL) {
+		writestring("Origin: ");
+		writestring(distribution->origin);
+		writechar('\n');
+	}
+	if (distribution->label != NULL) {
+		writestring("Label: ");
+		writestring(distribution->label);
+		writechar('\n');
+	}
+	if (release->fakesuite != NULL) {
+		writestring("Suite: ");
+		writestring(release->fakesuite);
+		writechar('\n');
+	} else if (distribution->suite != NULL) {
+		writestring("Suite: ");
+		writestring(distribution->suite);
+		writechar('\n');
+	}
+	writestring("Codename: ");
+	if (release->fakecodename != NULL)
+		writestring(release->fakecodename);
+	else
+		writestring(distribution->codename);
+	if (distribution->version != NULL) {
+		writestring("\nVersion: ");
+		writestring(distribution->version);
+	}
+	writestring("\nDate: ");
+	writestring(buffer);
+	if (distribution->validfor > 0) {
+		writestring("\nValid-Until: ");
+		writestring(untilbuffer);
+	}
+	writestring("\nArchitectures:");
+	for (i = 0 ; i < distribution->architectures.count ; i++) {
+		architecture_t a = distribution->architectures.atoms[i];
+
+		/* Debian's topmost Release files do not list it,
+		 * so we won't either */
+		if (a == architecture_source)
+			continue;
+		writechar(' ');
+		writestring(atoms_architectures[a]);
+	}
+	writestring("\nComponents:");
+	for (i = 0 ; i < distribution->components.count ; i++) {
+		component_t c = distribution->components.atoms[i];
+		const char *cn = atoms_components[c];
+
+		writechar(' ');
+		if (componentneedsfake(cn, release)) {
+			writestring(release->fakecomponentprefix);
+			writechar('/');
+		}
+		writestring(cn);
+	}
+	if (distribution->description != NULL) {
+		writestring("\nDescription: ");
+		writestring(distribution->description);
+	}
+	if (distribution->signed_by != NULL) {
+		writestring("\nSigned-By: ");
+		writestring(distribution->signed_by);
+	}
+	if (distribution->notautomatic != NULL) {
+		writestring("\nNotAutomatic: ");
+		writestring(distribution->notautomatic);
+	}
+	if (distribution->butautomaticupgrades != NULL) {
+		writestring("\nButAutomaticUpgrades: ");
+		writestring(distribution->butautomaticupgrades);
+	}
+	writechar('\n');
+
+	/* one " hash size filename" line per file and hash type */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (release_checksum_headers[cs] != NULL);
+		writestring(release_checksum_headers[cs]);
+		for (file = release->files ; file != NULL ; file = file->next) {
+			const char *hash, *size;
+			size_t hashlen, sizelen;
+			if (file->checksums == NULL)
+				continue;
+			if (!checksums_gethashpart(file->checksums, cs,
+						&hash, &hashlen, &size, &sizelen))
+				continue;
+			writechar(' ');
+			signedfile_write(release->signedfile, hash, hashlen);
+			writechar(' ');
+			signedfile_write(release->signedfile, size, sizelen);
+			writechar(' ');
+			writestring(file->relativefilename);
+			writechar('\n');
+		}
+	}
+	r = signedfile_create(release->signedfile,
+			plainentry->fulltemporaryfilename,
+			&signedentry->fulltemporaryfilename,
+			&detachedentry->fulltemporaryfilename,
+			&distribution->signwith, !global.keeptemporaries);
+	if (RET_WAS_ERROR(r)) {
+		signedfile_free(release->signedfile);
+		release->signedfile = NULL;
+		return r;
+	}
+	/* drop entries for signature files that were not generated */
+	omitunusedspecialreleaseentry(release, signedentry);
+	omitunusedspecialreleaseentry(release, detachedentry);
+	return RET_OK;
+}
+
+/* Report everything done for this distribution (or snapshot) to the
+ * outhook log: a BEGIN/END pair framing one DISTFILE/DISTSYMLINK/
+ * DISTDELETE line per affected file.  Best-effort only. */
+static inline void release_toouthook(struct release *release, struct distribution *distribution) {
+	struct release_entry *file;
+	char *reldir;
+
+	if (release->snapshotname != NULL) {
+		reldir = mprintf("dists/%s/snapshots/%s",
+				distribution->codename, release->snapshotname);
+		if (FAILEDTOALLOC(reldir))
+			return;
+		outhook_send("BEGIN-SNAPSHOT", distribution->codename,
+				reldir, release->snapshotname);
+	} else {
+		reldir = mprintf("dists/%s", distribution->codename);
+		if (FAILEDTOALLOC(reldir))
+			return;
+		outhook_send("BEGIN-DISTRIBUTION", distribution->codename,
+				reldir, distribution->suite);
+	}
+
+	for (file = release->files ; file != NULL ; file = file->next) {
+		/* relf chks ffn  ftfn symt
+		 * name chks NULL NULL NULL: added old filename or virtual file
+		 * name chks file NULL NULL: renamed new file and published
+		 * name NULL file NULL NULL: renamed new file
+		 * name NULL NULL NULL NULL: deleted file
+		 * name NULL NULL NULL file: created symlink */
+
+		/* should already be in place: */
+		assert (file->fulltemporaryfilename == NULL);
+		/* symlinks are special: */
+		if (file->symlinktarget != NULL) {
+			outhook_send("DISTSYMLINK",
+					reldir,
+					file->relativefilename,
+					file->symlinktarget);
+		} else if (file->fullfinalfilename != NULL) {
+			outhook_send("DISTFILE", reldir,
+					file->relativefilename,
+					file->fullfinalfilename);
+		} else if (file->checksums == NULL){
+			outhook_send("DISTDELETE", reldir,
+					file->relativefilename, NULL);
+		}
+		/* would be nice to distinguish kept and virtual files... */
+	}
+
+	if (release->snapshotname != NULL) {
+		outhook_send("END-SNAPSHOT", distribution->codename,
+				reldir, release->snapshotname);
+	} else {
+		outhook_send("END-DISTRIBUTION", distribution->codename,
+				reldir, distribution->suite);
+	}
+	free(reldir);
+}
+
+/* Generate a main "Release" file for a distribution */
+retvalue release_finish(/*@only@*/struct release *release, struct distribution *distribution) {
+ retvalue result, r;
+ int e;
+ struct release_entry *file;
+ bool somethingwasdone;
+
+ somethingwasdone = false;
+ result = RET_OK;
+
+ for (file = release->files ; file != NULL ; file = file->next) {
+ assert (file->relativefilename != NULL);
+ if (file->checksums == NULL
+ && file->fullfinalfilename != NULL
+ && file->fulltemporaryfilename == NULL
+ && file->symlinktarget == NULL) {
+ e = unlink(file->fullfinalfilename);
+ if (e < 0) {
+ e = errno;
+ fprintf(stderr,
+"Error %d deleting %s: %s. (Will be ignored)\n",
+ e, file->fullfinalfilename,
+ strerror(e));
+ }
+ free(file->fullfinalfilename);
+ file->fullfinalfilename = NULL;
+ } else if (file->fulltemporaryfilename != NULL) {
+ assert (file->fullfinalfilename != NULL);
+ assert (file->symlinktarget == NULL);
+
+ e = rename(file->fulltemporaryfilename,
+ file->fullfinalfilename);
+ if (e < 0) {
+ e = errno;
+ fprintf(stderr,
+"Error %d moving %s to %s: %s!\n",
+ e, file->fulltemporaryfilename,
+ file->fullfinalfilename,
+ strerror(e));
+ r = RET_ERRNO(e);
+ /* after something was done, do not stop
+ * but try to do as much as possible */
+ if (!somethingwasdone) {
+ release_free(release);
+ return r;
+ }
+ RET_UPDATE(result, r);
+ } else {
+ somethingwasdone = true;
+ free(file->fulltemporaryfilename);
+ file->fulltemporaryfilename = NULL;
+ }
+ } else if (file->symlinktarget != NULL) {
+ assert (file->fullfinalfilename != NULL);
+
+ (void)unlink(file->fullfinalfilename);
+ e = symlink(file->symlinktarget, file->fullfinalfilename);
+ if (e != 0) {
+ e = errno;
+ fprintf(stderr,
+"Error %d creating symlink '%s' -> '%s': %s.\n",
+ e, file->fullfinalfilename,
+ file->symlinktarget,
+ strerror(e));
+ r = RET_ERRNO(e);
+ /* after something was done, do not stop
+ * but try to do as much as possible */
+ if (!somethingwasdone) {
+ release_free(release);
+ return r;
+ }
+ RET_UPDATE(result, r);
+ }
+ }
+ }
+ if (RET_WAS_ERROR(result) && somethingwasdone) {
+ fprintf(stderr,
+"ATTENTION: some files were already moved to place, some could not be.\n"
+"The generated index files for %s might be in a inconsistent state\n"
+"and currently not useable! You should remove the reason for the failure\n"
+"(most likely bad access permissions) and export the affected distributions\n"
+"manually (via reprepro export codenames) as soon as possible!\n",
+ distribution->codename);
+ }
+ if (release->cachedb != NULL) {
+ // TODO: split this in removing before and adding later?
+ // remember which file were changed in case of error, so
+ // only those are changed...
+ /* now update the cache database,
+ * so we find those the next time */
+ r = storechecksums(release);
+ RET_UPDATE(result, r);
+
+ r = table_close(release->cachedb);
+ release->cachedb = NULL;
+ RET_ENDUPDATE(result, r);
+ }
+ release_toouthook(release, distribution);
+ /* free everything */
+ release_free(release);
+ return result;
+}
+
+/* Create relativedirectory (and any missing parents) below the
+ * distribution's dists directory. */
+retvalue release_mkdir(struct release *release, const char *relativedirectory) {
+	char *dirname;
+	retvalue r;
+
+	dirname = calc_dirconcat(release->dirofdist, relativedirectory);
+	if (FAILEDTOALLOC(dirname))
+		return RET_ERROR_OOM;
+	// TODO: in some far future, remember which dirs were created so that
+	r = dirs_make_recursive(dirname);
+	free(dirname);
+	return r;
+}
diff --git a/release.h b/release.h
new file mode 100644
index 0000000..52e37a2
--- /dev/null
+++ b/release.h
@@ -0,0 +1,68 @@
+#ifndef REPREPRO_RELEASE_H
+#define REPREPRO_RELEASE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+
+struct release;
+
+#define ic_first ic_uncompressed
+enum indexcompression {ic_uncompressed=0, ic_gzip,
+#ifdef HAVE_LIBBZ2
+ ic_bzip2,
+#endif
+#ifdef HAVE_LIBLZMA
+ ic_xz,
+#endif
+ ic_count /* fake item to get count */
+};
+typedef unsigned int compressionset; /* 1 << indexcompression */
+#define IC_FLAG(a) (1<<(a))
+
+/* Initialize Release generation */
+retvalue release_init(struct release **, const char * /*codename*/, /*@null@*/const char * /*suite*/, /*@null@*/const char * /*fakeprefix*/);
+/* same but for a snapshot */
+retvalue release_initsnapshot(const char *codename, const char *name, struct release **);
+
+retvalue release_mkdir(struct release *, const char * /*relativedirectory*/);
+
+const char *release_dirofdist(struct release *);
+
+retvalue release_addnew(struct release *, /*@only@*/char *, /*@only@*/char *);
+retvalue release_addsilentnew(struct release *, /*@only@*/char *, /*@only@*/char *);
+retvalue release_adddel(struct release *, /*@only@*/char *);
+retvalue release_addold(struct release *, /*@only@*/char *);
+
+struct filetorelease;
+
+retvalue release_startfile(struct release *, const char * /*filename*/, compressionset, bool /*usecache*/, struct filetorelease **);
+retvalue release_startlinkedfile(struct release *, const char * /*filename*/, const char * /*symlinkas*/, compressionset, bool /*usecache*/, struct filetorelease **);
+void release_warnoldfileorlink(struct release *, const char *, compressionset);
+
+/* return true if an old file is already there */
+bool release_oldexists(struct filetorelease *);
+
+/* errors will be cached for release_finishfile */
+retvalue release_writedata(struct filetorelease *, const char *, size_t);
+#define release_writestring(file, data) release_writedata(file, data, strlen(data))
+
+void release_abortfile(/*@only@*/struct filetorelease *);
+retvalue release_finishfile(struct release *, /*@only@*/struct filetorelease *);
+
+struct distribution;
+struct target;
+retvalue release_directorydescription(struct release *, const struct distribution *, const struct target *, const char * /*filename*/, bool /*onlyifneeded*/);
+
+void release_free(/*@only@*/struct release *);
+retvalue release_prepare(struct release *, struct distribution *, bool /*onlyneeded*/);
+retvalue release_finish(/*@only@*/struct release *, struct distribution *);
+
+#endif
diff --git a/remoterepository.c b/remoterepository.c
new file mode 100644
index 0000000..ec1d726
--- /dev/null
+++ b/remoterepository.c
@@ -0,0 +1,2103 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2007,2008,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+#include <dirent.h>
+
+#include "globals.h"
+#include "error.h"
+#include "ignore.h"
+#include "filecntl.h"
+#include "checksums.h"
+#include "mprintf.h"
+#include "dirs.h"
+#include "chunks.h"
+#include "names.h"
+#include "aptmethod.h"
+#include "signature.h"
+#include "readtextfile.h"
+#include "uncompression.h"
+#include "diffindex.h"
+#include "rredpatch.h"
+#include "remoterepository.h"
+
+/* This is code to handle lists from remote repositories.
+ Those are stored in the lists/ (or --listdir) directory
+ and need some maintenance:
+
+ - cleaning (unneeded) lists from that directory
+ - deciding what to download from a remote repository
+ (needs knowledge of what is there and what is wanted)
+ - in the future: implement ed to use remote .diffs
+*/
+
+struct remote_repository {
+ struct remote_repository *next, *prev;
+
+ /* repository is determined by pattern name currently.
+ * That might change if there is some safe way to combine
+ * some. (note that method options might make equally looking
+ * repositories different ones, so that is hard to decide).
+ *
+ * This is possible as the pattern is not modifiable in options
+ * or method by the using distribution.
+ */
+ const char *name;
+ const char *method;
+ const char *fallback;
+ const struct strlist *config;
+
+ struct aptmethod *download;
+
+ struct remote_distribution *distributions;
+};
+static struct remote_repository *repositories = NULL;
+
+struct remote_distribution {
+ struct remote_distribution *next;
+
+ /* repository and suite uniquely identify it,
+ as the only thing the distribution can change is the suite.
+ Currently most of the other fields would also fit in the
+ remote_repository structure, but I plan to add new patterns
+ allowing this by distribution...
+ */
+ struct remote_repository *repository;
+ char *suite;
+
+ /* flat repository */
+ bool flat; bool flatnonflatwarned;
+ char *suite_base_dir;
+
+ /* if true, do not download or check Release file */
+ bool ignorerelease;
+ /* hashes to ignore */
+ bool ignorehashes[cs_hashCOUNT];
+
+ /* linked list of key descriptions to check against, each must match */
+ struct signature_requirement *verify;
+
+ /* local copy of InRelease, Release and Release.gpg file,
+ * only set if available */
+ char *inreleasefile;
+ char *releasefile;
+ char *releasegpgfile;
+ const char *usedreleasefile;
+
+ /* filenames and checksums from the Release file */
+ struct checksumsarray remotefiles;
+
+ /* the index files we need */
+ struct remote_index *indices;
+
+ /* InRelease failed or requested not to be used */
+ bool noinrelease;
+};
+
+struct remote_index {
+ /* next index in remote distribution */
+ struct remote_index *next;
+
+ struct remote_distribution *from;
+
+ /* what to download? .gz better than .bz2? and so on */
+ struct encoding_preferences downloadas;
+
+ /* remote filename as to be found in Release file*/
+ char *filename_in_release;
+
+ /* the name without suffix in the lists/ dir */
+ char *cachefilename;
+ /* the basename of the above */
+ const char *cachebasename;
+
+ /* index in checksums for the different types, -1 = not avail */
+ int ofs[c_COUNT], diff_ofs;
+
+ /* index in requested download methods so we can continue later */
+ int lasttriedencoding;
+ /* the compression to be tried currently */
+ enum compression compression;
+
+ /* the old uncompressed file, so that it is only deleted
+ * when needed, to avoid losing it for a patch run */
+ /*@dependant@*/struct cachedlistfile *olduncompressed;
+ struct checksums *oldchecksums;
+
+ /* if using pdiffs, the content of the Packages.diff/Index: */
+ struct diffindex *diffindex;
+ /* the last patch queued to be applied */
+ char *patchfilename;
+ /*@dependant@*/const struct diffindex_patch *selectedpatch;
+ bool deletecompressedpatch;
+
+ bool queued;
+ bool needed;
+ bool got;
+};
+
+#define MAXPARTS 5
+struct cachedlistfile {
+ struct cachedlistfile *next;
+ const char *basefilename;
+ unsigned int partcount;
+ const char *parts[MAXPARTS];
+ /* might be used by some rule */
+ bool needed, deleted;
+ char fullfilename[];
+};
+
+
+/* Release all memory held by a single remote_index entry. */
+static void remote_index_free(/*@only@*/struct remote_index *ri) {
+ if (ri == NULL)
+ return;
+ free(ri->cachefilename);
+ free(ri->filename_in_release);
+ free(ri->patchfilename);
+ diffindex_free(ri->diffindex);
+ checksums_free(ri->oldchecksums);
+ free(ri);
+}
+
+/* Free one remote_distribution together with all of its indices. */
+static void remote_distribution_free(/*@only@*/struct remote_distribution *rd) {
+ struct remote_index *index;
+
+ if (rd == NULL)
+ return;
+ free(rd->suite);
+ signature_requirements_free(rd->verify);
+ free(rd->inreleasefile);
+ free(rd->releasefile);
+ free(rd->releasegpgfile);
+ free(rd->suite_base_dir);
+ checksumsarray_done(&rd->remotefiles);
+ while ((index = rd->indices) != NULL) {
+ rd->indices = index->next;
+ remote_index_free(index);
+ }
+ free(rd);
+}
+
+/* Unlink a repository from the global doubly-linked list and free it,
+ * including all distributions attached to it.
+ * Fix: when the freed node was the head of the list, the static list
+ * head 'repositories' was left pointing at freed memory; it is now
+ * advanced to the next element. */
+void remote_repository_free(struct remote_repository *remote) {
+ if (remote == NULL)
+ return;
+ while (remote->distributions != NULL) {
+ struct remote_distribution *h = remote->distributions;
+ remote->distributions = h->next;
+ remote_distribution_free(h);
+ }
+ if (remote->next != NULL)
+ remote->next->prev = remote->prev;
+ if (remote->prev != NULL)
+ remote->prev->next = remote->next;
+ else if (repositories == remote)
+ /* removing the list head: keep the global anchor valid */
+ repositories = remote->next;
+ free(remote);
+ return;
+}
+
+/* Free a whole linked list of cached list-directory entries. */
+void cachedlistfile_freelist(struct cachedlistfile *list) {
+ struct cachedlistfile *here;
+
+ for (here = list ; here != NULL ; here = list) {
+ list = here->next;
+ free(here);
+ }
+}
+
+/* Remove every file in the lists/ directory not marked as needed. */
+void cachedlistfile_deleteunneeded(const struct cachedlistfile *file) {
+ while (file != NULL) {
+ if (!file->needed) {
+ if (verbose >= 0)
+ printf("deleting %s\n", file->fullfilename);
+ deletefile(file->fullfilename);
+ }
+ file = file->next;
+ }
+}
+
+/* Parse one directory entry of the lists/ dir into a cachedlistfile.
+ * The single allocation holds the struct, the full path, the basename
+ * and the '_'-separated (and %XY-unescaped) name parts back to back.
+ * A name whose escaping cannot be decoded is returned with
+ * partcount == 0; NULL is returned on allocation failure.
+ * Fix: the guard before reading a "%XY" escape tested the total name
+ * length ('len', which never changes) instead of the characters still
+ * remaining ('l'), and the two consumed hex digits were not subtracted
+ * from 'l', so a name ending in "%X" read one byte past its end. */
+static /*@null@*/ struct cachedlistfile *cachedlistfile_new(const char *basefilename, size_t len, size_t listdirlen) {
+ struct cachedlistfile *c;
+ size_t l;
+ char *p;
+ char ch;
+
+ c = malloc(sizeof(struct cachedlistfile) + listdirlen + 2*len + 3);
+ if (FAILEDTOALLOC(c))
+ return NULL;
+ c->next = NULL;
+ c->needed = false;
+ c->deleted = false;
+ p = c->fullfilename;
+ assert ((size_t)(p - (char*)c) <= sizeof(struct cachedlistfile));
+ memcpy(p, global.listdir, listdirlen);
+ p += listdirlen;
+ *(p++) = '/';
+ assert ((size_t)(p - c->fullfilename) == listdirlen + 1);
+ c->basefilename = p;
+ memcpy(p, basefilename, len); p += len;
+ *(p++) = '\0';
+ assert ((size_t)(p - c->fullfilename) == listdirlen + len + 2);
+
+ c->parts[0] = p;
+ c->partcount = 1;
+ l = len;
+ while (l-- > 0 && (ch = *(basefilename++)) != '\0') {
+ if (ch == '_') {
+ *(p++) = '\0';
+ /* only the first MAXPARTS parts are remembered */
+ if (c->partcount < MAXPARTS)
+ c->parts[c->partcount] = p;
+ c->partcount++;
+ } else if (ch == '%') {
+ char first, second;
+
+ /* a hex escape needs two more characters */
+ if (l < 2) {
+ c->partcount = 0;
+ return c;
+ }
+ first = *(basefilename++);
+ second = *(basefilename++);
+ l -= 2;
+ if (first >= '0' && first <= '9')
+ *p = (first - '0') << 4;
+ else if (first >= 'a' && first <= 'f')
+ *p = (first - 'a' + 10) << 4;
+ else {
+ c->partcount = 0;
+ return c;
+ }
+ if (second >= '0' && second <= '9')
+ *p |= (second - '0');
+ else if (second >= 'a' && second <= 'f')
+ *p |= (second - 'a' + 10);
+ else {
+ c->partcount = 0;
+ return c;
+ }
+ p++;
+ } else
+ *(p++) = ch;
+ }
+ *(p++) = '\0';
+ assert ((size_t)(p - c->fullfilename) <= listdirlen + 2*len + 3);
+ return c;
+}
+
+/* Scan the lists/ directory and build a linked list of cachedlistfile
+ * entries, one per directory entry ('.' and '..' excluded).
+ * On success *cachedfiles_p receives the list and RET_OK is returned;
+ * on any error the partial list is freed and an error is returned. */
+retvalue cachedlists_scandir(/*@out@*/struct cachedlistfile **cachedfiles_p) {
+ struct cachedlistfile *cachedfiles = NULL, **next_p;
+ struct dirent *r;
+ size_t listdirlen = strlen(global.listdir);
+ DIR *dir;
+
+ // TODO: check if it is always created before...
+ dir = opendir(global.listdir);
+ if (dir == NULL) {
+ int e = errno;
+ fprintf(stderr,
+"Error %d opening directory '%s': %s!\n",
+ e, global.listdir, strerror(e));
+ return RET_ERRNO(e);
+ }
+ next_p = &cachedfiles;
+ while (true) {
+ size_t namelen;
+ int e;
+
+ /* errno must be cleared first: readdir returns NULL both at
+ * end-of-directory (errno unchanged) and on error (errno set) */
+ errno = 0;
+ r = readdir(dir);
+ if (r == NULL) {
+ e = errno;
+ if (e == 0)
+ break;
+ /* this should not happen... */
+ e = errno;
+ fprintf(stderr, "Error %d reading dir '%s': %s!\n",
+ e, global.listdir, strerror(e));
+ (void)closedir(dir);
+ cachedlistfile_freelist(cachedfiles);
+ return RET_ERRNO(e);
+ }
+ namelen = _D_EXACT_NAMLEN(r);
+ if (namelen == 1 && r->d_name[0] == '.')
+ continue;
+ if (namelen == 2 && r->d_name[0] == '.' && r->d_name[1] == '.')
+ continue;
+ *next_p = cachedlistfile_new(r->d_name, namelen, listdirlen);
+ if (FAILEDTOALLOC(*next_p)) {
+ (void)closedir(dir);
+ cachedlistfile_freelist(cachedfiles);
+ return RET_ERROR_OOM;
+ }
+ next_p = &(*next_p)->next;
+ }
+ if (closedir(dir) != 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d closing directory '%s': %s!\n",
+ e, global.listdir, strerror(e));
+ cachedlistfile_freelist(cachedfiles);
+ return RET_ERRNO(e);
+ }
+ *cachedfiles_p = cachedfiles;
+ return RET_OK;
+}
+
+/* Delete the file belonging to this cache entry (at most once;
+ * a second call on an already-deleted entry is a no-op). */
+static retvalue cachedlistfile_delete(struct cachedlistfile *old) {
+ int e;
+ if (old->deleted)
+ return RET_OK;
+ e = deletefile(old->fullfilename);
+ /* NOTE(review): assumes deletefile returns 0 on success and an
+ * errno-style value on failure - confirm against filecntl.h */
+ if (e != 0)
+ return RET_ERRNO(e);
+ old->deleted = true;
+ return RET_OK;
+}
+
+/* Register a new remote repository (name/method/fallback/config are
+ * borrowed from the caller, not copied) and push it onto the global
+ * repository list. Returns NULL on allocation failure. */
+struct remote_repository *remote_repository_prepare(const char *name, const char *method, const char *fallback, const struct strlist *config) {
+ struct remote_repository *repo;
+
+ /* calling code ensures no two with the same name are created,
+ * so just create it... */
+ repo = zNEW(struct remote_repository);
+ if (FAILEDTOALLOC(repo))
+ return NULL;
+ repo->name = name;
+ repo->method = method;
+ repo->fallback = fallback;
+ repo->config = config;
+
+ /* prepend to the doubly-linked global list */
+ repo->next = repositories;
+ if (repositories != NULL)
+ repositories->prev = repo;
+ repositories = repo;
+
+ return repo;
+}
+
+/* This escaping is quite harsh, but so nothing bad can happen... */
+/* Number of bytes the escaped form of p needs (see escapedcopy:
+ * a leading '-' and every byte outside [A-Za-z0-9-] become three
+ * bytes, everything else one). */
+static inline size_t escapedlen(const char *p) {
+ size_t count = 0;
+
+ if (*p == '-') {
+ count = 3;
+ p++;
+ }
+ for (; *p != '\0' ; p++) {
+ bool plain = (*p >= 'A' && *p <= 'Z')
+ || (*p >= 'a' && *p <= 'z')
+ || (*p >= '0' && *p <= '9')
+ || *p == '-';
+ count += plain ? 1 : 3;
+ }
+ return count;
+}
+
+/* Write the escaped form of orig to dest and return the position just
+ * past the last byte written (no terminating NUL is added).
+ * A leading '-' and any byte outside [A-Za-z0-9-] are emitted as '%'
+ * followed by two uppercase hex digits.
+ * Fix: the hex lookup table is read-only data, so make it const. */
+static inline char *escapedcopy(char *dest, const char *orig) {
+ static const char hex[16] = "0123456789ABCDEF";
+ if (*orig == '-') {
+ orig++;
+ *dest = '%'; dest++;
+ *dest = '2'; dest++;
+ *dest = 'D'; dest++;
+ }
+ while (*orig != '\0') {
+ if ((*orig < 'A' || *orig > 'Z')
+ && (*orig < 'a' || *orig > 'z')
+ && (*orig < '0' || *orig > '9')
+ && *orig != '-') {
+ *dest = '%'; dest++;
+ *dest = hex[(*orig >> 4)& 0xF ]; dest++;
+ *dest = hex[*orig & 0xF ]; dest++;
+ } else {
+ *dest = *orig;
+ dest++;
+ }
+ orig++;
+ }
+ return dest;
+}
+
+/* Build "<listdir>/<escaped field1>_<escaped field2>_..._<type>"
+ * (without the trailing "_<type>" part when type is NULL). The
+ * variadic field list must contain exactly 'count' non-NULL strings
+ * followed by a NULL sentinel. Returns NULL on allocation failure. */
+char *genlistsfilename(const char *type, unsigned int count, ...) {
+ const char *fields[count];
+ unsigned int i;
+ size_t listdir_len, type_len, len;
+ char *result, *p;
+ va_list ap;
+
+ len = 0;
+ va_start(ap, count);
+ for (i = 0 ; i < count ; i++) {
+ fields[i] = va_arg(ap, const char*);
+ assert (fields[i] != NULL);
+ len += escapedlen(fields[i]) + 1;
+ }
+ /* check sentinel */
+ assert (va_arg(ap, const char*) == NULL);
+ va_end(ap);
+ listdir_len = strlen(global.listdir);
+ if (type != NULL)
+ type_len = strlen(type);
+ else
+ type_len = 0;
+
+ result = malloc(listdir_len + type_len + len + 2);
+ if (FAILEDTOALLOC(result))
+ return NULL;
+ memcpy(result, global.listdir, listdir_len);
+ p = result + listdir_len;
+ *(p++) = '/';
+ for (i = 0 ; i < count ; i++) {
+ p = escapedcopy(p, fields[i]);
+ *(p++) = '_';
+ }
+ assert ((size_t)(p - result) == listdir_len + len + 1);
+ if (type != NULL)
+ memcpy(p, type, type_len + 1);
+ /* no type: overwrite the trailing '_' with the terminator */
+ else
+ *(--p) = '\0';
+ return result;
+}
+
+/* Mark as needed every cached file whose '_'-separated name parts
+ * match the given 'count' fields followed by 'type'. The variadic
+ * field list must end with a NULL sentinel. */
+void cachedlistfile_need(struct cachedlistfile *list, const char *type, unsigned int count, ...) {
+ struct cachedlistfile *file;
+ const char *fields[count];
+ unsigned int i;
+ va_list ap;
+
+ va_start(ap, count);
+ for (i = 0 ; i < count ; i++) {
+ fields[i] = va_arg(ap, const char*);
+ assert (fields[i] != NULL);
+ }
+ /* check sentinel */
+ assert (va_arg(ap, const char*) == NULL);
+ va_end(ap);
+
+ for (file = list ; file != NULL ; file = file->next) {
+ /* the file name must have all fields plus the type part */
+ if (file->partcount != count + 1)
+ continue;
+ i = 0;
+ while (i < count && strcmp(file->parts[i], fields[i]) == 0)
+ i++;
+ if (i < count)
+ continue;
+ if (strcmp(type, file->parts[i]) != 0)
+ continue;
+ file->needed = true;
+ }
+}
+
+/* Find or create the remote_distribution for 'suite' within the given
+ * repository. When the suite is already registered, conflicting
+ * settings (ignore Release, GetInRelease, ignored hashes) from
+ * different users are resolved towards the safer variant with a
+ * warning. On success *out_p receives the (shared) entry.
+ * Fixes: the warning about conflicting hash settings printed the
+ * suite name where the hash name belongs (arguments were swapped),
+ * and the allocation check after creating releasegpgfile tested
+ * releasefile instead of releasegpgfile. */
+retvalue remote_distribution_prepare(struct remote_repository *repository, const char *suite, bool ignorerelease, bool getinrelease, const char *verifyrelease, bool flat, bool *ignorehashes, struct remote_distribution **out_p) {
+ struct remote_distribution *n, **last;
+ enum checksumtype cs;
+
+ for (last = &repository->distributions ; (n = *last) != NULL
+ ; last = &n->next) {
+ if (strcmp(n->suite, suite) != 0)
+ continue;
+ if (n->flat != flat) {
+ if (verbose >= 0 && !n->flatnonflatwarned &&
+ !IGNORABLE(flatandnonflat))
+ fprintf(stderr,
+"Warning: From the same remote repository '%s', distribution '%s'\n"
+"is requested both flat and non-flat. While this is possible\n"
+"(having %s/dists/%s and %s/%s), it is unlikely.\n"
+"To no longer see this message, use --ignore=flatandnonflat.\n",
+ repository->method, suite,
+ repository->method, suite,
+ repository->method, suite);
+ n->flatnonflatwarned = true;
+ continue;
+ }
+ break;
+ }
+
+ if (*last != NULL) {
+ /* already known: merge possibly conflicting options */
+ n = *last;
+ assert (n->flat == flat);
+
+ if ((n->ignorerelease && !ignorerelease) ||
+ (!n->ignorerelease && ignorerelease)) {
+ // TODO a hint which two are at fault would be nice,
+ // but how to get the information...
+ if (verbose >= 0)
+ fprintf(stderr,
+"Warning: I was told to both ignore Release files for Suite '%s'\n"
+"from remote repository '%s' and to not ignore it. Going to not ignore!\n",
+ suite, repository->name);
+ n->ignorerelease = false;
+ }
+ if ((n->noinrelease && getinrelease) ||
+ (!n->noinrelease && !getinrelease)) {
+ if (verbose >= 0)
+ fprintf(stderr,
+"Warning: Conflicting GetInRelease values for Suite '%s'\n"
+"from remote repository '%s'. Resolving to get InRelease files!\n",
+ suite, repository->name);
+ n->noinrelease = false;
+ }
+ for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+ if ((n->ignorehashes[cs] && !ignorehashes[cs]) ||
+ (!n->ignorehashes[cs] && ignorehashes[cs])) {
+ // TODO dito
+ if (verbose >= 0)
+ fprintf(stderr,
+"Warning: I was told to both ignore '%s' for Suite '%s'\n"
+"from remote repository '%s' and to not ignore it. Going to not ignore!\n",
+ release_checksum_names[cs],
+ suite,
+ repository->name);
+ n->ignorehashes[cs] = false;
+ }
+ }
+ if (verifyrelease != NULL) {
+ retvalue r;
+
+ r = signature_requirement_add(&n->verify,
+ verifyrelease);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ *out_p = n;
+ return RET_OK;
+ }
+
+ /* not yet known: create a new entry */
+ n = zNEW(struct remote_distribution);
+ if (FAILEDTOALLOC(n))
+ return RET_ERROR_OOM;
+ n->repository = repository;
+ n->suite = strdup(suite);
+ n->ignorerelease = ignorerelease;
+ n->noinrelease = !getinrelease;
+ if (verifyrelease != NULL) {
+ retvalue r;
+
+ r = signature_requirement_add(&n->verify, verifyrelease);
+ if (RET_WAS_ERROR(r)) {
+ remote_distribution_free(n);
+ return r;
+ }
+ }
+ memcpy(n->ignorehashes, ignorehashes, sizeof(bool [cs_hashCOUNT]));
+ n->flat = flat;
+ if (flat)
+ n->suite_base_dir = strdup(suite);
+ else
+ n->suite_base_dir = calc_dirconcat("dists", suite);
+ if (FAILEDTOALLOC(n->suite) ||
+ FAILEDTOALLOC(n->suite_base_dir)) {
+ remote_distribution_free(n);
+ return RET_ERROR_OOM;
+ }
+ /* ignorerelease can be unset later, so always calculate the filename */
+ if (flat)
+ n->inreleasefile = genlistsfilename("InRelease", 3,
+ repository->name, suite, "flat",
+ ENDOFARGUMENTS);
+ else
+ n->inreleasefile = genlistsfilename("InRelease", 2,
+ repository->name, suite, ENDOFARGUMENTS);
+ if (FAILEDTOALLOC(n->inreleasefile)) {
+ remote_distribution_free(n);
+ return RET_ERROR_OOM;
+ }
+ if (flat)
+ n->releasefile = genlistsfilename("Release", 3,
+ repository->name, suite, "flat",
+ ENDOFARGUMENTS);
+ else
+ n->releasefile = genlistsfilename("Release", 2,
+ repository->name, suite, ENDOFARGUMENTS);
+ if (FAILEDTOALLOC(n->releasefile)) {
+ remote_distribution_free(n);
+ return RET_ERROR_OOM;
+ }
+ n->releasegpgfile = calc_addsuffix(n->releasefile, "gpg");
+ if (FAILEDTOALLOC(n->releasegpgfile)) {
+ remote_distribution_free(n);
+ return RET_ERROR_OOM;
+ }
+ *last = n;
+ *out_p = n;
+ return RET_OK;
+}
+
+/* Make sure a downloaded file ends up at wantedfilename.
+ * If the method already stored it there nothing is copied and
+ * *checksums_p is set to NULL; otherwise the file is copied (never
+ * hardlinked) and, if checksums_p is non-NULL, its checksums are
+ * handed to the caller. */
+static retvalue copytoplace(const char *gotfilename, const char *wantedfilename, const char *method, struct checksums **checksums_p) {
+ retvalue r;
+ struct checksums *checksums = NULL;
+
+ /* if the file is somewhere else, copy it: */
+ if (strcmp(gotfilename, wantedfilename) != 0) {
+ /* never link index files, but copy them */
+ if (verbose > 1)
+ fprintf(stderr,
+"Copy file '%s' to '%s'...\n", gotfilename, wantedfilename);
+ r = checksums_copyfile(wantedfilename, gotfilename, false,
+ &checksums);
+ if (r == RET_ERROR_EXIST) {
+ fprintf(stderr,
+"Unexpected error: '%s' exists while it should not!\n",
+ wantedfilename);
+ }
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', obtained from '%s' method.\n",
+ gotfilename, method);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ }
+ if (checksums_p == NULL)
+ checksums_free(checksums);
+ else
+ *checksums_p = checksums;
+ return RET_OK;
+}
+
+static retvalue enqueue_old_release_files(struct remote_distribution *d);
+
+/* handle a downloaded Release or Release.gpg file:
+ * no checksums to test, nothing to trigger, as they have to be all
+ * read at once to decide what is new and what actually needs downloading */
+static retvalue release_callback(enum queue_action action, void *privdata, void *privdata2, UNUSED(const char *uri), const char *gotfilename, const char *wantedfilename, UNUSED(/*@null@*/const struct checksums *checksums), const char *methodname) {
+ struct remote_distribution *d = privdata;
+
+ /* the InRelease download failed: fall back to the classic
+ * Release (and Release.gpg when verifying) pair instead */
+ if (action == qa_error && privdata2 == d->inreleasefile) {
+ assert (!d->noinrelease);
+ return enqueue_old_release_files(d);
+ }
+
+ if (action != qa_got)
+ return RET_ERROR;
+
+ /* move the downloaded file to its place in the lists/ dir */
+ return copytoplace(gotfilename, wantedfilename, methodname, NULL);
+}
+
+/* Fall back to the classic Release file: queue its download and, if
+ * signature checking is requested, Release.gpg as well. Also records
+ * that InRelease is not to be used for this distribution. */
+static retvalue enqueue_old_release_files(struct remote_distribution *d) {
+ retvalue r;
+
+ d->noinrelease = true;
+ r = aptmethod_enqueueindex(d->repository->download,
+ d->suite_base_dir, "Release", "",
+ d->releasefile, "",
+ release_callback, d, NULL);
+ if (RET_WAS_ERROR(r))
+ return r;
+ /* the detached signature is only needed when verifying */
+ if (d->verify != NULL) {
+ r = aptmethod_enqueueindex(d->repository->download,
+ d->suite_base_dir, "Release", ".gpg",
+ d->releasegpgfile, "",
+ release_callback, d, NULL);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_OK;
+}
+
+/* Queue download of the meta files of one distribution: InRelease,
+ * or - when InRelease is disabled - Release plus Release.gpg.
+ * Stale local copies are removed first so failures cannot leave old
+ * data behind. Returns RET_NOTHING when Release files are ignored. */
+static retvalue remote_distribution_enqueuemetalists(struct remote_distribution *d) {
+ struct remote_repository *repository = d->repository;
+
+ assert (repository->download != NULL);
+
+ if (d->ignorerelease)
+ return RET_NOTHING;
+
+ (void)unlink(d->inreleasefile);
+ (void)unlink(d->releasefile);
+ if (d->verify != NULL) {
+ (void)unlink(d->releasegpgfile);
+ }
+
+ if (d->noinrelease)
+ return enqueue_old_release_files(d);
+ else
+ /* d->inreleasefile doubles as privdata2 marker so the
+ * callback can detect a failed InRelease download */
+ return aptmethod_enqueueindex(repository->download,
+ d->suite_base_dir, "InRelease", "", d->inreleasefile,
+ "", release_callback, d, d->inreleasefile);
+}
+
+/* Create one apt download method per registered repository so files
+ * can be queued; aborts early when the user interrupted. */
+retvalue remote_startup(struct aptmethodrun *run) {
+ struct remote_repository *repo;
+ retvalue r;
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+
+ for (repo = repositories ; repo != NULL ; repo = repo->next) {
+ assert (repo->download == NULL);
+ r = aptmethod_newmethod(run, repo->method, repo->fallback,
+ repo->config, &repo->download);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_OK;
+}
+
+/* Record, for one wanted index file, at which positions in the parsed
+ * Release file list its uncompressed/compressed variants and its
+ * .diff/Index appear (ri->ofs[] and ri->diff_ofs keep -1 for variants
+ * not listed). */
+static void find_index(const struct strlist *files, struct remote_index *ri) {
+ const char *filename = ri->filename_in_release;
+ size_t len = strlen(filename);
+ int i;
+ enum compression c;
+
+ for (i = 0 ; i < files->count ; i++) {
+ const char *value = files->values[i];
+
+ if (strncmp(value, filename, len) != 0)
+ continue;
+
+ value += len;
+
+ /* exact match: the uncompressed file */
+ if (*value == '\0') {
+ ri->ofs[c_none] = i;
+ continue;
+ }
+ if (*value != '.')
+ continue;
+ if (strcmp(value, ".diff/Index") == 0) {
+ ri->diff_ofs = i;
+ continue;
+ }
+
+ /* otherwise match one of the known compression suffixes */
+ for (c = 0 ; c < c_COUNT ; c++)
+ if (strcmp(value, uncompression_suffix[c]) == 0) {
+ ri->ofs[c] = i;
+ break;
+ }
+ }
+}
+
+/* Parse the checksum lists (MD5Sum:, SHA1:, ...) of a Release file
+ * chunk into a checksumsarray; hash types flagged in ignorehash are
+ * skipped. Fails when no known checksum field is present at all. */
+static inline retvalue release_getchecksums(const char *releasefile, const char *chunk, const bool ignorehash[cs_hashCOUNT], struct checksumsarray *out) {
+ retvalue r;
+ struct strlist files[cs_hashCOUNT];
+ enum checksumtype cs;
+ bool foundanything = false;
+
+ for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+ if (ignorehash[cs]) {
+ strlist_init(&files[cs]);
+ continue;
+ }
+ assert (release_checksum_names[cs] != NULL);
+ r = chunk_getextralinelist(chunk, release_checksum_names[cs],
+ &files[cs]);
+ if (RET_WAS_ERROR(r)) {
+ /* free the lists collected so far */
+ while (cs-- > cs_md5sum) {
+ strlist_done(&files[cs]);
+ }
+ return r;
+ } else if (r == RET_NOTHING)
+ strlist_init(&files[cs]);
+ else
+ foundanything = true;
+ }
+
+ if (!foundanything) {
+ fprintf(stderr, "Missing checksums in Release file '%s'!\n",
+ releasefile);
+ return RET_ERROR;
+ }
+
+ r = checksumsarray_parse(out, files, releasefile);
+ for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+ strlist_done(&files[cs]);
+ }
+ return r;
+}
+
+/* Verify and parse the downloaded release data of one distribution:
+ * either the inline-signed InRelease file or the Release file with a
+ * detached Release.gpg signature. On success the checksum lists are
+ * stored in rd->remotefiles and every wanted index is located in it. */
+static retvalue process_remoterelease(struct remote_distribution *rd) {
+ struct remote_repository *rr = rd->repository;
+ struct remote_index *ri;
+ retvalue r;
+ char *releasedata;
+ size_t releaselen;
+
+ if (!rd->noinrelease) {
+ /* InRelease: signature check also extracts the payload */
+ r = signature_check_inline(rd->verify,
+ rd->inreleasefile, &releasedata);
+ assert (r != RET_NOTHING);
+ if (r == RET_NOTHING)
+ r = RET_ERROR_BADSIG;
+ if (r == RET_ERROR_BADSIG) {
+ fprintf(stderr,
+"Error: Not enough signatures found for remote repository %s (%s %s)!\n",
+ rr->name, rr->method, rd->suite);
+ r = RET_ERROR_BADSIG;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ rd->usedreleasefile = rd->inreleasefile;
+ } else {
+ /* classic pair: read Release, then check Release.gpg
+ * against it (only when verification was requested) */
+ r = readtextfile(rd->releasefile, rd->releasefile,
+ &releasedata, &releaselen);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+ rd->usedreleasefile = rd->releasefile;
+
+ if (rd->verify != NULL) {
+ r = signature_check(rd->verify,
+ rd->releasegpgfile, rd->releasefile,
+ releasedata, releaselen);
+ assert (r != RET_NOTHING);
+ if (r == RET_NOTHING)
+ r = RET_ERROR_BADSIG;
+ if (r == RET_ERROR_BADSIG) {
+ fprintf(stderr,
+"Error: Not enough signatures found for remote repository %s (%s %s)!\n",
+ rr->name, rr->method, rd->suite);
+ r = RET_ERROR_BADSIG;
+ }
+ if (RET_WAS_ERROR(r)) {
+ free(releasedata);
+ return r;
+ }
+ }
+ }
+ r = release_getchecksums(rd->usedreleasefile, releasedata,
+ rd->ignorehashes, &rd->remotefiles);
+ free(releasedata);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ /* Check for our files in there */
+ for (ri = rd->indices ; ri != NULL ; ri = ri->next) {
+ find_index(&rd->remotefiles.names, ri);
+ }
+ // TODO: move checking if not exists at all to here?
+ return RET_OK;
+}
+
+/* Download (unless nodownload) and afterwards verify and parse the
+ * Release data of every registered remote distribution. */
+retvalue remote_preparemetalists(struct aptmethodrun *run, bool nodownload) {
+ struct remote_repository *rr;
+ struct remote_distribution *rd;
+ retvalue r;
+
+ if (!nodownload) {
+ /* first queue everything, then run the methods */
+ for (rr = repositories ; rr != NULL ; rr = rr->next) {
+ for (rd = rr->distributions ; rd != NULL ;
+ rd = rd->next) {
+ r = remote_distribution_enqueuemetalists(rd);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ r = aptmethod_download(run);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ for (rr = repositories ; rr != NULL ; rr = rr->next) {
+ for (rd = rr->distributions ; rd != NULL ; rd = rd->next) {
+ if (rd->ignorerelease)
+ continue;
+ /* without downloading, a missing InRelease file
+ * means the Release(.gpg) pair has to be used */
+ if (nodownload && !isregularfile(rd->inreleasefile))
+ rd->noinrelease = true;
+ r = process_remoterelease(rd);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ return RET_OK;
+}
+
+/* Compare the state recorded in the done-file against the checksums
+ * the current Release file lists for this index; returns true when
+ * the index needs to be downloaded again.
+ * NOTE(review): ri is annotated as maybe-NULL but dereferenced
+ * without a check - callers apparently guarantee non-NULL; confirm. */
+bool remote_index_isnew(/*@null@*/const struct remote_index *ri, struct donefile *done) {
+ const char *basefilename;
+ struct checksums *checksums;
+ bool hashes_missing, improves;
+
+ /* files without uncompressed checksum cannot be tested */
+ if (ri->ofs[c_none] < 0)
+ return true;
+ /* if not there or the wrong files comes next, then something
+ * has changed and we better reload everything */
+ if (!donefile_nextindex(done, &basefilename, &checksums))
+ return true;
+ if (strcmp(basefilename, ri->cachebasename) != 0) {
+ checksums_free(checksums);
+ return true;
+ }
+ /* otherwise check if the file checksums match */
+ if (!checksums_check(checksums,
+ ri->from->remotefiles.checksums[ri->ofs[c_none]],
+ &hashes_missing)) {
+ checksums_free(checksums);
+ return true;
+ }
+ if (hashes_missing) {
+ /* if Release has checksums we do not yet know about,
+ * process it to make sure those match as well */
+ checksums_free(checksums);
+ return true;
+ }
+ if (!checksums_check(ri->from->remotefiles.checksums[ri->ofs[c_none]],
+ checksums, &improves)) {
+ /* this should not happen, but ... */
+ checksums_free(checksums);
+ return true;
+ }
+ if (improves) {
+ /* assume this is our file and add the other hashes so they
+ * will show up in the file again the next time.
+ * This is a bit inelegant in mixing stuff, but otherwise this
+ * will cause redownloading when remote adds more hashes.
+ * The only downside of mixing can reject files that have the
+ * same recorded hashes as a previously processed file.
+ * But that is quite unlikely unless under attack, so getting
+ * some hint in that case cannot harm.*/
+ (void)checksums_combine(&ri->from->remotefiles.checksums[
+ ri->ofs[c_none]], checksums, NULL);
+ }
+ checksums_free(checksums);
+ return false;
+}
+
+/* Collect the still-existing cached variants (one per compression) of
+ * this index into old[] and mark them needed; leftover .diffindex and
+ * .diff-* files belonging to it are deleted right away. */
+static inline void remote_index_oldfiles(struct remote_index *ri, /*@null@*/struct cachedlistfile *oldfiles, /*@out@*/struct cachedlistfile *old[c_COUNT]) {
+ struct cachedlistfile *o;
+ size_t l;
+ enum compression c;
+
+ for (c = 0 ; c < c_COUNT ; c++)
+ old[c] = NULL;
+
+ l = strlen(ri->cachebasename);
+ for (o = oldfiles ; o != NULL ; o = o->next) {
+ if (o->deleted)
+ continue;
+ if (strncmp(o->basefilename, ri->cachebasename, l) != 0)
+ continue;
+ /* the suffix decides which compression variant this is */
+ for (c = 0 ; c < c_COUNT ; c++)
+ if (strcmp(o->basefilename + l,
+ uncompression_suffix[c]) == 0) {
+ old[c] = o;
+ o->needed = true;
+ break;
+ }
+ if (strcmp(o->basefilename + l, ".diffindex") == 0)
+ (void)cachedlistfile_delete(o);
+ if (strncmp(o->basefilename + l, ".diff-", 6) == 0)
+ (void)cachedlistfile_delete(o);
+ }
+}
+
+/* Delete every not-yet-deleted cached file whose basename starts with
+ * this index's cache basename. */
+static inline void remote_index_delete_oldfiles(struct remote_index *ri, /*@null@*/struct cachedlistfile *oldfiles) {
+ struct cachedlistfile *file;
+ size_t prefixlen = strlen(ri->cachebasename);
+
+ for (file = oldfiles ; file != NULL ; file = file->next) {
+ if (file->deleted)
+ continue;
+ if (strncmp(file->basefilename, ri->cachebasename,
+ prefixlen) != 0)
+ continue;
+ (void)cachedlistfile_delete(file);
+ }
+}
+
+static queue_callback index_callback;
+static queue_callback diff_callback;
+
+/* Without a Release file there is no file list to consult: try the
+ * compressions from DownloadListsAs (or a built-in default order),
+ * one per call, remembering the last tried entry in lasttriedencoding
+ * so a failed download can continue with the next format. */
+static retvalue queue_next_without_release(struct remote_distribution *rd, struct remote_index *ri) {
+ const struct encoding_preferences *downloadas;
+ static const struct encoding_preferences defaultdownloadas = {
+ .count = 5,
+ .requested = {
+ { .diff = false, .force = false, .compression = c_gzip },
+ { .diff = false, .force = false, .compression = c_bzip2 },
+ { .diff = false, .force = false, .compression = c_none },
+ { .diff = false, .force = false, .compression = c_lzma },
+ { .diff = false, .force = false, .compression = c_xz }
+ }
+ };
+ int e;
+
+ if (ri->downloadas.count == 0)
+ downloadas = &defaultdownloadas;
+ else
+ downloadas = &ri->downloadas;
+
+ for (e = ri->lasttriedencoding + 1 ; e < downloadas->count ; e++) {
+ enum compression c = downloadas->requested[e].compression;
+
+ /* .diff entries make no sense without a Release file */
+ if (downloadas->requested[e].diff)
+ continue;
+ if (uncompression_supported(c)) {
+ ri->lasttriedencoding = e;
+ ri->compression = c;
+ return aptmethod_enqueueindex(rd->repository->download,
+ rd->suite_base_dir,
+ ri->filename_in_release,
+ uncompression_suffix[c],
+ ri->cachefilename,
+ uncompression_suffix[c],
+ index_callback, ri, NULL);
+ }
+ }
+ /* nothing (more) to try: only complain if nothing was ever tried */
+ if (ri->lasttriedencoding < 0)
+ fprintf(stderr,
+"ERROR: no supported compressions in DownloadListsAs for '%s' by '%s'!\n",
+ rd->suite, rd->repository->method);
+ ri->lasttriedencoding = e;
+ return RET_ERROR;
+}
+
+/* Choose the next encoding to try for this index file, based on the
+ * user's DownloadListsAs preferences (if any), what the Release file
+ * lists, and which uncompression methods are available.
+ * Sets ri->compression (c_COUNT meaning: use .diff/Index patches) and
+ * advances ri->lasttriedencoding; prints a diagnostic and returns an
+ * error when nothing (more) can be tried. */
+static inline retvalue find_requested_encoding(struct remote_index *ri, const char *releasefile) {
+ int e;
+ enum compression c, stopat,
+ /* the most-preferred requested but unsupported */
+ unsupported = c_COUNT,
+ /* the best unrequested but supported */
+ unrequested = c_COUNT;
+
+ if (ri->downloadas.count > 0) {
+ bool found = false;
+ for (e = ri->lasttriedencoding + 1 ;
+ e < ri->downloadas.count ;
+ e++) {
+ struct compression_preference req;
+
+ req = ri->downloadas.requested[e];
+
+ if (req.diff) {
+ /* .diff only works when there is an old
+ * file to patch and (unless forced) a
+ * .diff/Index listed in the Release */
+ if (ri->olduncompressed == NULL)
+ continue;
+ assert (ri->ofs[c_none] >= 0);
+ if (!req.force && ri->diff_ofs < 0)
+ continue;
+ ri->compression = c_COUNT;
+ ri->lasttriedencoding = e;
+ return RET_OK;
+ }
+ if (ri->ofs[req.compression] < 0 &&
+ (!req.force || ri->ofs[c_none] < 0))
+ continue;
+ if (uncompression_supported(req.compression)) {
+ ri->compression = req.compression;
+ ri->lasttriedencoding = e;
+ return RET_OK;
+ } else if (unsupported == c_COUNT)
+ unsupported = req.compression;
+ }
+ if (ri->lasttriedencoding > -1) {
+ /* we already tried something, and nothing else
+ * is available, so give up */
+ ri->lasttriedencoding = e;
+ return RET_ERROR;
+ }
+
+ /* nothing that is both requested by the user and supported
+ * and listed in the Release file found, check what is there
+ * to get a meaningful error message */
+
+ for (c = 0 ; c < c_COUNT ; c++) {
+ if (ri->ofs[c] < 0)
+ continue;
+ found = true;
+ if (uncompression_supported(c))
+ unrequested = c;
+ }
+
+ if (!found) {
+ /* TODO: might be nice to also check here for
+ * compressions not yet known to this code and
+ * report those as not yet supported instead */
+ fprintf(stderr,
+"Could not find '%s' within '%s'\n",
+ ri->filename_in_release, releasefile);
+ return RET_ERROR_WRONG_MD5;
+ }
+
+ if (unsupported != c_COUNT && unrequested != c_COUNT) {
+ fprintf(stderr,
+"Error: '%s' only lists unusable or unrequested compressions of '%s'.\n"
+"Try e.g the '%s' option (or check what it is set to) to make more useable.\n"
+"Or change your DownloadListsAs to request e.g. '%s'.\n",
+ releasefile, ri->filename_in_release,
+ uncompression_option[unsupported],
+ uncompression_config[unrequested]);
+ return RET_ERROR;
+ }
+ if (unsupported != c_COUNT) {
+ fprintf(stderr,
+"Error: '%s' only lists unusable compressions of '%s'.\n"
+"Try e.g the '%s' option (or check what it is set to) to make more useable.\n",
+ releasefile, ri->filename_in_release,
+ uncompression_option[unsupported]);
+ return RET_ERROR;
+ }
+ if (unrequested != c_COUNT) {
+ fprintf(stderr,
+"Error: '%s' only lists unrequested compressions of '%s'.\n"
+"Try changing your DownloadListsAs to request e.g. '%s'.\n",
+ releasefile, ri->filename_in_release,
+ uncompression_config[unrequested]);
+ return RET_ERROR;
+ }
+ fprintf(stderr,
+"Error: '%s' lists no requested and usable compressions of '%s'.\n",
+ releasefile, ri->filename_in_release);
+ return RET_ERROR;
+ }
+ /* When nothing specified, use the newest compression.
+ * This might make it slow on older computers (and perhaps
+ * on relatively new ones, too), but usually bandwidth costs
+ * and your time not.
+ * And you can always configure it to prefer a faster one...
+ */
+
+ /* ri->lasttriedencoding: -1 means nothing tried yet,
+ * 0 means the Packages.diff route was tried,
+ * 1 means compression c_COUNT - 1 was already tried,
+ * 2 means compression c_COUNT - 2 was already tried,
+ * and so on... */
+
+ if (ri->lasttriedencoding < 0) {
+ /* first try: prefer .diff if an old file exists and
+ * the Release file offers a .diff/Index */
+ if (ri->olduncompressed != NULL && ri->diff_ofs >= 0) {
+ ri->compression = c_COUNT;
+ ri->lasttriedencoding = 0;
+ return RET_OK;
+ }
+ stopat = c_COUNT;
+ } else
+ stopat = c_COUNT - ri->lasttriedencoding;
+
+ ri->compression = c_COUNT;
+ for (c = 0 ; c < stopat ; c++) {
+ if (ri->ofs[c] < 0)
+ continue;
+ if (uncompression_supported(c))
+ ri->compression = c;
+ else
+ unsupported = c;
+ }
+ if (ri->compression == c_COUNT) {
+ if (ri->lasttriedencoding > -1) {
+ /* not the first try, no error message needed */
+ ri->lasttriedencoding = c_COUNT;
+ return RET_ERROR;
+ }
+ if (unsupported != c_COUNT) {
+ fprintf(stderr,
+"Error: '%s' only lists unusable compressions of '%s'.\n"
+"Try e.g the '%s' option (or check what it is set to) to enable more.\n",
+ releasefile, ri->filename_in_release,
+ uncompression_option[unsupported]);
+ return RET_ERROR;
+ }
+ fprintf(stderr,
+"Could not find '%s' within '%s'\n",
+ ri->filename_in_release, releasefile);
+ return RET_ERROR_WRONG_MD5;
+
+ }
+ ri->lasttriedencoding = c_COUNT - ri->compression;
+ return RET_OK;
+}
+
+/* Drop the remembered outdated uncompressed cache file, if any.
+ * Returns RET_NOTHING when there was none to delete. */
+static inline retvalue remove_old_uncompressed(struct remote_index *ri) {
+ struct cachedlistfile *old = ri->olduncompressed;
+
+ if (old == NULL)
+ return RET_NOTHING;
+ ri->olduncompressed = NULL;
+ return cachedlistfile_delete(old);
+}
+
+static retvalue queue_next_encoding(struct remote_distribution *rd, struct remote_index *ri);
+
+/* TODO: check if this still makes sense.
+ * (might be left over to support switching from older versions
+ * of reprepro that also put compressed files there) */
+/* Uncompress an old cached compressed index (already verified against
+ * the Release file) into place and, when the Release file also lists
+ * the uncompressed file, verify the result against those checksums. */
+static inline retvalue reuse_old_compressed_index(struct remote_distribution *rd, struct remote_index *ri, enum compression c, const char *oldfullfilename) {
+ retvalue r;
+
+ r = uncompress_file(oldfullfilename, ri->cachefilename, c);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (ri->ofs[c_none] >= 0) {
+ r = checksums_test(ri->cachefilename,
+ rd->remotefiles.checksums[ri->ofs[c_none]],
+ &rd->remotefiles.checksums[ri->ofs[c_none]]);
+ if (r == RET_ERROR_WRONG_MD5) {
+ fprintf(stderr,
+"Error: File '%s' looked correct according to '%s',\n"
+"but after unpacking '%s' looks wrong.\n"
+"Something is seriously broken!\n",
+ oldfullfilename, rd->usedreleasefile,
+ ri->cachefilename);
+ }
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"File '%s' mysteriously vanished!\n", ri->cachefilename);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ /* already there, nothing to do to get it... */
+ ri->queued = true;
+ ri->got = true;
+ return RET_OK;
+}
+
+/* Decide how to get one index file: reuse the cached uncompressed copy
+ * if it still matches the Release file, reuse an old compressed
+ * variant, or queue a download of the best available encoding. */
+static inline retvalue queueindex(struct remote_distribution *rd, struct remote_index *ri, bool nodownload, /*@null@*/struct cachedlistfile *oldfiles) {
+ enum compression c;
+ retvalue r;
+ struct cachedlistfile *old[c_COUNT];
+
+ if (rd->ignorerelease) {
+ ri->queued = true;
+ if (nodownload) {
+ ri->got = true;
+ return RET_OK;
+ }
+
+ /* as there is no way to know which are current,
+ * just delete everything */
+ remote_index_delete_oldfiles(ri, oldfiles);
+
+ return queue_next_without_release(rd, ri);
+ }
+
+ /* check if this file is still available from an earlier download */
+ remote_index_oldfiles(ri, oldfiles, old);
+ ri->olduncompressed = NULL;
+ ri->oldchecksums = NULL;
+ if (ri->ofs[c_none] < 0 && old[c_none] != NULL) {
+ /* if we know not what it should be,
+ * we cannot use the old... */
+ r = cachedlistfile_delete(old[c_none]);
+ if (RET_WAS_ERROR(r))
+ return r;
+ old[c_none] = NULL;
+ } else if (old[c_none] != NULL) {
+ bool improves;
+ int uo = ri->ofs[c_none];
+ struct checksums **wanted_p = &rd->remotefiles.checksums[uo];
+
+ r = checksums_read(old[c_none]->fullfilename,
+ &ri->oldchecksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "File '%s' mysteriously vanished!\n",
+ old[c_none]->fullfilename);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (checksums_check(*wanted_p, ri->oldchecksums, &improves)) {
+ /* already there, nothing to do to get it... */
+ ri->queued = true;
+ ri->got = true;
+ if (improves)
+ r = checksums_combine(wanted_p,
+ ri->oldchecksums, NULL);
+ else
+ r = RET_OK;
+ checksums_free(ri->oldchecksums);
+ ri->oldchecksums = NULL;
+ return r;
+ }
+ /* outdated, but remember it: a .diff might update it */
+ ri->olduncompressed = old[c_none];
+ old[c_none] = NULL;
+ }
+
+ assert (old[c_none] == NULL);
+
+ /* make sure everything old is deleted or check if it can be used */
+ for (c = 0 ; c < c_COUNT ; c++) {
+ if (old[c] == NULL)
+ continue;
+ if (c != c_none && ri->ofs[c] >= 0) {
+ /* check if it can be used */
+ r = checksums_test(old[c]->fullfilename,
+ rd->remotefiles.checksums[ri->ofs[c]],
+ &rd->remotefiles.checksums[ri->ofs[c]]);
+ if (r == RET_ERROR_WRONG_MD5)
+ r = RET_NOTHING;
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (RET_IS_OK(r)) {
+ r = remove_old_uncompressed(ri);
+ if (RET_WAS_ERROR(r))
+ return r;
+ assert (old[c_none] == NULL);
+ return reuse_old_compressed_index(rd, ri, c,
+ old[c]->fullfilename);
+ }
+ }
+ r = cachedlistfile_delete(old[c]);
+ if (RET_WAS_ERROR(r))
+ return r;
+ old[c] = NULL;
+ }
+
+ /* nothing found, we'll have to download: */
+
+ if (nodownload) {
+ if (ri->olduncompressed != NULL)
+ fprintf(stderr,
+"Error: '%s' does not match Release file, try without --nolistsdownload to download new one!\n",
+ ri->cachefilename);
+ else
+ fprintf(stderr,
+"Error: Missing '%s', try without --nolistsdownload to download it!\n",
+ ri->cachefilename);
+ return RET_ERROR_MISSING;
+ }
+
+ return queue_next_encoding(rd, ri);
+}
+
+/* Queue the next not-yet-tried way to retrieve this index file:
+ * either a .diff/Index based update of an old cached copy, or a full
+ * download in the best remaining compression. */
+static retvalue queue_next_encoding(struct remote_distribution *rd, struct remote_index *ri) {
+ struct remote_repository *rr = rd->repository;
+ retvalue r;
+
+ if (rd->ignorerelease)
+ return queue_next_without_release(rd, ri);
+
+ r = find_requested_encoding(ri, rd->usedreleasefile);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ assert (ri->compression <= c_COUNT);
+
+ /* check if downloading a .diff/Index (aka .pdiff) is requested */
+ if (ri->compression == c_COUNT) {
+ assert (ri->olduncompressed != NULL);
+ assert (ri->oldchecksums != NULL);
+
+ ri->queued = true;
+ return aptmethod_enqueueindex(rr->download, rd->suite_base_dir,
+ ri->filename_in_release, ".diff/Index",
+ ri->cachefilename, ".diffindex",
+ diff_callback, ri, NULL);
+ }
+
+ assert (ri->compression < c_COUNT);
+ assert (uncompression_supported(ri->compression));
+
+ /* downloading uncompressed overwrites in place, so the old
+ * (non-matching) copy is no longer of any use */
+ if (ri->compression == c_none) {
+ r = remove_old_uncompressed(ri);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+/* as those checksums might be overwritten with completed data,
+ * this assumes that the uncompressed checksums for one index is never
+ * the compressed checksum for another... */
+
+ ri->queued = true;
+ return aptmethod_enqueueindex(rr->download, rd->suite_base_dir,
+ ri->filename_in_release,
+ uncompression_suffix[ri->compression],
+ ri->cachefilename,
+ uncompression_suffix[ri->compression],
+ index_callback, ri, NULL);
+}
+
+
+/* Queue downloads (or accept the cached copies) of all needed index
+ * files of one remote distribution. */
+static retvalue remote_distribution_enqueuelists(struct remote_distribution *rd, bool nodownload, struct cachedlistfile *oldfiles) {
+ struct remote_index *ri;
+ retvalue r;
+
+ /* check what to get for the requested indices */
+ for (ri = rd->indices ; ri != NULL ; ri = ri->next) {
+ if (ri->queued)
+ continue;
+ if (!ri->needed) {
+ /* if we do not know anything about it,
+ * it cannot have got marked as old
+ * or otherwise as unneeded */
+ assert (!rd->ignorerelease);
+ continue;
+ }
+ r = queueindex(rd, ri, nodownload, oldfiles);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ return RET_OK;
+}
+
+/* Scan the local list cache, queue the retrieval of all needed index
+ * files of every registered remote distribution, then run the apt
+ * methods to actually download them. */
+retvalue remote_preparelists(struct aptmethodrun *run, bool nodownload) {
+ struct remote_repository *rr;
+ struct remote_distribution *rd;
+ struct cachedlistfile *oldfiles;
+ retvalue r;
+
+ r = cachedlists_scandir(&oldfiles);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING)
+ oldfiles = NULL;
+
+ for (rr = repositories ; rr != NULL ; rr = rr->next) {
+ for (rd = rr->distributions ; rd != NULL ; rd = rd->next) {
+ r = remote_distribution_enqueuelists(rd,
+ nodownload, oldfiles);
+ if (RET_WAS_ERROR(r))
+ goto out;
+ }
+ }
+ r = aptmethod_download(run);
+ if (!RET_WAS_ERROR(r))
+ r = RET_OK;
+out:
+ /* common cleanup for both the error and the success path */
+ cachedlistfile_freelist(oldfiles);
+ return r;
+}
+
+/* Add an index file to a remote distribution, or return the already
+ * registered entry with the same cache basename (merging the shorter
+ * DownloadListsAs preference list into it).
+ * Takes ownership of cachefilename and filename, also on failure;
+ * returns NULL on allocation failure. */
+static struct remote_index *addindex(struct remote_distribution *rd, /*@only@*/char *cachefilename, /*@only@*/char *filename, /*@null@*/const struct encoding_preferences *downloadas) {
+ struct remote_index *ri, **last;
+ enum compression c;
+ const char *cachebasename;
+
+ if (FAILEDTOALLOC(cachefilename) || FAILEDTOALLOC(filename)) {
+ /* free the one that did not fail, too (free(NULL) is a
+ * no-op), otherwise it would leak on partial failure */
+ free(cachefilename);
+ free(filename);
+ return NULL;
+ }
+
+ cachebasename = dirs_basename(cachefilename);
+ last = &rd->indices;
+ while (*last != NULL && strcmp((*last)->cachebasename, cachebasename) != 0)
+ last = &(*last)->next;
+ if (*last != NULL) {
+ ri = *last;
+ /* TODO: perhaps try to calculate some form of
+ * intersection instead of just using the shorter one... */
+ if (downloadas != NULL &&
+ (ri->downloadas.count == 0
+ || ri->downloadas.count > downloadas->count))
+ ri->downloadas = *downloadas;
+ free(cachefilename); free(filename);
+ return ri;
+ }
+
+ ri = zNEW(struct remote_index);
+ if (FAILEDTOALLOC(ri)) {
+ free(cachefilename); free(filename);
+ return NULL;
+ }
+
+ *last = ri;
+ ri->from = rd;
+ ri->cachefilename = cachefilename;
+ ri->cachebasename = cachebasename;
+ ri->filename_in_release = filename;
+ if (downloadas != NULL)
+ ri->downloadas = *downloadas;
+ /* no offsets into the Release file's checksum list known yet: */
+ for (c = 0 ; c < c_COUNT ; c++)
+ ri->ofs[c] = -1;
+ ri->diff_ofs = -1;
+ ri->lasttriedencoding = -1;
+ return ri;
+}
+
+/* Register (or look up) the remote index file for the given
+ * architecture, component and package type of a non-flat remote
+ * distribution.  Returns NULL on out of memory (or, defensively, on
+ * an unexpected package type). */
+struct remote_index *remote_index(struct remote_distribution *rd, const char *architecture, const char *component, packagetype_t packagetype, /*@null@*/const struct encoding_preferences *downloadas) {
+ char *cachefilename, *filename_in_release;
+
+ assert (!rd->flat);
+ if (packagetype == pt_deb) {
+ filename_in_release = mprintf(
+"%s/binary-%s/Packages",
+ component, architecture);
+ cachefilename = genlistsfilename("Packages", 4,
+ rd->repository->name, rd->suite,
+ component, architecture, ENDOFARGUMENTS);
+ } else if (packagetype == pt_udeb) {
+ filename_in_release = mprintf(
+"%s/debian-installer/binary-%s/Packages",
+ component, architecture);
+ cachefilename = genlistsfilename("uPackages", 4,
+ rd->repository->name, rd->suite,
+ component, architecture, ENDOFARGUMENTS);
+ } else if (packagetype == pt_dsc) {
+ filename_in_release = mprintf(
+"%s/source/Sources",
+ component);
+ cachefilename = genlistsfilename("Sources", 3,
+ rd->repository->name, rd->suite,
+ component, ENDOFARGUMENTS);
+ } else {
+ assert ("Unexpected package type" == NULL);
+ /* do not pass uninitialized pointers to addindex in
+ * NDEBUG builds; addindex treats NULL like an
+ * allocation failure and returns NULL */
+ filename_in_release = NULL;
+ cachefilename = NULL;
+ }
+ return addindex(rd, cachefilename, filename_in_release, downloadas);
+}
+
+/* Mark the cached list files belonging to one (non-flat) index as
+ * still needed; the name components must mirror those used by
+ * remote_index() / genlistsfilename() above. */
+void cachedlistfile_need_index(struct cachedlistfile *list, const char *repository, const char *suite, const char *architecture, const char *component, packagetype_t packagetype) {
+ if (packagetype == pt_deb) {
+ cachedlistfile_need(list, "Packages", 4,
+ repository, suite,
+ component, architecture, ENDOFARGUMENTS);
+ } else if (packagetype == pt_udeb) {
+ cachedlistfile_need(list, "uPackages", 4,
+ repository, suite,
+ component, architecture, ENDOFARGUMENTS);
+ } else if (packagetype == pt_dsc) {
+ cachedlistfile_need(list, "Sources", 3,
+ repository, suite,
+ component, ENDOFARGUMENTS);
+ }
+}
+
+/* Register (or look up) the remote index file of a flat repository.
+ * Returns NULL on out of memory (or, defensively, on an unexpected
+ * package type). */
+struct remote_index *remote_flat_index(struct remote_distribution *rd, packagetype_t packagetype, /*@null@*/const struct encoding_preferences *downloadas) {
+ char *cachefilename, *filename_in_release;
+
+ assert (rd->flat);
+ if (packagetype == pt_deb) {
+ filename_in_release = strdup("Packages");
+ cachefilename = genlistsfilename("Packages", 2,
+ rd->repository->name, rd->suite,
+ ENDOFARGUMENTS);
+ } else if (packagetype == pt_dsc) {
+ filename_in_release = strdup("Sources");
+ cachefilename = genlistsfilename("Sources", 2,
+ rd->repository->name, rd->suite,
+ ENDOFARGUMENTS);
+ } else {
+ assert ("Unexpected package type" == NULL);
+ /* do not pass uninitialized pointers to addindex in
+ * NDEBUG builds; addindex treats NULL like an
+ * allocation failure and returns NULL */
+ filename_in_release = NULL;
+ cachefilename = NULL;
+ }
+ return addindex(rd, cachefilename, filename_in_release, downloadas);
+}
+
+/* Mark the cached list files of a flat remote distribution as still
+ * needed.  The component count must mirror remote_flat_index() /
+ * genlistsfilename() above, which always encode repository AND suite
+ * (2 components) in the cache file name. */
+void cachedlistfile_need_flat_index(struct cachedlistfile *list, const char *repository, const char *suite, packagetype_t packagetype) {
+ if (packagetype == pt_deb) {
+ cachedlistfile_need(list, "Packages", 2,
+ repository, suite, ENDOFARGUMENTS);
+ } else if (packagetype == pt_dsc) {
+ /* was 1, which dropped the suite component and thus
+ * never matched the cache name generated with 2
+ * components by remote_flat_index() */
+ cachedlistfile_need(list, "Sources", 2,
+ repository, suite, ENDOFARGUMENTS);
+ }
+}
+
+/* Return the name of the prepared uncompressed index file; only valid
+ * once the file was successfully retrieved. */
+const char *remote_index_file(const struct remote_index *ri) {
+ assert (ri->needed && ri->queued && ri->got);
+ return ri->cachefilename;
+}
+/* Return the basename of the index file within the list cache. */
+const char *remote_index_basefile(const struct remote_index *ri) {
+ assert (ri->needed && ri->queued);
+ return ri->cachebasename;
+}
+
+/* Return the apt method used to fetch files of this remote
+ * distribution (one per repository). */
+struct aptmethod *remote_aptmethod(const struct remote_distribution *rd) {
+ const struct remote_repository *repository = rd->repository;
+
+ return repository->download;
+}
+
+/* Record this index (with the uncompressed checksums from the Release
+ * file) in the 'done' file, so an unchanged file can be skipped next
+ * run.  Nothing is recorded if the Release file did not list it. */
+void remote_index_markdone(const struct remote_index *ri, struct markdonefile *done) {
+ if (ri->ofs[c_none] < 0)
+ return;
+ markdone_index(done, ri->cachebasename,
+ ri->from->remotefiles.checksums[ri->ofs[c_none]]);
+}
+/* Mark this index as actually needed, i.e. to be retrieved. */
+void remote_index_needed(struct remote_index *ri) {
+ ri->needed = true;
+}
+
+/* Mark an index file as retrieved.  If checksums of the retrieved
+ * file are known and the Release file listed the uncompressed file,
+ * verify them (they are expected to already match) and merge any
+ * additional hash types into the stored checksums. */
+static retvalue indexfile_mark_got(struct remote_distribution *rd, struct remote_index *ri, /*@null@*/const struct checksums *gotchecksums) {
+ struct checksums **checksums_p;
+
+ if (!rd->ignorerelease && ri->ofs[c_none] >= 0) {
+ checksums_p = &rd->remotefiles.checksums[ri->ofs[c_none]];
+ bool matches, improves;
+
+ /* TODO: this no longer calculates all the checksums if
+ * the Release does not contain more and the apt method
+ * returned not all (but all that are in Release).
+ * This will then cause the done file not containing all
+ * checksums. (but if the Release does not contain them,
+ * this does not harm, does it?) */
+
+ if (gotchecksums != NULL) {
+ matches = checksums_check(*checksums_p, gotchecksums,
+ &improves);
+ /* that should have been tested earlier */
+ assert (matches);
+ if (! matches)
+ return RET_ERROR_WRONG_MD5;
+ if (improves) {
+ retvalue r;
+
+ r = checksums_combine(checksums_p,
+ gotchecksums, NULL);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ }
+ }
+ ri->got = true;
+ return RET_OK;
+}
+
+/* Callback run once a downloaded compressed index was uncompressed
+ * into the cache: verify the uncompressed data against the Release
+ * file (if it lists it) and mark the index as got. */
+static retvalue indexfile_unpacked(void *privdata, const char *compressed, bool failed) {
+ struct remote_index *ri = privdata;
+ struct remote_distribution *rd = ri->from;
+ retvalue r;
+ struct checksums *readchecksums = NULL;
+
+ if (failed) {
+ /* TODO: check if an alternative encoding can be used... */
+ return RET_ERROR;
+ }
+
+ /* file got uncompressed, check if it has the correct checksum */
+
+ /* even with a Release file, an old-style one might
+ * not list the checksums for the uncompressed indices */
+ if (!rd->ignorerelease && ri->ofs[c_none] >= 0) {
+ int ofs = ri->ofs[c_none];
+ const struct checksums *wantedchecksums =
+ rd->remotefiles.checksums[ofs];
+ bool matches, missing = false;
+
+ r = checksums_read(ri->cachefilename, &readchecksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', though it should just have been unpacked from '%s'!\n",
+ ri->cachefilename,
+ compressed);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ missing = false;
+ matches = checksums_check(readchecksums,
+ wantedchecksums, &missing);
+ assert (!missing);
+ if (!matches) {
+ fprintf(stderr,
+"Wrong checksum of uncompressed content of '%s':\n", compressed);
+ checksums_printdifferences(stderr,
+ wantedchecksums,
+ readchecksums);
+ checksums_free(readchecksums);
+ return RET_ERROR_WRONG_MD5;
+ }
+ /* if the compressed file was downloaded or copied, delete it.
+ * This is only done if we know the uncompressed checksum, so
+ * that less downloading is needed (though as apt no longer
+ * supports such archives, they are unlikely anyway). */
+
+ if (strncmp(ri->cachefilename, compressed,
+ strlen(ri->cachefilename)) == 0) {
+ (void)unlink(compressed);
+ }
+ }
+ r = indexfile_mark_got(rd, ri, readchecksums);
+ checksums_free(readchecksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ return RET_OK;
+}
+
+/* *checksums_p must be either NULL or gotchecksums list all known checksums */
+/* Verify a received file against the checksums wanted for it.  If the
+ * apt method did not report all hash types needed, read the file to
+ * compute them; the computed checksums are handed back via checksums_p
+ * (when non-NULL), otherwise freed. */
+static inline retvalue check_checksums(const char *methodname, const char *uri, const char *gotfilename, const struct checksums *wantedchecksums, /*@null@*/const struct checksums *gotchecksums, struct checksums **checksums_p) {
+ bool matches, missing = false;
+ struct checksums *readchecksums = NULL;
+ retvalue r;
+
+ if (gotchecksums == NULL) {
+ matches = true;
+ missing = true;
+ } else
+ matches = checksums_check(gotchecksums,
+ wantedchecksums, &missing);
+ /* if the apt method did not generate all checksums
+ * we want to check, we'll have to do so: */
+ if (matches && missing) {
+ /* we assume that everything we know how to
+ * extract from a Release file is something
+ * we know how to calculate out of a file */
+ assert (checksums_p == NULL || *checksums_p == NULL);
+ r = checksums_read(gotfilename, &readchecksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', though apt-method '%s' claims it is there!\n",
+ gotfilename, methodname);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ gotchecksums = readchecksums;
+ missing = false;
+ matches = checksums_check(gotchecksums,
+ wantedchecksums, &missing);
+ assert (!missing);
+ }
+ if (!matches) {
+ fprintf(stderr, "Wrong checksum during receive of '%s':\n",
+ uri);
+ checksums_printdifferences(stderr,
+ wantedchecksums,
+ gotchecksums);
+ checksums_free(readchecksums);
+ return RET_ERROR_WRONG_MD5;
+ }
+ if (checksums_p == NULL)
+ checksums_free(readchecksums);
+ else if (readchecksums != NULL)
+ *checksums_p = readchecksums;
+ return RET_OK;
+}
+
+/* Callback for a queued full index download (compressed or not):
+ * move/verify the received file and either mark it got (uncompressed)
+ * or queue its uncompression.  On qa_error the next possible encoding
+ * is tried instead of failing. */
+static retvalue index_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums *gotchecksums, const char *methodname) {
+ struct remote_index *ri = privdata;
+ struct remote_distribution *rd = ri->from;
+ struct checksums *readchecksums = NULL;
+ retvalue r;
+
+ if (action == qa_error)
+ return queue_next_encoding(rd, ri);
+ if (action != qa_got)
+ return RET_ERROR;
+
+ if (ri->compression == c_none) {
+ /* uncompressed indices are copied into place directly */
+ assert (strcmp(wantedfilename, ri->cachefilename) == 0);
+ r = copytoplace(gotfilename, wantedfilename, methodname,
+ &readchecksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ gotfilename = wantedfilename;
+ if (readchecksums != NULL)
+ gotchecksums = readchecksums;
+ }
+
+ if (!rd->ignorerelease && ri->ofs[ri->compression] >= 0) {
+ int ofs = ri->ofs[ri->compression];
+ const struct checksums *wantedchecksums =
+ rd->remotefiles.checksums[ofs];
+
+ r = check_checksums(methodname, uri, gotfilename,
+ wantedchecksums, gotchecksums, &readchecksums);
+ if (RET_WAS_ERROR(r)) {
+ checksums_free(readchecksums);
+ return r;
+ }
+ if (readchecksums != NULL)
+ gotchecksums = readchecksums;
+ }
+
+ if (ri->compression == c_none) {
+ assert (strcmp(gotfilename, wantedfilename) == 0);
+ r = indexfile_mark_got(rd, ri, gotchecksums);
+ checksums_free(readchecksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ return RET_OK;
+ } else {
+ /* compressed: unpack; indexfile_unpacked finishes up */
+ checksums_free(readchecksums);
+ r = remove_old_uncompressed(ri);
+ if (RET_WAS_ERROR(r))
+ return r;
+ r = uncompress_queue_file(gotfilename, ri->cachefilename,
+ ri->compression,
+ indexfile_unpacked, privdata);
+ if (RET_WAS_ERROR(r))
+ return r;
+ return RET_OK;
+ }
+}
+
+static queue_callback diff_got_callback;
+
+/* Find the first untried patch in the .diff/Index whose starting
+ * checksums match our current (old) index file and queue its
+ * download.  If no patch applies, fall back to the next encoding. */
+static retvalue queue_next_diff(struct remote_index *ri) {
+ struct remote_distribution *rd = ri->from;
+ struct remote_repository *rr = rd->repository;
+ int i;
+ retvalue r;
+
+ for (i = 0 ; i < ri->diffindex->patchcount ; i++) {
+ bool improves;
+ struct diffindex_patch *p = &ri->diffindex->patches[i];
+ char *patchsuffix, *c;
+
+ if (p->done || p->frompackages == NULL)
+ continue;
+
+ if (!checksums_check(ri->oldchecksums, p->frompackages,
+ &improves))
+ continue;
+ /* p->frompackages should only have sha1 and oldchecksums
+ * should definitely list a sha1 hash */
+ assert (!improves);
+
+ p->done = true;
+
+ free(ri->patchfilename);
+ ri->patchfilename = mprintf("%s.diff-%s", ri->cachefilename,
+ p->name);
+ if (FAILEDTOALLOC(ri->patchfilename))
+ return RET_ERROR_OOM;
+ /* sanitize the patch name part for use as local filename */
+ c = ri->patchfilename + strlen(ri->cachefilename);
+ while (*c != '\0') {
+ if ((*c < '0' || *c > '9')
+ && (*c < 'A' || *c > 'Z')
+ && (*c < 'a' || *c > 'z')
+ && *c != '.' && *c != '-')
+ *c = '_';
+ c++;
+ }
+ ri->selectedpatch = p;
+ patchsuffix = mprintf(".diff/%s.gz", p->name);
+ if (FAILEDTOALLOC(patchsuffix))
+ return RET_ERROR_OOM;
+
+ /* found a matching patch, tell the downloader we want it */
+ r = aptmethod_enqueueindex(rr->download, rd->suite_base_dir,
+ ri->filename_in_release,
+ patchsuffix,
+ ri->patchfilename, ".gz",
+ diff_got_callback, ri, p);
+ free(patchsuffix);
+ return r;
+ }
+ /* no patch matches, try next possibility... */
+ fprintf(stderr, "Error: available '%s' not listed in '%s.diffindex'.\n",
+ ri->cachefilename, ri->cachefilename);
+ return queue_next_encoding(rd, ri);
+}
+
+/* Called once a downloaded .diff patch was uncompressed: verify it,
+ * apply it (via rred) to the cached index file and check whether the
+ * result now matches the checksums from the Release file.  If not,
+ * the next applicable patch is tried via queue_next_diff(). */
+static retvalue diff_uncompressed(void *privdata, const char *compressed, bool failed) {
+ struct remote_index *ri = privdata;
+ struct remote_distribution *rd = ri->from;
+ const struct diffindex_patch *p = ri->selectedpatch;
+ char *tempfilename;
+ struct rred_patch *rp;
+ FILE *f;
+ int i;
+ retvalue r;
+ bool dummy;
+
+ if (ri->deletecompressedpatch)
+ (void)unlink(compressed);
+ if (failed)
+ return RET_ERROR;
+
+ /* make sure the patch is what the .diff/Index promised */
+ r = checksums_test(ri->patchfilename, p->checksums, NULL);
+ if (r == RET_NOTHING) {
+ /* message typo fixed: was "Myteriously" */
+ fprintf(stderr, "Mysteriously vanished file '%s'!\n",
+ ri->patchfilename);
+ r = RET_ERROR_MISSING;
+ }
+ if (r == RET_ERROR_WRONG_MD5)
+ fprintf(stderr, "Corrupted package diff '%s'!\n",
+ ri->patchfilename);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ r = patch_load(ri->patchfilename,
+ checksums_getfilesize(p->checksums), &rp);
+ ASSERT_NOT_NOTHING(r);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ /* move the old index aside and write the patched result in place */
+ tempfilename = calc_addsuffix(ri->cachefilename, "tmp");
+ if (FAILEDTOALLOC(tempfilename)) {
+ patch_free(rp);
+ return RET_ERROR_OOM;
+ }
+ (void)unlink(tempfilename);
+ i = rename(ri->cachefilename, tempfilename);
+ if (i != 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d moving '%s' to '%s': %s\n",
+ e, ri->cachefilename, tempfilename,
+ strerror(e));
+ free(tempfilename);
+ patch_free(rp);
+ return RET_ERRNO(e);
+ }
+ f = fopen(ri->cachefilename, "w");
+ if (f == NULL) {
+ int e = errno;
+ fprintf(stderr, "Error %d creating '%s': %s\n",
+ e, ri->cachefilename, strerror(e));
+ (void)unlink(tempfilename);
+ ri->olduncompressed->deleted = true;
+ ri->olduncompressed = NULL;
+ free(tempfilename);
+ patch_free(rp);
+ return RET_ERRNO(e);
+ }
+ r = patch_file(f, tempfilename, patch_getconstmodifications(rp));
+ (void)unlink(tempfilename);
+ (void)unlink(ri->patchfilename);
+ free(ri->patchfilename);
+ ri->patchfilename = NULL;
+ free(tempfilename);
+ patch_free(rp);
+ if (RET_WAS_ERROR(r)) {
+ (void)fclose(f);
+ remove_old_uncompressed(ri);
+ /* TODO: fall back to downloading at once? */
+ return r;
+ }
+ i = ferror(f);
+ if (i != 0) {
+ int e = errno;
+ (void)fclose(f);
+ fprintf(stderr, "Error %d writing to '%s': %s\n",
+ e, ri->cachefilename, strerror(e));
+ remove_old_uncompressed(ri);
+ return RET_ERRNO(e);
+ }
+ i = fclose(f);
+ if (i != 0) {
+ int e = errno;
+ fprintf(stderr, "Error %d writing to '%s': %s\n",
+ e, ri->cachefilename, strerror(e));
+ remove_old_uncompressed(ri);
+ return RET_ERRNO(e);
+ }
+ /* check if the patched file is now the one we wanted */
+ checksums_free(ri->oldchecksums);
+ ri->oldchecksums = NULL;
+ r = checksums_read(ri->cachefilename, &ri->oldchecksums);
+ if (r == RET_NOTHING) {
+ /* message typo fixed: was "Myteriously" */
+ fprintf(stderr, "Mysteriously vanished file '%s'!\n",
+ ri->cachefilename);
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (checksums_check(ri->oldchecksums,
+ rd->remotefiles.checksums[ri->ofs[c_none]],
+ &dummy)) {
+ ri->olduncompressed->deleted = true;
+ ri->olduncompressed = NULL;
+ /* we have a winner */
+ return indexfile_mark_got(rd, ri, ri->oldchecksums);
+ }
+ /* let's see what patch we need next */
+ return queue_next_diff(ri);
+}
+
+/* Callback once a compressed .diff patch file was received: queue its
+ * uncompression; diff_uncompressed() then applies it. */
+static retvalue diff_got_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), UNUSED(const char *uri), const char *gotfilename, const char *wantedfilename, UNUSED(/*@null@*/const struct checksums *gotchecksums), UNUSED(const char *methodname)) {
+ struct remote_index *ri = privdata;
+ retvalue r;
+
+ if (action == qa_error)
+ return queue_next_encoding(ri->from, ri);
+ if (action != qa_got)
+ return RET_ERROR;
+
+ /* NOTE(review): gotfilename == wantedfilename apparently means
+ * the file was stored where we asked, so it is ours to delete
+ * after unpacking - confirm against the aptmethod queue code */
+ ri->deletecompressedpatch = strcmp(gotfilename, wantedfilename) == 0;
+ r = uncompress_queue_file(gotfilename, ri->patchfilename,
+ c_gzip, diff_uncompressed, ri);
+ if (RET_WAS_ERROR(r))
+ (void)unlink(gotfilename);
+ return r;
+}
+
+/* Callback for a downloaded .diff/Index file: verify it against the
+ * Release file (when listed there), parse it, check its destination
+ * matches the index we want, then queue the first applicable patch.
+ * On any mismatch, fall back to the next requested encoding. */
+static retvalue diff_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums *gotchecksums, const char *methodname) {
+ struct remote_index *ri = privdata;
+ struct remote_distribution *rd = ri->from;
+ struct checksums *readchecksums = NULL;
+ int ofs;
+ retvalue r;
+
+ if (action == qa_error)
+ return queue_next_encoding(rd, ri);
+ if (action != qa_got)
+ return RET_ERROR;
+
+ r = copytoplace(gotfilename, wantedfilename, methodname,
+ &readchecksums);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (readchecksums != NULL)
+ gotchecksums = readchecksums;
+
+ /* verify the .diff/Index against the Release file, if listed */
+ ofs = ri->diff_ofs;
+ if (ofs >= 0) {
+ const struct checksums *wantedchecksums =
+ rd->remotefiles.checksums[ofs];
+ bool matches, missing = false;
+
+ if (gotchecksums == NULL) {
+ matches = true;
+ missing = true;
+ } else
+ matches = checksums_check(gotchecksums,
+ wantedchecksums, &missing);
+ /* if the apt method did not generate all checksums
+ * we want to check, we'll have to do so: */
+ if (matches && missing) {
+ /* we assume that everything we know how to
+ * extract from a Release file is something
+ * we know how to calculate out of a file */
+ assert (readchecksums == NULL);
+ r = checksums_read(gotfilename, &readchecksums);
+ if (r == RET_NOTHING) {
+ fprintf(stderr,
+"Cannot open '%s', though apt-method '%s' claims it is there!\n",
+ gotfilename, methodname);
+ r = RET_ERROR_MISSING;
+ }
+ if (RET_WAS_ERROR(r))
+ return r;
+ gotchecksums = readchecksums;
+ missing = false;
+ matches = checksums_check(gotchecksums,
+ wantedchecksums, &missing);
+ assert (!missing);
+ }
+ if (!matches) {
+ fprintf(stderr,
+"Wrong checksum during receive of '%s':\n", uri);
+ checksums_printdifferences(stderr,
+ wantedchecksums, gotchecksums);
+ checksums_free(readchecksums);
+ return RET_ERROR_WRONG_MD5;
+ }
+ }
+ checksums_free(readchecksums);
+ r = diffindex_read(wantedfilename, &ri->diffindex);
+ ASSERT_NOT_NOTHING(r);
+ if (RET_WAS_ERROR(r))
+ return queue_next_encoding(rd, ri);
+ if (ri->ofs[c_none] >= 0) {
+ bool dummy;
+ if (!checksums_check(rd->remotefiles.checksums[
+ ri->ofs[c_none]],
+ ri->diffindex->destination, &dummy)) {
+ fprintf(stderr,
+"'%s' does not match file requested in '%s'. Aborting diff processing...\n",
+ gotfilename, rd->usedreleasefile);
+ /* as this is claimed to be a common error
+ * (outdated .diff/Index file), proceed with
+ * other requested way to retrieve index file */
+ return queue_next_encoding(rd, ri);
+ }
+ }
+ return queue_next_diff(ri);
+}
diff --git a/remoterepository.h b/remoterepository.h
new file mode 100644
index 0000000..badfa3d
--- /dev/null
+++ b/remoterepository.h
@@ -0,0 +1,68 @@
+#ifndef REPREPRO_REMOTEREPOSITORY_H
+#define REPREPRO_REMOTEREPOSITORY_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+/* NOTE(review): this #warning fires whenever error.h was not already
+ * included by the including file — presumably an upstream convention to
+ * enforce include order; confirm before removing */
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_APTMETHOD_H
+#include "aptmethod.h"
+#endif
+#ifndef REPREPRO_DONEFILE_H
+#include "donefile.h"
+#endif
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+
+/* opaque handles; definitions live in remoterepository.c */
+struct remote_repository;
+struct remote_distribution;
+struct remote_index;
+
+/* register repository, strings as stored by reference */
+struct remote_repository *remote_repository_prepare(const char * /*name*/, const char * /*method*/, const char * /*fallback*/, const struct strlist * /*config*/);
+
+/* register remote distribution of the given repository */
+retvalue remote_distribution_prepare(struct remote_repository *, const char * /*suite*/, bool /*ignorerelease*/, bool /*getinrelease*/, const char * /*verifyrelease*/, bool /*flat*/, bool * /*ignorehashes*/, /*@out@*/struct remote_distribution **);
+
+void remote_repository_free(/*@only@*/struct remote_repository *);
+
+/* create aptmethods for all of yet created repositories */
+retvalue remote_startup(struct aptmethodrun *);
+
+/* fetch Release/InRelease metadata, resp. the index files themselves */
+retvalue remote_preparemetalists(struct aptmethodrun *, bool /*nodownload*/);
+retvalue remote_preparelists(struct aptmethodrun *, bool /*nodownload*/);
+
+struct encoding_preferences {
+	/* number of preferences, 0 means use default */
+	unsigned short count;
+	/* a list of compressions to use */
+	struct compression_preference {
+		bool diff;
+		bool force;
+		enum compression compression;
+	} requested[3*c_COUNT];
+};
+
+/* request one index file of a (flat or normal) remote distribution */
+struct remote_index *remote_index(struct remote_distribution *, const char * /*architecture*/, const char * /*component*/, packagetype_t, const struct encoding_preferences *);
+struct remote_index *remote_flat_index(struct remote_distribution *, packagetype_t, const struct encoding_preferences *);
+
+/* returns the name of the prepared uncompressed file */
+/*@observer@*/const char *remote_index_file(const struct remote_index *);
+/*@observer@*/const char *remote_index_basefile(const struct remote_index *);
+/*@observer@*/struct aptmethod *remote_aptmethod(const struct remote_distribution *);
+
+bool remote_index_isnew(const struct remote_index *, struct donefile *);
+void remote_index_needed(struct remote_index *);
+void remote_index_markdone(const struct remote_index *, struct markdonefile *);
+
+/* build a lists/ cache filename from NULL-terminated name parts */
+char *genlistsfilename(/*@null@*/const char * /*type*/, unsigned int /*count*/, ...) __attribute__((sentinel));
+
+/* bookkeeping for already-downloaded list files, so unneeded ones
+ * can be deleted afterwards */
+struct cachedlistfile;
+retvalue cachedlists_scandir(/*@out@*/struct cachedlistfile **);
+void cachedlistfile_need_index(struct cachedlistfile *, const char * /*repository*/, const char * /*suite*/, const char * /*architecture*/, const char * /*component*/, packagetype_t);
+void cachedlistfile_need_flat_index(struct cachedlistfile *, const char * /*repository*/, const char * /*suite*/, packagetype_t);
+void cachedlistfile_need(struct cachedlistfile *, const char * /*type*/, unsigned int /*count*/, ...) __attribute__((sentinel));
+void cachedlistfile_freelist(/*@only@*/struct cachedlistfile *);
+void cachedlistfile_deleteunneeded(const struct cachedlistfile *);
+#endif
diff --git a/rredpatch.c b/rredpatch.c
new file mode 100644
index 0000000..a59bc49
--- /dev/null
+++ b/rredpatch.c
@@ -0,0 +1,772 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <assert.h>
+#include "error.h"
+#include "rredpatch.h"
+
+/* one ed-style edit: replace a range of old lines with new content */
+struct modification {
+	/* next item in the list (sorted by oldlinestart) */
+	struct modification *next, *previous;
+	/* each modification removes an (possible empty) range from
+	 * the file and replaces it with an (possible empty) range
+	 * of new lines */
+	int oldlinestart, oldlinecount, newlinecount;
+	/* replacement text: len bytes at content (points into the
+	 * mmap'ed patch data, not owned by this struct) */
+	size_t len;
+	const char *content;
+	/* a entry might be followed by one other with the same
+	 * oldlinestart (due to merging or inefficient patches),
+	 * but always: next->oldlinestart >= oldlinestart + oldlinecount
+	 */
+};
+
+/* a loaded patch file: mmap'ed contents plus parsed modifications */
+struct rred_patch {
+	int fd;
+	/* content of the file mapped with mmap */
+	char *data;
+	off_t len;
+	struct modification *modifications;
+	/* set once patch_getmodifications() handed the list out */
+	bool alreadyinuse;
+};
+
+/* free a whole modification list (content is not owned, not freed) */
+void modification_freelist(struct modification *p) {
+	struct modification *next;
+
+	for (; p != NULL ; p = next) {
+		next = p->next;
+		free(p);
+	}
+}
+
+/* deep-copy a modification list; returns NULL on allocation failure
+ * (any partial copy is freed) */
+struct modification *modification_dup(const struct modification *p) {
+	struct modification *head = NULL, *tail = NULL;
+
+	for (; p != NULL ; p = p->next) {
+		struct modification *copy = NEW(struct modification);
+
+		if (FAILEDTOALLOC(copy)) {
+			modification_freelist(head);
+			return NULL;
+		}
+		*copy = *p;
+		copy->next = NULL;
+		copy->previous = tail;
+		if (tail != NULL)
+			tail->next = copy;
+		else
+			head = copy;
+		tail = copy;
+	}
+	return head;
+}
+
+/* hand ownership of the parsed modification list to the caller;
+ * may only be called once per patch */
+struct modification *patch_getmodifications(struct rred_patch *p) {
+	struct modification *result;
+
+	assert (!p->alreadyinuse);
+	result = p->modifications;
+	p->modifications = NULL;
+	p->alreadyinuse = true;
+	return result;
+}
+
+/* borrow the parsed modification list without transferring ownership */
+const struct modification *patch_getconstmodifications(struct rred_patch *p) {
+	assert (!p->alreadyinuse);
+	return p->modifications;
+}
+
+/* free the first element of a list and return the remainder */
+static struct modification *modification_freehead(/*@only@*/struct modification *p) {
+	struct modification *rest;
+
+	rest = p->next;
+	free(p);
+	return rest;
+}
+
+/* release a patch: unmap its data, close its fd and free any
+ * modifications that were never handed out */
+void patch_free(/*@only@*/struct rred_patch *p) {
+	if (p->data != NULL)
+		(void)munmap(p->data, p->len);
+	if (p->fd >= 0)
+		(void)close(p->fd);
+	modification_freelist(p->modifications);
+	free(p);
+}
+
+/* open a patch file by name and parse it; length -1 means "use the
+ * file's actual size". Convenience wrapper around patch_loadfd. */
+retvalue patch_load(const char *filename, off_t length, struct rred_patch **patch_p) {
+	int fd = open(filename, O_NOCTTY|O_RDONLY);
+
+	if (fd < 0) {
+		int err = errno;
+
+		fprintf(stderr,
+"Error %d opening '%s' for reading: %s\n", err, filename, strerror(err));
+		return RET_ERRNO(err);
+	}
+	return patch_loadfd(filename, fd, length, patch_p);
+}
+
+/* parse an ed-style patch from an already-open fd (takes ownership of fd).
+ * length is the expected file size, or -1 to accept whatever fstat reports.
+ * On success *patch_p holds the mmap'ed file plus the parsed modification
+ * list (sorted, overlaps merged via combine_patches). */
+retvalue patch_loadfd(const char *filename, int fd, off_t length, struct rred_patch **patch_p) {
+	int i;
+	struct rred_patch *patch;
+	const char *p, *e, *d, *l;
+	int number, number2, line;
+	char type;
+	struct modification *n;
+	struct stat statbuf;
+
+	patch = zNEW(struct rred_patch);
+	if (FAILEDTOALLOC(patch)) {
+		(void)close(fd);
+		return RET_ERROR_OOM;
+	}
+	patch->fd = fd;
+	i = fstat(patch->fd, &statbuf);
+	if (i != 0) {
+		int err = errno;
+		fprintf(stderr,
+"Error %d retrieving length of '%s': %s\n", err, filename, strerror(err));
+		patch_free(patch);
+		return RET_ERRNO(err);
+	}
+	if (length == -1)
+		length = statbuf.st_size;
+	if (statbuf.st_size != length) {
+		/* a size mismatch is not a system error, so do not report
+		 * a (stale) errno here */
+		fprintf(stderr,
+"Unexpected size of '%s': expected %lld, got %lld\n", filename,
+			(long long)length, (long long)statbuf.st_size);
+		patch_free(patch);
+		return RET_ERROR;
+	}
+	if (length == 0) {
+		/* handle empty patches gracefully */
+		close(patch->fd);
+		patch->fd = -1;
+		patch->data = NULL;
+		patch->len = 0;
+		patch->modifications = NULL;
+		*patch_p = patch;
+		return RET_OK;
+	}
+	patch->len = length;
+	patch->data = mmap(NULL, patch->len, PROT_READ, MAP_PRIVATE,
+			patch->fd, 0);
+	if (patch->data == MAP_FAILED) {
+		int err = errno;
+		fprintf(stderr,
+"Error %d mapping '%s' into memory: %s\n", err, filename, strerror(err));
+		patch_free(patch);
+		return RET_ERRNO(err);
+	}
+	p = patch->data;
+	e = p + patch->len;
+	line = 1;
+	while (p < e) {
+		/* <number>,<number>(c|d)\n or <number>(a|i|c|d) */
+		d = p;
+		number = 0; number2 = -1;
+		while (d < e && *d >= '0' && *d <= '9') {
+			number = (*d - '0') + 10 * number;
+			d++;
+		}
+		if (d > p && d < e && *d == ',') {
+			d++;
+			number2 = 0;
+			while (d < e && *d >= '0' && *d <= '9') {
+				number2 = (*d - '0') + 10 * number2;
+				d++;
+			}
+			if (number2 < number) {
+				fprintf(stderr,
+"Error parsing '%s': malformed range (2nd number smaller than 1s) at line %d\n",
+						filename, line);
+				patch_free(patch);
+				return RET_ERROR;
+			}
+		}
+		if (d >= e || (*d != 'c' && *d != 'i' && *d != 'a' && *d != 'd')) {
+			fprintf(stderr,
+"Error parsing '%s': expected rule (c,i,a or d) at line %d\n",
+					filename, line);
+			patch_free(patch);
+			return RET_ERROR;
+		}
+		type = *d;
+		d++;
+		/* tolerate DOS line endings after the command */
+		while (d < e && *d == '\r')
+			d++;
+		if (d >= e || *d != '\n') {
+			fprintf(stderr,
+"Error parsing '%s': expected newline after command at line %d\n",
+					filename, line);
+			patch_free(patch);
+			return RET_ERROR;
+		}
+		d++;
+		line++;
+
+		if (type != 'a' && number == 0) {
+			fprintf(stderr,
+"Error parsing '%s': missing number at line %d\n",
+					filename, line);
+			patch_free(patch);
+			return RET_ERROR;
+		}
+		if (type != 'c' && type != 'd' && number2 >= 0) {
+			fprintf(stderr,
+"Error parsing '%s': line range not allowed with %c at line %d\n",
+					filename, (char)type, line);
+			patch_free(patch);
+			return RET_ERROR;
+		}
+		n = zNEW(struct modification);
+		if (FAILEDTOALLOC(n)) {
+			patch_free(patch);
+			return RET_ERROR_OOM;
+		}
+		/* prepend: patches are usually emitted bottom-up, so the
+		 * resulting list ends up sorted by oldlinestart */
+		n->next = patch->modifications;
+		if (n->next != NULL)
+			n->next->previous = n;
+		patch->modifications = n;
+
+		p = d;
+		if (type == 'd') {
+			n->content = NULL;
+			n->len = 0;
+			n->newlinecount = 0;
+		} else {
+			int startline = line;
+
+			/* collect text lines until the terminating "." */
+			l = p;
+			while (l < e) {
+				p = l;
+				while (l < e && *l != '\n')
+					l++;
+				if (l >= e) {
+					if (l == p + 1 && *p == '.') {
+						/* that is also corrupted,
+						 * but we can cure it */
+						break;
+					}
+					fprintf(stderr,
+"Error parsing '%s': ends in unterminated line. File most likely corrupted\n",
+						filename);
+					patch_free(patch);
+					return RET_ERROR;
+				}
+				l++;
+				if (p[0] == '.' && (p[1] == '\n' || p[1] == '\r'))
+					break;
+				line++;
+			}
+			if (p[0] != '.' || (l > p + 1 && p[1] != '\n' && p[1] != '\r')) {
+				fprintf(stderr,
+"Error parsing '%s': ends waiting for dot. File most likely corrupted\n",
+						filename);
+				patch_free(patch);
+				return RET_ERROR;
+			}
+			n->content = d;
+			n->len = p - d;
+			n->newlinecount = line - startline;
+			p = l;
+			line++;
+		}
+		if (type == 'a') {
+			/* appends appends after instead of before something: */
+			n->oldlinestart = number + 1;
+			n->oldlinecount = 0;
+		} else if (type == 'i') {
+			n->oldlinestart = number;
+			n->oldlinecount = 0;
+		} else {
+			n->oldlinestart = number;
+			if (number2 < 0)
+				n->oldlinecount = 1;
+			else
+				n->oldlinecount = (number2 - number) + 1;
+		}
+		/* make sure things are in the order diff usually
+		 * generates them, which makes line-calculation much easier: */
+		if (n->next != NULL) {
+			if (n->oldlinestart + n->oldlinecount
+					> n->next->oldlinestart) {
+				struct modification *first, *second;
+				retvalue r;
+
+				// TODO: it might be more efficient to
+				// first store the different parts as different
+				// patchsets and then combine...
+
+				/* unlink and feed into patch merger */
+				first = n->next;
+				first->previous = NULL;
+				second = n;
+				n->next = NULL;
+				n = NULL;
+				r = combine_patches(&n, first, second);
+				patch->modifications = n;
+				if (RET_WAS_ERROR(r)) {
+					patch_free(patch);
+					return r;
+				}
+			}
+		}
+	}
+	*patch_p = patch;
+	return RET_OK;
+}
+
+/* drop the last r lines of a modification's replacement text */
+static void modification_stripendlines(struct modification *m, int r) {
+	const char *c;
+	int remaining;
+
+	m->newlinecount -= r;
+	remaining = m->newlinecount;
+	c = m->content;
+	/* advance past the lines that are kept */
+	while (remaining > 0) {
+		while (*c != '\n')
+			c++;
+		c++;
+		remaining--;
+	}
+	assert ((size_t)(c - m->content) <= m->len);
+	m->len = c - m->content;
+}
+
+/* drop the first r lines of a modification's replacement text */
+static void modification_stripstartlines(struct modification *m, int r) {
+	const char *c = m->content;
+
+	m->newlinecount -= r;
+	/* skip the r lines to be discarded */
+	while (r > 0) {
+		while (*c != '\n')
+			c++;
+		c++;
+		r--;
+	}
+	assert ((size_t)(c - m->content) <= m->len);
+	m->len -= c - m->content;
+	m->content = c;
+}
+
+/* pop the head of *from_p and append it to the result list
+ * (*result_p/*last_p), merging it into the previous tail element when
+ * they touch (adjacent old-line ranges). May free the popped element
+ * or the old tail when merging. */
+static inline void move_queue(struct modification **last_p, struct modification **result_p, struct modification **from_p) {
+	struct modification *toadd, *last;
+
+	/* remove from queue: */
+	toadd = *from_p;
+	*from_p = toadd->next;
+	if (toadd->next != NULL) {
+		toadd->next->previous = NULL;
+		toadd->next = NULL;
+	}
+
+	/* if nothing yet, make it the first */
+	if (*last_p == NULL) {
+		*result_p = toadd;
+		toadd->previous = NULL;
+		*last_p = toadd;
+		return;
+	}
+
+	last = *last_p;
+	if (toadd->oldlinestart == last->oldlinestart + last->oldlinecount) {
+		/* check if something can be combined: */
+		if (toadd->newlinecount == 0) {
+			/* pure deletion directly after last: extend last */
+			last->oldlinecount += toadd->oldlinecount;
+			free(toadd);
+			return;
+		}
+		if (last->newlinecount == 0) {
+			/* last was a pure deletion: fold it into toadd,
+			 * replacing last in the list */
+			toadd->oldlinestart = last->oldlinestart;
+			toadd->oldlinecount += last->oldlinecount;
+			toadd->previous = last->previous;
+			if (toadd->previous == NULL)
+				*result_p = toadd;
+			else
+				toadd->previous->next = toadd;
+			*last_p = toadd;
+			free(last);
+			return;
+		}
+		if (last->content + last->len == toadd->content) {
+			/* replacement texts are contiguous in the mmap'ed
+			 * patch, so the two entries can simply be merged */
+			last->oldlinecount += toadd->oldlinecount;
+			last->newlinecount += toadd->newlinecount;
+			last->len += toadd->len;
+			free(toadd);
+			return;
+		}
+	}
+	/* no merge possible: plain append */
+	toadd->previous = last;
+	last->next = toadd;
+	assert (last->oldlinestart + last->oldlinecount <= toadd->oldlinestart);
+	*last_p = toadd;
+	return;
+}
+
+/* this merges a set of modifications into an already existing stack,
+ * modifying line numbers or even cutting away deleted/newly overwritten
+ * stuff as necessary */
+retvalue combine_patches(struct modification **result_p, /*@only@*/struct modification *first, /*@only@*/struct modification *second) {
+ struct modification *p, *a, *result, *last;
+ long lineofs;
+
+ p = first;
+ result = NULL;
+ last = NULL;
+ a = second;
+
+ lineofs = 0;
+
+ while (a != NULL) {
+ /* modification totally before current one,
+ * so just add it before it */
+ if (p == NULL || lineofs + a->oldlinestart + a->oldlinecount
+ <= p->oldlinestart) {
+ a->oldlinestart += lineofs;
+ move_queue(&last, &result, &a);
+ assert (p == NULL || p->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ continue;
+ }
+ /* modification to add after current head modification,
+ * so finalize head modification and update lineofs */
+ if (lineofs + a->oldlinestart
+ >= p->oldlinestart + p->newlinecount) {
+ lineofs += p->oldlinecount - p->newlinecount;
+ move_queue(&last, &result, &p);
+ assert (lineofs + a->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ continue;
+ }
+ /* new modification removes everything the old one added: */
+ if (lineofs + a->oldlinestart <= p->oldlinestart
+ && lineofs + a->oldlinestart + a->oldlinecount
+ >= p->oldlinestart + p->newlinecount) {
+ a->oldlinestart -= p->oldlinecount - p->newlinecount;
+ a->oldlinecount += p->oldlinecount - p->newlinecount;
+ lineofs += p->oldlinecount - p->newlinecount;
+ p = modification_freehead(p);
+ if (a->oldlinecount == 0 && a->newlinecount == 0) {
+ /* a exactly cancels p */
+ a = modification_freehead(a);
+ }
+ /* otherwise a is not yet finished,
+ * it might modify more */
+ continue;
+ }
+ /* otherwise something overlaps, things get complicated here: */
+
+ /* start of *a removes end of *p, so reduce *p: */
+ if (lineofs + a->oldlinestart > p->oldlinestart &&
+ lineofs + a->oldlinestart
+ < p->oldlinestart + p->newlinecount &&
+ lineofs + a->oldlinestart + a->oldlinecount
+ >= p->oldlinestart + p->newlinecount) {
+ int removedlines = p->oldlinestart + p->newlinecount
+ - (lineofs + a->oldlinestart);
+
+ /* finalize p as before */
+ lineofs += p->oldlinecount - p->newlinecount;
+ /* just telling a to delete less */
+ a->oldlinestart += removedlines;
+ a->oldlinecount -= removedlines;
+ /* and p to add less */
+ modification_stripendlines(p, removedlines);
+ move_queue(&last, &result, &p);
+ assert (lineofs + a->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ continue;
+ }
+ /* end of *a remove start of *p, so finalize *a and reduce *p */
+ if (lineofs + a->oldlinestart <= p->oldlinestart &&
+ lineofs + a->oldlinestart + a->oldlinecount
+ > p->oldlinestart &&
+ lineofs + a->oldlinestart + a->oldlinecount
+ < p->oldlinestart + p->newlinecount) {
+ int removedlines =
+ lineofs + a->oldlinestart + a->oldlinecount
+ - p->oldlinestart;
+ /* finalize *a with less lines deleted:*/
+ a->oldlinestart += lineofs;
+ a->oldlinecount -= removedlines;
+ if (a->oldlinecount == 0 && a->newlinecount == 0) {
+ /* a only removed something and this was hereby
+ * removed from p */
+ a = modification_freehead(a);
+ } else
+ move_queue(&last, &result, &a);
+ /* and reduce the number of lines of *p */
+ assert (removedlines < p->newlinecount);
+ modification_stripstartlines(p, removedlines);
+ /* p->newlinecount got smaller,
+ * so less will be deleted later */
+ lineofs -= removedlines;
+ if (last != NULL) {
+ assert (p->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ if (a != NULL)
+ assert (lineofs + a->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ }
+ /* note that a->oldlinestart+a->oldlinecount+1
+ * == p->oldlinestart */
+ continue;
+ }
+ /* the most complex case left, a inside p, this
+ * needs p split in two */
+ if (lineofs + a->oldlinestart > p->oldlinestart &&
+ lineofs + a->oldlinestart + a->oldlinecount
+ < p->oldlinestart + p->newlinecount) {
+ struct modification *n;
+ int removedlines = p->oldlinestart + p->newlinecount
+ - (lineofs + a->oldlinestart);
+
+ n = zNEW(struct modification);
+ if (FAILEDTOALLOC(n)) {
+ modification_freelist(result);
+ modification_freelist(p);
+ modification_freelist(a);
+ return RET_ERROR_OOM;
+ }
+ *n = *p;
+ /* all removing into the later p, so
+ * that later numbers fit */
+ n->next = NULL;
+ n->oldlinecount = 0;
+ assert (removedlines < n->newlinecount);
+ modification_stripendlines(n, removedlines);
+ lineofs += n->oldlinecount - n->newlinecount;
+ assert (lineofs+a->oldlinestart <= p->oldlinestart);
+ move_queue(&last, &result, &n);
+ assert (n == NULL);
+ /* only remove this and let the rest of the
+ * code handle the other changes */
+ modification_stripstartlines(p,
+ p->newlinecount - removedlines);
+ assert(p->newlinecount == removedlines);
+ assert (lineofs + a->oldlinestart >=
+ last->oldlinestart + last->oldlinecount);
+ continue;
+ }
+ modification_freelist(result);
+ modification_freelist(p);
+ modification_freelist(a);
+ fputs("Internal error in rred merging!\n", stderr);
+ return RET_ERROR;
+ }
+ while (p != NULL) {
+ move_queue(&last, &result, &p);
+ }
+ *result_p = result;
+ return RET_OK;
+}
+
+/* copy 'source' to stream o, applying the (sorted) modification list:
+ * at each modification's oldlinestart the replacement text is written
+ * and oldlinecount input lines are skipped.
+ * NOTE(review): fwrite/putc results are not checked here — presumably
+ * the caller detects output errors via ferror/fclose on o; confirm. */
+retvalue patch_file(FILE *o, const char *source, const struct modification *patch) {
+	FILE *i;
+	int currentline, ignore, c;
+
+	i = fopen(source, "r");
+	if (i == NULL) {
+		int e = errno;
+		fprintf(stderr, "Error %d opening %s: %s\n",
+				e, source, strerror(e));
+		return RET_ERRNO(e);
+	}
+	assert (patch == NULL || patch->oldlinestart > 0);
+	currentline = 1;
+	do {
+		/* apply all modifications starting at the current line */
+		while (patch != NULL && patch->oldlinestart == currentline) {
+			fwrite(patch->content, patch->len, 1, o);
+			ignore = patch->oldlinecount;
+			patch = patch->next;
+			/* skip the replaced input lines */
+			while (ignore > 0) {
+				do {
+					c = getc(i);
+				} while (c != '\n' && c != EOF);
+				ignore--;
+				currentline++;
+			}
+		}
+		assert (patch == NULL || patch->oldlinestart >= currentline);
+		/* copy one unmodified line through */
+		while ((c = getc(i)) != '\n') {
+			if (c == EOF) {
+				if (patch != NULL) {
+					fprintf(stderr,
+"Error patching '%s', file shorter than expected by patches!\n",
+						source);
+					(void)fclose(i);
+					return RET_ERROR;
+				}
+				break;
+			}
+			putc(c, o);
+		}
+		if (c == EOF)
+			break;
+		putc(c, o);
+		currentline++;
+	} while (1);
+	if (ferror(i) != 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				e, source, strerror(e));
+		(void)fclose(i);
+		return RET_ERRNO(e);
+	}
+	if (fclose(i) != 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				e, source, strerror(e));
+		return RET_ERRNO(e);
+	}
+	return RET_OK;
+}
+
+/* serialize a modification list as an ed-style patch through write_func
+ * (called with data, length and the opaque argument f).
+ * ed patches list commands in descending line order, so the (ascending)
+ * list is walked from its tail backwards; runs of touching entries are
+ * merged into a single c/a/d command. */
+void modification_printaspatch(void *f, const struct modification *m, void write_func(const void *, size_t, void *)) {
+	const struct modification *p, *q, *r;
+	char line[30];
+	int len;
+
+	if (m == NULL)
+		return;
+	assert (m->previous == NULL);
+	/* go to the end, as we have to print it backwards */
+	p = m;
+	while (p->next != NULL) {
+		assert (p->next->previous == p);
+		p = p->next;
+	}
+	/* then print, possibly merging things */
+	while (p != NULL) {
+		int start, oldcount, newcount;
+		start = p->oldlinestart;
+		oldcount = p->oldlinecount;
+		newcount = p->newlinecount;
+
+		if (p->next != NULL)
+			assert (start + oldcount <= p->next->oldlinestart);
+
+		/* extend the run backwards over adjacent entries;
+		 * r ends up at the first entry of the run */
+		r = p;
+		for (q = p->previous ;
+		     q != NULL && q->oldlinestart + q->oldlinecount == start ;
+		     q = q->previous) {
+			oldcount += q->oldlinecount;
+			start = q->oldlinestart;
+			newcount += q->newlinecount;
+			r = q;
+		}
+		/* choose the command: d (delete), a (append), c (change) */
+		if (newcount == 0) {
+			assert (oldcount > 0);
+			if (oldcount == 1)
+				len = snprintf(line, sizeof(line), "%dd\n",
+						start);
+			else
+				len = snprintf(line, sizeof(line), "%d,%dd\n",
+						start, start + oldcount - 1);
+		} else {
+			if (oldcount == 0)
+				len = snprintf(line, sizeof(line), "%da\n",
+						start - 1);
+			else if (oldcount == 1)
+				len = snprintf(line, sizeof(line), "%dc\n",
+						start);
+			else
+				len = snprintf(line, sizeof(line), "%d,%dc\n",
+						start, start + oldcount - 1);
+		}
+		assert (len < (int)sizeof(line));
+		write_func(line, len, f);
+		if (newcount != 0) {
+			/* emit the replacement text of the whole run,
+			 * terminated by the single "." line */
+			while (r != p->next) {
+				if (r->len > 0)
+					write_func(r->content, r->len, f);
+				newcount -= r->newlinecount;
+				r = r->next;
+			}
+			assert (newcount == 0);
+			write_func(".\n", 2, f);
+		}
+		p = q;
+	}
+}
+
+/* make sure a patch is not empty and does not only add lines at the start,
+ * to work around some problems in apt */
+
+/* If the patch only adds lines at the start of the file (or is empty),
+ * append a dummy modification that "changes" the first untouched line
+ * of 'source' to itself, so apt does not choke (see APT_545694_WORKAROUND).
+ * On success *line_p holds the allocated line (NULL if nothing was
+ * added); the caller frees it after the patch has been written. */
+retvalue modification_addstuff(const char *source, struct modification **patch_p, char **line_p) {
+	struct modification **pp, *n, *m = NULL;
+	char *line = NULL; size_t bufsize = 0;
+	ssize_t got;
+	FILE *i;
+	long lineno = 0;
+
+	pp = patch_p;
+	/* check if this only adds things at the start and count how many */
+	while (*pp != NULL) {
+		m = *pp;
+		if (m->oldlinecount > 0 || m->oldlinestart > 1) {
+			/* patch touches old lines: no workaround needed */
+			*line_p = NULL;
+			return RET_OK;
+		}
+		lineno += m->newlinecount;
+		pp = &(*pp)->next;
+	}
+	/* now get the next line and claim it was changed */
+	i = fopen(source, "r");
+	if (i == NULL) {
+		int e = errno;
+		fprintf(stderr, "Error %d opening '%s': %s\n",
+				e, source, strerror(e));
+		return RET_ERRNO(e);
+	}
+	do {
+		got = getline(&line, &bufsize, i);
+	} while (got >= 0 && lineno-- > 0);
+	if (got < 0) {
+		int e = errno;
+
+		/* You should have made sure the old file is not empty */
+		/* NOTE(review): getline also returns -1 on plain EOF,
+		 * in which case errno may be stale here */
+		fprintf(stderr, "Error %d reading '%s': %s\n",
+				e, source, strerror(e));
+		(void)fclose(i);
+		return RET_ERRNO(e);
+	}
+	(void)fclose(i);
+
+	n = NEW(struct modification);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	/* append a 1c1-style no-op change of the first line */
+	*pp = n;
+	n->next = NULL;
+	n->previous = m;
+	n->oldlinestart = 1;
+	n->oldlinecount = 1;
+	n->newlinecount = 1;
+	n->len = got;
+	n->content = line;
+	*line_p = line;
+	return RET_OK;
+}
+
diff --git a/rredpatch.h b/rredpatch.h
new file mode 100644
index 0000000..2148527
--- /dev/null
+++ b/rredpatch.h
@@ -0,0 +1,19 @@
+#ifndef REPREPRO_RREDPATCH_H
+#define REPREPRO_RREDPATCH_H
+
+/* parsing, merging and applying of the restricted ed-style patches
+ * ("rred") used by Debian {Packages,Sources}.diff files */
+struct rred_patch;
+struct modification;
+
+/* load a patch from a file name resp. an already-open fd */
+retvalue patch_load(const char *, off_t, /*@out@*/struct rred_patch **);
+retvalue patch_loadfd(const char *, int, off_t, /*@out@*/struct rred_patch **);
+void patch_free(/*@only@*/struct rred_patch *);
+/* take resp. borrow the parsed modification list */
+/*@only@*//*@null@*/struct modification *patch_getmodifications(struct rred_patch *);
+/*@null@*/const struct modification *patch_getconstmodifications(struct rred_patch *);
+struct modification *modification_dup(const struct modification *);
+void modification_freelist(/*@only@*/struct modification *);
+/* merge two patches into one equivalent patch (consumes both) */
+retvalue combine_patches(/*@out@*/struct modification **, /*@only@*/struct modification *, /*@only@*/struct modification *);
+void modification_printaspatch(void *, const struct modification *, void (const void *, size_t, void *));
+retvalue modification_addstuff(const char *source, struct modification **patch_p, /*@out@*/char **line_p);
+retvalue patch_file(FILE *, const char *, const struct modification *);
+
+#endif
diff --git a/rredtool.c b/rredtool.c
new file mode 100644
index 0000000..5e2a60b
--- /dev/null
+++ b/rredtool.c
@@ -0,0 +1,1459 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <dirent.h>
+#include <assert.h>
+#include "globals.h"
+#include "error.h"
+#include "mprintf.h"
+#include "sha1.h"
+#include "filecntl.h"
+#include "rredpatch.h"
+#include "time.h"
+
+/* apt had a bug, http://bugs.debian.org/545694
+ * causing it to fail if a patch file only prepends text.
+ * This is fixed in apt version 0.7.24,
+ * so this workaround can be disabled when older apt
+ * versions are no longer expected (i.e. squeeze is oldstable) */
+#define APT_545694_WORKAROUND
+
+/* apt always wants to apply the last patch
+ * (see http://bugs.debian.org/545699), so
+ * always create an fake-empty patch last */
+#define APT_545699_WORKAROUND
+
+/* upper bound on patches kept in a .diff/Index (overridable via
+ * --max-patch-count) */
+static int max_patch_count = 20;
+
+/* command-line options; short letters are handled in main's getopt loop */
+static const struct option options[] = {
+	{"version", no_argument, NULL, 'V'},
+	{"help", no_argument, NULL, 'h'},
+	{"debug", no_argument, NULL, 'D'},
+	{"merge", no_argument, NULL, 'm'},
+	{"max-patch-count", required_argument, NULL, 'N'},
+	{"reprepro-hook", no_argument, NULL, 'R'},
+	{"patch", no_argument, NULL, 'p'},
+	{NULL, 0, NULL, 0}
+};
+
+/* print usage summary to the given stream (stdout for --help,
+ * stderr on error) */
+static void usage(FILE *f) {
+	fputs(
+"rredtool: handle the restricted subset of ed patches\n"
+" as used by Debian {Packages,Sources}.diff files.\n"
+"Syntax:\n"
+"	rredtool <directory> <newfile> <oldfile> <mode>\n"
+"		update .diff directory (to be called from reprepro)\n"
+"	rredtool --merge <patches..>\n"
+"		merge patches into one patch\n"
+"	rredtool --patch <file> <patches..>\n"
+"		apply patches to file\n",	f);
+}
+
+/* lookup table for hex encoding of SHA-1 digests */
+static const char tab[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
+			'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+
+/* hex-encoded SHA-1 plus file size, as listed in .diff/Index files */
+struct hash {
+	char sha1[2*SHA1_DIGEST_SIZE+1];
+	off_t len;
+};
+/* we only need sha1 sum and we need it a lot, so implement a "only sha1" */
+static void finalize_sha1(struct SHA1_Context *context, off_t len, /*@out@*/struct hash *hash){
+ char *sha1;
+ unsigned char sha1buffer[SHA1_DIGEST_SIZE];
+ int i;
+
+ SHA1Final(context, sha1buffer);
+ sha1 = hash->sha1;
+ for (i = 0 ; i < SHA1_DIGEST_SIZE ; i++) {
+ *(sha1++) = tab[sha1buffer[i] >> 4];
+ *(sha1++) = tab[sha1buffer[i] & 0xF];
+ }
+ *sha1 = '\0';
+ hash->len = len;
+}
+
+/* compute the SHA-1 hex digest and size of a file into *hash.
+ * Returns RET_NOTHING if the path does not name a regular file,
+ * RET_ERRNO(...) on I/O errors, RET_OK on success. */
+static retvalue gen_sha1sum(const char *fullfilename, /*@out@*/struct hash *hash) {
+	struct SHA1_Context context;
+	static const size_t bufsize = 16384;
+	unsigned char *buffer = malloc(bufsize);
+	ssize_t sizeread;
+	int e, i;
+	int infd;
+	struct stat s;
+
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+
+	SHA1Init(&context);
+
+	infd = open(fullfilename, O_RDONLY);
+	if (infd < 0) {
+		e = errno;
+		/* a missing/inaccessible non-regular file is not an error,
+		 * only "nothing to hash" */
+		if ((e == EACCES || e == ENOENT) &&
+				!isregularfile(fullfilename)) {
+			free(buffer);
+			return RET_NOTHING;
+		}
+		fprintf(stderr, "Error %d opening '%s': %s\n",
+				e, fullfilename, strerror(e));
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	i = fstat(infd, &s);
+	if (i != 0) {
+		e = errno;
+		fprintf(stderr, "Error %d getting information about '%s': %s\n",
+				e, fullfilename, strerror(e));
+		(void)close(infd);
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	do {
+		sizeread = read(infd, buffer, bufsize);
+		if (sizeread < 0) {
+			e = errno;
+			fprintf(stderr, "Error %d while reading %s: %s\n",
+					e, fullfilename, strerror(e));
+			free(buffer);
+			(void)close(infd);
+			return RET_ERRNO(e);
+		}
+		SHA1Update(&context, buffer, (size_t)sizeread);
+	} while (sizeread > 0);
+	free(buffer);
+	i = close(infd);
+	if (i != 0) {
+		e = errno;
+		/* was: "reading" — the failing operation is close() */
+		fprintf(stderr, "Error %d closing %s: %s\n",
+				e, fullfilename, strerror(e));
+		return RET_ERRNO(e);
+	}
+	finalize_sha1(&context, s.st_size, hash);
+	return RET_OK;
+}
+
+/* sink that writes to a FILE while accumulating a SHA-1 and byte count */
+struct fileandhash {
+	FILE *f;
+	off_t len;
+	struct SHA1_Context context;
+};
+
+/* write_func callback for modification_printaspatch: write data to
+ * fh->f and fold it into the running hash.
+ * NOTE(review): fwrite's return value is ignored — presumably output
+ * errors are caught later via the stream; confirm. */
+static void hash_and_write(const void *data, size_t len, void *p) {
+	struct fileandhash *fh = p;
+
+	fwrite(data, len, 1, fh->f);
+	SHA1Update(&fh->context, data, len);
+	fh->len += len;
+}
+
+#define DATEFMT "%Y-%m-%d-%H%M.%S"
+#define DATELEN (4 + 1 + 2 + 1 + 2 + 1 + 2 + 2 + 1 + 2)
+
+static retvalue get_date_string(char *date, size_t max) {
+ struct tm *tm;
+ time_t current_time;
+ size_t len;
+
+ assert (max == DATELEN + 1);
+
+ current_time = time(NULL);
+ if (current_time == ((time_t) -1)) {
+ int e = errno;
+ fprintf(stderr, "rredtool: Error %d from time: %s\n",
+ e, strerror(e));
+ return RET_ERROR;
+ }
+ tm = gmtime(&current_time);
+ if (tm == NULL) {
+ int e = errno;
+ fprintf(stderr, "rredtool: Error %d from gmtime: %s\n",
+ e, strerror(e));
+ return RET_ERROR;
+ }
+ errno = 0;
+ len = strftime(date, max, DATEFMT, tm);
+ if (len == 0 || len != DATELEN) {
+ fprintf(stderr,
+"rredtool: internal problem calling strftime!\n");
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
+/* create an anonymous temporary file (unlinked immediately) in
+ * $TMPDIR/$TEMPDIR or /tmp; returns an fd or -1 with errno set */
+static int create_temporary_file(void) {
+	const char *tempdir;
+	char *filename;
+	int fd;
+
+	tempdir = getenv("TMPDIR");
+	if (tempdir == NULL)
+		tempdir = getenv("TEMPDIR");
+	if (tempdir == NULL)
+		tempdir = "/tmp";
+	filename = mprintf("%s/XXXXXX", tempdir);
+	if (FAILEDTOALLOC(filename)) {
+		errno = ENOMEM;
+		return -1;
+	}
+#ifdef HAVE_MKOSTEMP
+	/* mkostemp's second argument is open(2) flags, not a permission
+	 * mode (it always creates with 0600); passing 0600 here would
+	 * request bogus flags */
+	fd = mkostemp(filename, 0);
+#else
+#ifdef HAVE_MKSTEMP
+	fd = mkstemp(filename);
+#else
+#error Need mkostemp or mkstemp
+#endif
+#endif
+	/* unlink right away: the file lives only as long as the fd */
+	if (fd >= 0)
+		unlink(filename);
+	free(filename);
+	return fd;
+}
+
+/* run argv with stdout redirected into a fresh anonymous temporary
+ * file. On the expected exit code, rewind the file and return it via
+ * *fd_p; otherwise report the failure. The child signals its own setup
+ * errors with SIGUSR1 so they are not reported twice. */
+static retvalue execute_into_file(const char * const argv[], /*@out@*/int *fd_p, int expected_exit_code) {
+	pid_t child, pid;
+	int fd, status;
+
+	fd = create_temporary_file();
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d creating temporary file: %s\n",
+				e, strerror(e));
+		return RET_ERRNO(e);
+	}
+
+	child = fork();
+	if (child == (pid_t)-1) {
+		int e = errno;
+		fprintf(stderr, "rredtool: Error %d forking: %s\n",
+				e, strerror(e));
+		/* don't leak the temporary file */
+		(void)close(fd);
+		return RET_ERRNO(e);
+	}
+	if (child == 0) {
+		int e, i;
+
+		/* make the temporary file the child's stdout */
+		do {
+			i = dup2(fd, 1);
+			e = errno;
+		} while (i < 0 && (e == EINTR || e == EBUSY));
+		if (i < 0) {
+			fprintf(stderr,
+"rredtool: Error %d in dup2(%d, 1): %s\n",
+					e, fd, strerror(e));
+			raise(SIGUSR1);
+			exit(EXIT_FAILURE);
+		}
+		close(fd);
+		closefrom(3);
+		execvp(argv[0], (char * const *)argv);
+		/* capture execvp's errno: the value saved in the dup2
+		 * loop above is stale by now */
+		e = errno;
+		fprintf(stderr, "rredtool: Error %d executing %s: %s\n",
+				e, argv[0], strerror(e));
+		raise(SIGUSR1);
+		exit(EXIT_FAILURE);
+	}
+	do {
+		pid = waitpid(child, &status, 0);
+	} while (pid == (pid_t)-1 && errno == EINTR);
+	if (pid == (pid_t)-1) {
+		int e = errno;
+		fprintf(stderr,
+"rredtool: Error %d waiting for %s child %lu: %s!\n",
+				e, argv[0], (unsigned long)child, strerror(e));
+		(void)close(fd);
+		return RET_ERROR;
+	}
+	if (WIFEXITED(status) && WEXITSTATUS(status) == expected_exit_code) {
+		if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
+			int e = errno;
+			fprintf(stderr,
+"rredtool: Error %d rewinding temporary file to start: %s!\n",
+					e, strerror(e));
+			(void)close(fd);
+			return RET_ERROR;
+		}
+		*fd_p = fd;
+		return RET_OK;
+	}
+	close(fd);
+	if (WIFEXITED(status)) {
+		fprintf(stderr,
+"rredtool: %s returned with unexpected exit code %d\n",
+				argv[0], (int)(WEXITSTATUS(status)));
+		return RET_ERROR;
+	}
+	if (WIFSIGNALED(status)) {
+		/* SIGUSR1 means the child already printed its error */
+		if (WTERMSIG(status) != SIGUSR1)
+			fprintf(stderr, "rredtool: %s killed by signal %d\n",
+					argv[0], (int)(WTERMSIG(status)));
+		return RET_ERROR;
+	}
+	fprintf(stderr, "rredtool: %s child dies mysteriously (status=%d)\n",
+			argv[0], status);
+	return RET_ERROR;
+}
+
+/* in-memory representation of an existing .diff/Index file:
+ * the checksum of the current index ('hash') and the list of
+ * recorded patches in file order (oldest first) */
+struct old_index_file {
+	struct old_patch {
+		struct old_patch *next, *prev;
+		/* name of the patch file, without the .gz suffix */
+		char *basefilename;
+		/* part until the + in the name */
+		char *nameprefix;
+		/* checksum/size recorded in the SHA1-History line */
+		struct hash hash;
+	} *first, *last;
+	struct hash hash;
+};
+
+/* release everything allocated for a parsed Index file */
+static void old_index_done(/*@only@*/struct old_index_file *o) {
+	struct old_patch *p = o->first;
+
+	while (p != NULL) {
+		struct old_patch *n = p->next;
+
+		free(p->basefilename);
+		free(p->nameprefix);
+		free(p);
+		p = n;
+	}
+	o->first = NULL;
+	o->last = NULL;
+}
+
+/* Ensure no earlier patch shares o's nameprefix, by appending '+'
+ * to each clashing earlier prefix until all prefixes differ. */
+static retvalue make_prefix_uniq(struct old_patch *o) {
+	struct old_patch *p, *last = NULL;
+	const char *lookfor = o->nameprefix;
+
+	/* make the prefix uniq by extending all previous occurrences
+	 * of this prefix with an additional +. As this might already
+	 * have happened, this has to be possibly repeated */
+
+	while (true) {
+		for (p = o->prev ; p != NULL ; p = p->prev) {
+			/* skip the entry extended in the previous round;
+			 * its prefix is the one we now search for */
+			if (p == last)
+				continue;
+			if (strcmp(p->nameprefix, lookfor) == 0) {
+				char *h;
+				size_t l = strlen(p->nameprefix);
+
+				/* grow by one byte for the extra '+' */
+				h = realloc(p->nameprefix, l+2);
+				if (FAILEDTOALLOC(h))
+					return RET_ERROR_OOM;
+				h[l] = '+' ;
+				h[l+1] = '\0';
+				p->nameprefix = h;
+				/* the extended prefix may clash with yet
+				 * another earlier one, so look again */
+				lookfor = h;
+				last = p;
+				break;
+			}
+		}
+		/* no clash found: every prefix is uniq now */
+		if (p == NULL)
+			return RET_OK;
+	}
+}
+
+/* Parse the contents of an existing .diff/Index file (expected to be
+ * in exactly the format write_new_index produces) into *oldindex.
+ * Returns RET_NOTHING when the format is not the expected one. */
+static inline retvalue parse_old_index(char *p, size_t len, struct old_index_file *oldindex) {
+	char *q, *e = p + len;
+	off_t filesize;
+	struct old_patch *o;
+	retvalue r;
+
+	/* This is only supposed to parse files it wrote itself
+	 * (otherwise not having merged patches would most likely break
+	 * things in ugly ways), so parsing it can be very strict and easy: */
+
+#define checkorfail(val) if (e - p < (intptr_t)strlen(val) || memcmp(p, val, strlen(val)) != 0) return RET_NOTHING; else { p += strlen(val); }
+
+	checkorfail("SHA1-Current: ");
+	/* checksum of the current index, optionally followed by its size */
+	q = strchr(p, '\n');
+	if (q != NULL && q - p > 2 * SHA1_DIGEST_SIZE)
+		q = memchr(p, ' ', q - p);
+	if (q == NULL || q - p != 2 * SHA1_DIGEST_SIZE)
+		return RET_NOTHING;
+	memcpy(oldindex->hash.sha1, p, 2 * SHA1_DIGEST_SIZE);
+	oldindex->hash.sha1[2 * SHA1_DIGEST_SIZE] = '\0';
+	p = q;
+	if (*p == ' ') {
+		/* parse the decimal file size after the checksum */
+		p++;
+		filesize = 0;
+		while (*p >= '0' && *p <= '9') {
+			filesize = 10 * filesize + (*p - '0');
+			p++;
+		}
+		oldindex->hash.len = filesize;
+	} else
+		/* no size recorded (older format) */
+		oldindex->hash.len = (off_t)-1;
+	checkorfail("\nSHA1-History:\n");
+	/* each history line: " <sha1> <size> <basefilename>\n" */
+	while (*p == ' ') {
+		p++;
+
+		q = p;
+		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) {
+			p++;
+		}
+		if (p - q != 2 * SHA1_DIGEST_SIZE)
+			return RET_NOTHING;
+
+		o = zNEW(struct old_patch);
+		if (FAILEDTOALLOC(o))
+			return RET_ERROR_OOM;
+		/* append to the doubly linked list */
+		o->prev = oldindex->last;
+		oldindex->last = o;
+		if (o->prev == NULL)
+			oldindex->first = o;
+		else
+			o->prev->next = o;
+
+		/* o was zeroed by zNEW, so sha1 stays NUL terminated */
+		memcpy(o->hash.sha1, q, 2 * SHA1_DIGEST_SIZE);
+
+		while (*p == ' ')
+			p++;
+		if (*p < '0' || *p > '9')
+			return RET_NOTHING;
+		filesize = 0;
+		while (*p >= '0' && *p <= '9') {
+			filesize = 10 * filesize + (*p - '0');
+			p++;
+		}
+		o->hash.len = filesize;
+		if (*p != ' ')
+			return RET_NOTHING;
+		p++;
+		q = strchr(p, '\n');
+		if (q == NULL)
+			return RET_NOTHING;
+		o->basefilename = strndup(p, (size_t)(q-p));
+		if (FAILEDTOALLOC(o->basefilename))
+			return RET_ERROR_OOM;
+		p = q + 1;
+		/* the prefix is everything up to and including the first
+		 * '+', appended when the name contains none yet */
+		q = strchr(o->basefilename, '+');
+		if (q == NULL)
+			o->nameprefix = mprintf("%s+", o->basefilename);
+		else
+			o->nameprefix = strndup(o->basefilename,
+					1 + (size_t)(q - o->basefilename));
+		if (FAILEDTOALLOC(o->nameprefix))
+			return RET_ERROR_OOM;
+		r = make_prefix_uniq(o);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		/* allow pseudo-empty fake patches */
+		if (memcmp(o->hash.sha1, oldindex->hash.sha1,
+					2 * SHA1_DIGEST_SIZE) == 0)
+			continue;
+		// TODO: verify filename and create prefix...
+	}
+	checkorfail("SHA1-Patches:\n");
+	/* this section must list the same files in the same order as
+	 * SHA1-History above; only the names are cross-checked here */
+	o = oldindex->first;
+	while (*p == ' ') {
+		p++;
+
+		if (o == NULL)
+			return RET_NOTHING;
+
+		q = p;
+		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) {
+			p++;
+		}
+		if (p - q != 2 * SHA1_DIGEST_SIZE)
+			return RET_NOTHING;
+
+		/* checksum and size of the patch itself are only checked
+		 * for plausible syntax, not stored */
+		while (*p == ' ')
+			p++;
+		if (*p < '0' || *p > '9')
+			return RET_NOTHING;
+		while (*p >= '0' && *p <= '9') {
+			p++;
+		}
+		if (*p != ' ')
+			return RET_NOTHING;
+		p++;
+		q = strchr(p, '\n');
+		if (q == NULL)
+			return RET_NOTHING;
+		if (strncmp(o->basefilename, p, (size_t)(q-p)) != 0
+				|| o->basefilename[q-p] != '\0')
+			return RET_NOTHING;
+		p = q + 1;
+		o = o->next;
+	}
+	checkorfail("X-Patch-Precedence: merged\n");
+	/* nothing may follow the last expected line */
+	if (*p != '\0' || p != e)
+		return RET_NOTHING;
+	// TODO: check for dangerous stuff (like ../ in basename)
+	// TODO: ignore patches where the filename is missing?
+	return RET_OK;
+#undef checkorfail
+}
+
+/* Read and parse an existing .diff/Index file into *oldindex.
+ * Returns RET_NOTHING (with *oldindex zeroed) when the file does not
+ * exist or does not look like one written by rredtool. */
+static retvalue read_old_index(const char *fullfilename, /*@out@*/struct old_index_file *oldindex) {
+	int fd, i;
+	char *buffer;
+	size_t buffersize = 102400, available = 0;
+	ssize_t bytes_read;
+	retvalue r;
+
+	setzero(struct old_index_file, oldindex);
+
+	if (!isregularfile(fullfilename))
+		return RET_NOTHING;
+
+	fd = open(fullfilename, O_RDONLY);
+	if (fd < 0) {
+		int e = errno;
+
+		fprintf(stderr, "rredtool: Error %d opening '%s': %s\n",
+				e, fullfilename, strerror(e));
+		return RET_ERRNO(e);
+	}
+
+	/* index file should not be that big, so read into memory as a whole */
+	buffer = malloc(buffersize);
+	if (FAILEDTOALLOC(buffer)) {
+		close(fd);
+		return RET_ERROR_OOM;
+	}
+	do {
+		/* keep one byte spare for the terminating NUL */
+		bytes_read = read(fd, buffer + available,
+				buffersize - available - 1);
+		if (bytes_read < 0) {
+			int e = errno;
+
+			fprintf(stderr, "rredtool: Error %d reading '%s': %s\n",
+					e, fullfilename, strerror(e));
+			(void)close(fd);
+			free(buffer);
+			return RET_ERRNO(e);
+		}
+		assert ((size_t)bytes_read < buffersize - available);
+		available += bytes_read;
+		if (available + 1 >= buffersize) {
+			fprintf(stderr,
+"rredtool: Ridiculously long '%s' file!\n",
+					fullfilename);
+			(void)close(fd);
+			free(buffer);
+			return RET_ERROR;
+		}
+	} while (bytes_read > 0);
+	i = close(fd);
+	if (i != 0) {
+		int e = errno;
+
+		fprintf(stderr, "rredtool: Error %d closing '%s': %s\n",
+				e, fullfilename, strerror(e));
+		free(buffer);
+		return RET_ERRNO(e);
+	}
+	buffer[available] = '\0';
+
+	r = parse_old_index(buffer, available, oldindex);
+	free(buffer);
+	if (r == RET_NOTHING) {
+		/* wrong format, most likely a left over file */
+		fprintf(stderr,
+"rredtool: File '%s' does not look like created by rredtool, ignoring!\n",
+				fullfilename);
+		old_index_done(oldindex);
+		setzero(struct old_index_file, oldindex);
+		return RET_NOTHING;
+	}
+	if (RET_WAS_ERROR(r)) {
+		old_index_done(oldindex);
+		setzero(struct old_index_file, oldindex);
+		return r;
+	}
+	return RET_OK;
+}
+
+/* a newly generated patch file: 'hash' describes the patch contents,
+ * 'from' the file state the patch applies to (set by the caller) */
+struct patch {
+	struct patch *next;
+	/* name of the patch without directory and .gz.new suffix */
+	char *basefilename;
+	size_t basefilename_len;
+	/* while set, the on-disk file is removed when freed */
+	char *fullfilename;
+	struct hash hash, from;
+};
+
+/* free a list of patches, removing any still-owned files on disk */
+static void patches_free(struct patch *r) {
+	struct patch *n;
+
+	for ( ; r != NULL ; r = n) {
+		n = r->next;
+		free(r->basefilename);
+		if (r->fullfilename != NULL) {
+			(void)unlink(r->fullfilename);
+			free(r->fullfilename);
+		}
+		free(r);
+	}
+}
+
+/* Write the modifications 'r' through gzip into
+ * <directory>/<relfilename>.diff/<since><date>.gz.new and, on success,
+ * prepend an entry for it to *root_p. The recorded hash is taken over
+ * the data fed to the compressor (the uncompressed patch text). */
+static retvalue new_diff_file(struct patch **root_p, const char *directory, const char *relfilename, const char *since, const char date[DATELEN+1], struct modification *r) {
+	struct patch *p;
+	int i, status, fd, pipefds[2], tries = 3;
+	pid_t child, pid;
+	retvalue result;
+	struct fileandhash fh;
+
+	p = zNEW(struct patch);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+
+	if (since == NULL)
+		since = "";
+	p->basefilename = mprintf("%s%s", since, date);
+	if (FAILEDTOALLOC(p->basefilename)) {
+		patches_free(p);
+		return RET_ERROR_OOM;
+	}
+	p->basefilename_len = strlen(p->basefilename);
+	p->fullfilename = mprintf("%s/%s.diff/%s.gz.new",
+			directory, relfilename, p->basefilename);
+	if (FAILEDTOALLOC(p->fullfilename)) {
+		patches_free(p);
+		return RET_ERROR_OOM;
+	}
+	/* create the file */
+	while (tries-- > 0) {
+		int e;
+
+		fd = open(p->fullfilename, O_CREAT|O_EXCL|O_NOCTTY|O_WRONLY, 0666);
+		if (fd >= 0)
+			break;
+		e = errno;
+		if (e == EEXIST && tries > 0)
+			unlink(p->fullfilename);
+		else {
+			fprintf(stderr,
+"rredtool: Error %d creating '%s': %s\n",
+					e, p->fullfilename, strerror(e));
+			/* do not leak the half-initialized patch */
+			patches_free(p);
+			return RET_ERROR;
+		}
+	}
+	assert (fd >= 0);
+	/* start an child to compress connected via a pipe */
+	i = pipe(pipefds);
+	if (i != 0) {
+		int e = errno;
+		fprintf(stderr, "rredtool: Error %d creating pipe: %s\n",
+				e, strerror(e));
+		(void)close(fd);
+		/* patches_free also removes the created file */
+		patches_free(p);
+		return RET_ERROR;
+	}
+	/* only inspect the descriptors once pipe() succeeded */
+	assert (pipefds[0] > 0);
+	child = fork();
+	if (child == (pid_t)-1) {
+		int e = errno;
+		fprintf(stderr, "rredtool: Error %d forking: %s\n",
+				e, strerror(e));
+		(void)close(pipefds[0]);
+		(void)close(pipefds[1]);
+		(void)close(fd);
+		patches_free(p);
+		return RET_ERROR;
+	}
+	if (child == 0) {
+		int e;
+
+		/* child: read end of pipe becomes stdin, file stdout */
+		close(pipefds[1]);
+		do {
+			i = dup2(pipefds[0], 0);
+			e = errno;
+		} while (i < 0 && (e == EINTR || e == EBUSY));
+		if (i < 0) {
+			fprintf(stderr,
+"rredtool: Error %d in dup2(%d, 0): %s\n",
+					e, pipefds[0], strerror(e));
+			raise(SIGUSR1);
+			exit(EXIT_FAILURE);
+		}
+		do {
+			i = dup2(fd, 1);
+			e = errno;
+		} while (i < 0 && (e == EINTR || e == EBUSY));
+		if (i < 0) {
+			fprintf(stderr,
+"rredtool: Error %d in dup2(%d, 1): %s\n",
+					e, fd, strerror(e));
+			raise(SIGUSR1);
+			exit(EXIT_FAILURE);
+		}
+		close(pipefds[0]);
+		close(fd);
+		closefrom(3);
+		execlp("gzip", "gzip", "-9", (char*)NULL);
+		/* only reached when exec failed: fetch its errno now */
+		e = errno;
+		fprintf(stderr, "rredtool: Error %d executing gzip: %s\n",
+				e, strerror(e));
+		raise(SIGUSR1);
+		exit(EXIT_FAILURE);
+	}
+	close(pipefds[0]);
+	close(fd);
+	/* send the data to the child */
+	fh.f = fdopen(pipefds[1], "w");
+	if (fh.f == NULL) {
+		int e = errno;
+		fprintf(stderr,
+"rredtool: Error %d fdopen'ing write end of pipe to compressor: %s\n",
+				e, strerror(e));
+		close(pipefds[1]);
+		patches_free(p);
+		kill(child, SIGTERM);
+		waitpid(child, NULL, 0);
+		return RET_ERROR;
+	}
+	SHA1Init(&fh.context);
+	fh.len = 0;
+	modification_printaspatch(&fh, r, hash_and_write);
+	result = RET_OK;
+	i = ferror(fh.f);
+	if (i != 0) {
+		fprintf(stderr, "rredtool: Error sending data to gzip!\n");
+		(void)fclose(fh.f);
+		result = RET_ERROR;
+	} else {
+		i = fclose(fh.f);
+		if (i != 0) {
+			int e = errno;
+			fprintf(stderr,
+"rredtool: Error %d sending data to gzip: %s!\n",
+					e, strerror(e));
+			result = RET_ERROR;
+		}
+	}
+	do {
+		pid = waitpid(child, &status, 0);
+	} while (pid == (pid_t)-1 && errno == EINTR);
+	if (pid == (pid_t)-1) {
+		int e = errno;
+		fprintf(stderr,
+"rredtool: Error %d waiting for gzip child %lu: %s!\n",
+				e, (unsigned long)child, strerror(e));
+		patches_free(p);
+		return RET_ERROR;
+	}
+	if (WIFEXITED(status) && WEXITSTATUS(status) == 0) {
+		if (RET_IS_OK(result)) {
+			finalize_sha1(&fh.context, fh.len, &p->hash);
+			/* hand the new patch over to the caller */
+			p->next = *root_p;
+			*root_p = p;
+		} else
+			/* write error: remove file and free the struct */
+			patches_free(p);
+		return result;
+	}
+	patches_free(p);
+	if (WIFEXITED(status)) {
+		fprintf(stderr,
+"rredtool: gzip returned with non-zero exit code %d\n",
+				(int)(WEXITSTATUS(status)));
+		return RET_ERROR;
+	}
+	if (WIFSIGNALED(status)) {
+		fprintf(stderr, "rredtool: gzip killed by signal %d\n",
+				(int)(WTERMSIG(status)));
+		return RET_ERROR;
+	}
+	fprintf(stderr, "rredtool: gzip child dies mysteriously (status=%d)\n",
+			status);
+	return RET_ERROR;
+}
+
+/* Write a fresh Index.new file listing the current file's checksum
+ * and the history/patch checksums of every patch in 'root'. */
+static retvalue write_new_index(const char *newindexfilename, const struct hash *newhash, const struct patch *root) {
+	int tries, fd, i;
+	const struct patch *p;
+
+	/* retry once, removing a possibly left over file in between */
+	tries = 2;
+	while (tries > 0) {
+		errno = 0;
+		fd = open(newindexfilename,
+				O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+		if (fd >= 0)
+			break;
+		if (errno == EINTR)
+			continue;
+		tries--;
+		if (errno != EEXIST)
+			break;
+		unlink(newindexfilename);
+	}
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d creating '%s': %s\n",
+				e, newindexfilename, strerror(e));
+		return RET_ERROR;
+	}
+	/* i stays >= 0 as long as no write failed */
+	i = dprintf(fd, "SHA1-Current: %s %lld\n" "SHA1-History:\n",
+			newhash->sha1, (long long)newhash->len);
+	for (p = root ; i >= 0 && p != NULL ; p = p->next) {
+		i = dprintf(fd, " %s %7ld %s\n",
+				p->from.sha1, (long int)p->from.len,
+				p->basefilename);
+	}
+	if (i >= 0)
+		i = dprintf(fd, "SHA1-Patches:\n");
+	for (p = root ; i >= 0 && p != NULL ; p = p->next) {
+		i = dprintf(fd, " %s %7ld %s\n",
+				p->hash.sha1, (long int)p->hash.len,
+				p->basefilename);
+	}
+	if (i >= 0)
+		i = dprintf(fd, "X-Patch-Precedence: merged\n");
+	if (i >= 0) {
+		/* a close error counts as write error, too */
+		i = close(fd);
+		fd = -1;
+	}
+	if (i < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d writing to '%s': %s\n",
+				e, newindexfilename, strerror(e));
+		if (fd >= 0)
+			(void)close(fd);
+		/* do not leave a partial Index.new behind */
+		unlink(newindexfilename);
+		return RET_ERRNO(e);
+	}
+	return RET_OK;
+}
+
+/* Tell reprepro (via fd 3) which .gz/.gz.new files in the .diff
+ * directory are no longer needed; files listed in 'keep' survive.
+ * With keep == NULL the Index file is scheduled for removal, too. */
+static void remove_old_diffs(const char *relfilename, const char *diffdirectory, const char *indexfilename, const struct patch *keep) {
+	DIR *dir;
+	struct dirent *entry;
+	const struct patch *k;
+
+	if (!isdirectory(diffdirectory))
+		return;
+	dir = opendir(diffdirectory);
+	if (dir == NULL)
+		return;
+
+	while ((entry = readdir(dir)) != NULL) {
+		size_t l = strlen(entry->d_name);
+
+		/* special rule for that */
+		if (l == 5 && strcmp(entry->d_name, "Index") == 0)
+			continue;
+
+		/* only look at names ending in .gz or .gz.new */
+		if (l >= 4 && memcmp(entry->d_name + l - 4, ".new", 4) == 0)
+			l -= 4;
+		if (l < 3)
+			continue;
+		if (memcmp(entry->d_name + l - 3, ".gz", 3) != 0)
+			continue;
+		l -= 3;
+
+		/* do not mark files to be deleted we still need: */
+		for (k = keep ; k != NULL ; k = k->next) {
+			if (k->basefilename_len == l &&
+			    memcmp(k->basefilename, entry->d_name, l) == 0)
+				break;
+		}
+		if (k != NULL)
+			continue;
+		/* otherwise, tell reprepro this file is no longer needed: */
+		dprintf(3, "%s.diff/%s.tobedeleted\n",
+				relfilename, entry->d_name);
+	}
+	closedir(dir);
+	if (isregularfile(indexfilename) && keep == NULL)
+		dprintf(3, "%s.diff/Index.tobedeleted\n",
+				relfilename);
+}
+
+/* Run diff --ed over the two files and load the resulting
+ * ed-style patch; diff exits with 1 when the files differ. */
+static retvalue ed_diff(const char *oldfullfilename, const char *newfullfilename, /*@out@*/struct rred_patch **rred_p) {
+	retvalue r;
+	int fd;
+	const char *command[6] = { "diff", "--ed", "--minimal",
+			oldfullfilename, newfullfilename, NULL };
+
+	r = execute_into_file(command, &fd, 1);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	return patch_loadfd("<temporary file>", fd, -1, rred_p);
+}
+
+/* Decompress an existing patch file with gunzip and load it.
+ * Returns RET_NOTHING when the .gz file does not exist (any more). */
+static retvalue read_old_patch(const char *directory, const char *relfilename, const struct old_patch *o, /*@out@*/struct rred_patch **rred_p) {
+	retvalue r;
+	const char *args[4];
+	char *filename;
+	int fd;
+
+	filename = mprintf("%s/%s.diff/%s.gz",
+			directory, relfilename, o->basefilename);
+	/* the allocation was previously unchecked */
+	if (FAILEDTOALLOC(filename))
+		return RET_ERROR_OOM;
+	if (!isregularfile(filename)) {
+		/* previously leaked here */
+		free(filename);
+		return RET_NOTHING;
+	}
+	args[0] = "gunzip";
+	args[1] = "-c";
+	args[2] = filename;
+	args[3] = NULL;
+
+	r = execute_into_file(args, &fd, 0);
+	free(filename);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	return patch_loadfd("<temporary file>", fd, -1, rred_p);
+}
+
+/* Maintain the .diff directory for one index file, mode being "new",
+ * "old" or "change". On success the names of files to be added, kept
+ * or removed are reported to reprepro on file descriptor 3. */
+static retvalue handle_diff(const char *directory, const char *mode, const char *relfilename, const char *fullfilename, const char *fullnewfilename, const char *diffdirectory, const char *indexfilename, const char *newindexfilename) {
+	retvalue r;
+	int patch_count;
+	struct hash oldhash, newhash;
+	char date[DATELEN + 1];
+	struct patch *p, *root = NULL;
+	enum {mode_OLD, mode_NEW, mode_CHANGE} m;
+	struct rred_patch *new_rred_patch;
+	struct modification *new_modifications;
+	struct old_index_file old_index;
+	struct old_patch *o;
+#if defined(APT_545694_WORKAROUND) || defined(APT_545699_WORKAROUND)
+	char *line;
+	struct modification *newdup;
+#endif
+
+	if (strcmp(mode, "new") == 0)
+		m = mode_NEW;
+	else if (strcmp(mode, "old") == 0)
+		m = mode_OLD;
+	else if (strcmp(mode, "change") == 0)
+		m = mode_CHANGE;
+	else {
+		usage(stderr);
+		fprintf(stderr,
+"Error: 4th argument to rredtool in .diff maintenance mode must be 'new', 'old' or 'change'!\n");
+		return RET_ERROR;
+	}
+
+	if (m == mode_NEW) {
+		/* There is no old file, nothing to do.
+		 * except checking for old diff files
+		 * and marking them to be deleted */
+		remove_old_diffs(relfilename, diffdirectory,
+				indexfilename, NULL);
+		return RET_OK;
+	}
+
+	/* the date becomes part of every new patch's name */
+	r = get_date_string(date, sizeof(date));
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	assert (m == mode_OLD || m == mode_CHANGE);
+
+	/* calculate sha1 checksum of old file */
+	r = gen_sha1sum(fullfilename, &oldhash);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "rredtool: expected file '%s' is missing!\n",
+				fullfilename);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	if (m == mode_CHANGE) {
+		/* calculate sha1 checksum of the new file */
+		r = gen_sha1sum(fullnewfilename, &newhash);
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"rredtool: expected file '%s' is missing!\n",
+					fullnewfilename);
+			r = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		/* if new == old, nothing to do */
+		if (newhash.len == oldhash.len &&
+		    strcmp(newhash.sha1, oldhash.sha1) == 0) {
+			m = mode_OLD;
+		}
+	}
+
+	if (oldhash.len == 0 || (m == mode_CHANGE && newhash.len == 0)) {
+		/* Old or new file empty. treat as mode_NEW.
+		 * (checked here instead of letting later
+		 * more general optimisations catch this as
+		 * this garantees there are enough lines to
+		 * make patches longer to work around apt bugs,
+		 * and because no need to parse Index if we want to delete
+		 * it anyway) */
+		remove_old_diffs(relfilename, diffdirectory,
+				indexfilename, NULL);
+		return RET_OK;
+	}
+
+	r = read_old_index(indexfilename, &old_index);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* ignore old Index file if it does not match the old file */
+	if (old_index.hash.len != (off_t)-1 && old_index.hash.len != oldhash.len) {
+		old_index_done(&old_index);
+		memset(&old_index, 0, sizeof(old_index));
+	}
+	if (memcmp(old_index.hash.sha1, oldhash.sha1, 2*SHA1_DIGEST_SIZE) != 0) {
+		old_index_done(&old_index);
+		memset(&old_index, 0, sizeof(old_index));
+	}
+
+	if (m == mode_OLD) {
+		/* this index file did not change.
+		 * keep old or delete if not current */
+		if (old_index.hash.sha1[0] != '\0') {
+			for (o = old_index.first ; o != NULL ; o = o->next)
+				dprintf(3, "%s.diff/%s.gz.keep\n",
+						relfilename, o->basefilename);
+			dprintf(3, "%s.diff/Index\n", relfilename);
+		} else {
+			remove_old_diffs(relfilename, diffdirectory,
+					indexfilename, NULL);
+		}
+		old_index_done(&old_index);
+		return RET_OK;
+	}
+	assert (m == mode_CHANGE);
+
+	/* errors are ignored: a later open() will complain instead */
+	mkdir(diffdirectory, 0777);
+
+#ifdef APT_545699_WORKAROUND
+	/* create a fake diff to work around http://bugs.debian.org/545699 */
+	newdup = NULL;
+	r = modification_addstuff(fullnewfilename, &newdup, &line);
+	if (RET_WAS_ERROR(r)) {
+		modification_freelist(newdup);
+		old_index_done(&old_index);
+		return r;
+	}
+	/* save this compressed and store it's sha1sum */
+	r = new_diff_file(&root, directory, relfilename, "aptbug545699+", date,
+			newdup);
+	modification_freelist(newdup);
+	free(line);
+	if (RET_WAS_ERROR(r)) {
+		old_index_done(&old_index);
+		return r;
+	}
+	root->from = newhash;
+#endif
+
+	/* create new diff calling diff --ed */
+	r = ed_diff(fullfilename, fullnewfilename, &new_rred_patch);
+	if (RET_WAS_ERROR(r)) {
+		old_index_done(&old_index);
+		patches_free(root);
+		return r;
+	}
+
+	new_modifications = patch_getmodifications(new_rred_patch);
+	assert (new_modifications != NULL);
+
+#ifdef APT_545694_WORKAROUND
+	newdup = modification_dup(new_modifications);
+	if (RET_WAS_ERROR(r)) {
+		modification_freelist(new_modifications);
+		patch_free(new_rred_patch);
+		old_index_done(&old_index);
+		patches_free(root);
+		return r;
+	}
+	r = modification_addstuff(fullnewfilename, &newdup, &line);
+	if (RET_WAS_ERROR(r)) {
+		modification_freelist(newdup);
+		modification_freelist(new_modifications);
+		patch_free(new_rred_patch);
+		old_index_done(&old_index);
+		patches_free(root);
+		return r;
+	}
+#endif
+
+	/* save this compressed and store it's sha1sum */
+	r = new_diff_file(&root, directory, relfilename, NULL, date,
+#ifdef APT_545694_WORKAROUND
+			newdup);
+	modification_freelist(newdup);
+	free(line);
+#else
+			new_modifications);
+#endif
+	// TODO: special handling of misparsing to cope with that better?
+	if (RET_WAS_ERROR(r)) {
+		modification_freelist(new_modifications);
+		patch_free(new_rred_patch);
+		old_index_done(&old_index);
+		patches_free(root);
+		return r;
+	}
+	/* the new patch transforms the old state into the new one */
+	root->from = oldhash;
+
+	/* if the diff is bigger than the new file,
+	 * there is no point in not getting the full file.
+	 * And as in all but extremely strange situations this
+	 * also means all older patches will get bigger when merged,
+	 * do not even bother to calculate them but remove all. */
+	if (root->hash.len > newhash.len) {
+		modification_freelist(new_modifications);
+		patch_free(new_rred_patch);
+		old_index_done(&old_index);
+		patches_free(root);
+		remove_old_diffs(relfilename, diffdirectory,
+				indexfilename, NULL);
+		return RET_OK;
+	}
+
+	patch_count = 1;
+	/* merge this into the old patches */
+	for (o = old_index.last ; o != NULL ; o = o->prev) {
+		struct rred_patch *old_rred_patch;
+		struct modification *d, *merged;
+
+		/* ignore old and new hash, to filter out old
+		 * pseudo-empty patches and to reduce the number
+		 * of patches in case the file is reverted to an
+		 * earlier state */
+		if (memcmp(o->hash.sha1, old_index.hash.sha1,
+					sizeof(old_index.hash.sha1)) == 0)
+			continue;
+		if (memcmp(o->hash.sha1, newhash.sha1,
+					sizeof(newhash.sha1)) == 0)
+			continue;
+		/* limit number of patches
+		 * (Index needs to be downloaded, too) */
+
+		if (patch_count >= max_patch_count)
+			continue;
+
+		/* empty files only make problems.
+		 * If you have a non-empty file with the sha1sum of an empty
+		 * one: Congratulations */
+		if (strcmp(o->hash.sha1,
+		           "da39a3ee5e6b4b0d3255bfef95601890afd80709") == 0)
+			continue;
+
+		r = read_old_patch(directory, relfilename, o, &old_rred_patch);
+		if (r == RET_NOTHING)
+			continue;
+		// TODO: special handling of misparsing to cope with that better?
+		if (RET_WAS_ERROR(r)) {
+			modification_freelist(new_modifications);
+			patch_free(new_rred_patch);
+			old_index_done(&old_index);
+			patches_free(root);
+			return r;
+		}
+
+		/* NOTE(review): this checks r, not whether d failed to
+		 * allocate -- looks like it should test d; confirm */
+		d = modification_dup(new_modifications);
+		if (RET_WAS_ERROR(r)) {
+			patch_free(old_rred_patch);
+			patch_free(new_rred_patch);
+			old_index_done(&old_index);
+			patches_free(root);
+			return r;
+		}
+		r = combine_patches(&merged,
+				patch_getmodifications(old_rred_patch), d);
+		if (RET_WAS_ERROR(r)) {
+			modification_freelist(new_modifications);
+			patch_free(old_rred_patch);
+			patch_free(new_rred_patch);
+			old_index_done(&old_index);
+			patches_free(root);
+			return r;
+		}
+		if (merged == NULL) {
+			/* this should never happen as the sha1sum should
+			 * already be the same, but better safe than sorry */
+			patch_free(old_rred_patch);
+			continue;
+		}
+#ifdef APT_545694_WORKAROUND
+		r = modification_addstuff(fullnewfilename, &merged, &line);
+		if (RET_WAS_ERROR(r)) {
+			modification_freelist(merged);
+			patch_free(old_rred_patch);
+			modification_freelist(new_modifications);
+			patch_free(new_rred_patch);
+			old_index_done(&old_index);
+			patches_free(root);
+			return r;
+		}
+#endif
+		r = new_diff_file(&root, directory, relfilename,
+				o->nameprefix, date, merged);
+		modification_freelist(merged);
+#ifdef APT_545694_WORKAROUND
+		free(line);
+#endif
+		patch_free(old_rred_patch);
+		if (RET_WAS_ERROR(r)) {
+			modification_freelist(new_modifications);
+			patch_free(new_rred_patch);
+			old_index_done(&old_index);
+			patches_free(root);
+			return r;
+		}
+		/* the merged patch starts from the old patch's state */
+		root->from = o->hash;
+
+		/* remove patches that are bigger than the new file */
+		if (root->hash.len >= newhash.len) {
+			struct patch *n;
+
+			n = root;
+			root = n->next;
+			n->next = NULL;
+			patches_free(n);
+		}
+		patch_count++;
+	}
+
+	modification_freelist(new_modifications);
+	patch_free(new_rred_patch);
+	old_index_done(&old_index);
+
+	assert (root != NULL);
+#ifdef APT_545699_WORKAROUND
+	assert (root->next != NULL);
+#endif
+
+	/* write new Index file */
+	r = write_new_index(newindexfilename, &newhash, root);
+	if (RET_WAS_ERROR(r)) {
+		patches_free(root);
+		return r;
+	}
+
+	/* tell reprepro to remove all no longer needed files */
+	remove_old_diffs(relfilename, diffdirectory, indexfilename, root);
+
+	/* tell reprepro to move those files to their final place
+	 * and include the Index in the Release file */
+
+	for (p = root ; p != NULL ; p = p->next) {
+		/* the trailing . means add but do not put in Release */
+		dprintf(3, "%s.diff/%s.gz.new.\n",
+				relfilename, p->basefilename);
+		/* no longer delete: */
+		free(p->fullfilename);
+		p->fullfilename = NULL;
+	}
+	dprintf(3, "%s.diff/Index.new\n", relfilename);
+	patches_free(root);
+	return RET_OK;
+}
+
+/* Build the file names needed for .diff maintenance from the four
+ * hook arguments and delegate the real work to handle_diff. */
+static retvalue handle_diff_dir(const char *args[4]) {
+	const char *directory = args[0];
+	const char *mode = args[3];
+	const char *relfilename = args[2];
+	const char *relnewfilename = args[1];
+	char *fullfilename = NULL, *fullnewfilename = NULL;
+	char *diffdirectory = NULL;
+	char *indexfilename = NULL;
+	char *newindexfilename = NULL;
+	retvalue r = RET_ERROR_OOM;
+
+	fullfilename = mprintf("%s/%s", directory, relfilename);
+	fullnewfilename = mprintf("%s/%s", directory, relnewfilename);
+	if (FAILEDTOALLOC(fullfilename) || FAILEDTOALLOC(fullnewfilename))
+		goto out;
+	diffdirectory = mprintf("%s.diff", fullfilename);
+	indexfilename = mprintf("%s.diff/Index", fullfilename);
+	newindexfilename = mprintf("%s.diff/Index.new", fullfilename);
+	if (FAILEDTOALLOC(diffdirectory) || FAILEDTOALLOC(indexfilename)
+			|| FAILEDTOALLOC(newindexfilename))
+		goto out;
+	r = handle_diff(directory, mode, relfilename,
+			fullfilename, fullnewfilename, diffdirectory,
+			indexfilename, newindexfilename);
+  out:
+	/* free(NULL) is a no-op, so this covers all exits */
+	free(diffdirectory);
+	free(indexfilename);
+	free(newindexfilename);
+	free(fullfilename);
+	free(fullnewfilename);
+	return r;
+}
+
+/* callback adapter for modification_printaspatch:
+ * 'to' is really a FILE pointer */
+static void write_to_file(const void *data, size_t len, void *to) {
+	fwrite(data, len, 1, (FILE *)to);
+}
+
+/* Entry point. Three modes of operation:
+ * - reprepro hook mode (default / --reprepro-hook):
+ *   maintain a .diff directory for an index file,
+ * - --merge: merge the given patches and print the result,
+ * - --patch: apply the given patches to the given file. */
+int main(int argc, const char *argv[]) {
+	struct rred_patch *patches[argc];
+	struct modification *m;
+	retvalue r;
+	bool mergemode = false;
+	bool patchmode = false;
+	bool repreprohook = false;
+	int i, count;
+	const char *sourcename;
+	int debug = 0;
+
+	/* "N:" added so -N is usable as short option, too
+	 * (the 'N' case below was otherwise only reachable if the
+	 * long option table maps to it) */
+	while ((i = getopt_long(argc, (char**)argv, "+hVDmpRN:", options, NULL)) != -1) {
+		switch (i) {
+		case 'h':
+			usage(stdout);
+			return EXIT_SUCCESS;
+		case 'V':
+			/* the version output was missing its newline */
+			printf(
+"rred-tool from " PACKAGE_NAME " version " PACKAGE_VERSION "\n");
+			return EXIT_SUCCESS;
+		case 'D':
+			debug++;
+			break;
+		case 'm':
+			mergemode = 1;
+			break;
+		case 'p':
+			patchmode = 1;
+			break;
+		case 'N':
+			max_patch_count = atoi(optarg);
+			break;
+		case 'R':
+			repreprohook = 1;
+			break;
+		case '?':
+		default:
+			return EXIT_FAILURE;
+
+		}
+	}
+
+	if (repreprohook && mergemode) {
+		fprintf(stderr,
+"Cannot do --reprepro-hook and --merge at the same time!\n");
+		return EXIT_FAILURE;
+	}
+	if (repreprohook && patchmode) {
+		fprintf(stderr,
+"Cannot do --reprepro-hook and --patch at the same time!\n");
+		return EXIT_FAILURE;
+	}
+
+	/* plain invocation or --reprepro-hook: .diff maintenance mode */
+	if (repreprohook || (!mergemode && !patchmode)) {
+		if (optind + 4 != argc) {
+			usage(stderr);
+			return EXIT_FAILURE;
+		}
+		r = handle_diff_dir(argv + optind);
+		if (r == RET_ERROR_OOM) {
+			fputs("Out of memory!\n", stderr);
+		}
+		if (RET_WAS_ERROR(r))
+			return EXIT_FAILURE;
+		return EXIT_SUCCESS;
+	}
+
+	i = optind;
+	if (!mergemode) {
+		/* patch mode needs the file to patch first */
+		if (i >= argc) {
+			fprintf(stderr, "Not enough arguments!\n");
+			return EXIT_FAILURE;
+		}
+		sourcename = argv[i++];
+	} else {
+		SETBUTNOTUSED( sourcename = NULL; )
+	}
+	if (mergemode && patchmode) {
+		fprintf(stderr,
+"Cannot do --merge and --patch at the same time!\n");
+		return EXIT_FAILURE;
+	}
+
+	/* load all patches given on the command line */
+	count = 0;
+	while (i < argc) {
+		r = patch_load(argv[i], -1, &patches[count]);
+		if (RET_IS_OK(r))
+			count++;
+		if (RET_WAS_ERROR(r)) {
+			if (r == RET_ERROR_OOM)
+				fputs("Out of memory!\n", stderr);
+			else
+				fputs("Aborting...\n", stderr);
+			return EXIT_FAILURE;
+		}
+		i++;
+	}
+	if (count <= 0) {
+		fprintf(stderr, "Not enough patches for operation...\n");
+		return EXIT_FAILURE;
+	}
+	/* fold all patches into a single list of modifications */
+	m = patch_getmodifications(patches[0]);
+	for (i = 1; i < count ; i++) {
+		struct modification *a = patch_getmodifications(patches[i]);
+		if (debug) {
+			fputs("--------RESULT SO FAR--------\n", stderr);
+			modification_printaspatch(stderr, m, write_to_file);
+			fputs("--------TO BE MERGED WITH-----\n", stderr);
+			modification_printaspatch(stderr, a, write_to_file);
+			fputs("-------------END--------------\n", stderr);
+		}
+		r = combine_patches(&m, m, a);
+		if (RET_WAS_ERROR(r)) {
+			for (i = 0 ; i < count ; i++) {
+				patch_free(patches[i]);
+			}
+			if (r == RET_ERROR_OOM)
+				fputs("Out of memory!\n", stderr);
+			else
+				fputs("Aborting...\n", stderr);
+			return EXIT_FAILURE;
+		}
+	}
+	r = RET_OK;
+	if (mergemode) {
+		modification_printaspatch(stdout, m, write_to_file);
+	} else {
+		r = patch_file(stdout, sourcename, m);
+	}
+	if (ferror(stdout)) {
+		fputs("Error writing to stdout!\n", stderr);
+		r = RET_ERROR;
+	}
+	modification_freelist(m);
+	for (i = 0 ; i < count ; i++)
+		patch_free(patches[i]);
+	if (r == RET_ERROR_OOM)
+		fputs("Out of memory!\n", stderr);
+	if (RET_WAS_ERROR(r))
+		return EXIT_FAILURE;
+	return EXIT_SUCCESS;
+}
diff --git a/sha1.c b/sha1.c
new file mode 100644
index 0000000..dcc7c43
--- /dev/null
+++ b/sha1.c
@@ -0,0 +1,201 @@
+/*
+SHA-1 in C
+By Steve Reid <sreid@sea-to-sky.net>
+100% Public Domain
+
+-----------------
+Modified 7/98
+By James H. Brown <jbrown@burgoyne.com>
+Still 100% Public Domain
+
+[changes omitted as reverted]
+-----------------
+Modified 8/98
+By Steve Reid <sreid@sea-to-sky.net>
+Still 100% public domain
+
+1- Removed #include <process.h> and used return() instead of exit()
+2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall)
+3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net
+
+-----------------
+Modified 4/01
+By Saul Kravitz <Saul.Kravitz@celera.com>
+Still 100% PD
+Modified to run on Compaq Alpha hardware.
+
+-----------------
+Modified 07/2002
+By Ralph Giles <giles@ghostscript.com>
+Still 100% public domain
+modified for use with stdint types, autoconf
+code cleanup, removed attribution comments
+switched SHA1Final() argument order for consistency
+use SHA1_ prefix for public api
+move public api to sha1.h
+
+------------------------
+Modified 11/2007
+by Bernhard R. Link <brlink@debian.org>
+Still 100% public domain:
+Removed everything not related to hash itself,
+removed wiping of temp data (as not needed for public data)
+multiple modifications to make it more what I consider readable.
+using endian.h now.
+multiple more modifications...
+
+Modified 06/2008
+by Bernhard R. Link <brlink@debian.org>
+Still 100% public domain:
+use WORDS_BIGENDIAN instead of endian.h
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "sha1.h"
+
+static void SHA1_Transform(uint32_t state[5], const uint8_t buffer[64]);
+
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
#define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \
	^block[(i+2)&15]^block[i&15],1))

/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+block[i]+0x5A827999+rol(v,5);w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);

/* Hash a single 512-bit block. This is the core of the algorithm.
 * 'state' is updated in place; 'buffer' is read as 16 big-endian
 * 32-bit words.  Marked 'static' to match the forward declaration
 * above (the original definition omitted the storage class, which
 * is legal but relies on linkage inheritance and is confusing). */
static void SHA1_Transform(uint32_t state[5], const uint8_t buffer[64])
{
	uint32_t a, b, c, d, e;
	uint32_t block[16];
#ifndef WORDS_BIGENDIAN
	int i;
#endif

	assert (sizeof(block) == 64*sizeof(uint8_t));
#ifdef WORDS_BIGENDIAN
	/* on big-endian hosts the buffer already has the word layout */
	memcpy(block, buffer, sizeof(block));
#else
	/* assemble big-endian words byte by byte */
	for (i = 0 ; i < 16 ; i++) {
		block[i] = (buffer[4*i]<<24) | (buffer[4*i+1]<<16) |
			(buffer[4*i+2]<<8) | buffer[4*i+3];
	}
#endif

	/* Copy context->state[] to working vars */
	a = state[0];
	b = state[1];
	c = state[2];
	d = state[3];
	e = state[4];

	/* 4 rounds of 20 operations each. Loop unrolled. */
	R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
	R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
	R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
	R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
	R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
	R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
	R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
	R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
	R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
	R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
	R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
	R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
	R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
	R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
	R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
	R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
	R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
	R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
	R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
	R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);

	/* Add the working vars back into context.state[] */
	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
	state[4] += e;
}
+
+
+/* SHA1Init - Initialize new context */
+void SHA1Init(struct SHA1_Context *context)
+{
+ /* SHA1 initialization constants */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+ context->count = 0;
+}
+
+
+/* Run your data through this. */
+void SHA1Update(struct SHA1_Context *context, const uint8_t* data, const size_t len)
+{
+ size_t i, j;
+
+ j = context->count & 63;
+ context->count += len;
+ if (j == 0) {
+ for (i = 0 ; len >= i + 64 ; i += 64) {
+ SHA1_Transform(context->state, data + i);
+ }
+ j = 0;
+ } else if ((j + len) >= 64) {
+ memcpy(&context->buffer[j], data, (i = 64-j));
+ SHA1_Transform(context->state, context->buffer);
+ for (; len >= i + 64 ; i += 64) {
+ SHA1_Transform(context->state, data + i);
+ }
+ j = 0;
+ }
+ else i = 0;
+ memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+
/* Add padding and return the message digest.
 * Appends the 0x80 marker byte, zero padding up to 56 mod 64, and the
 * 64-bit big-endian bit count, then extracts the digest byte-wise in
 * big-endian order (so no endianness conditionals are needed here). */
void SHA1Final(struct SHA1_Context *context, uint8_t digest[SHA1_DIGEST_SIZE])
{
	unsigned char i;
	int j;
	uint64_t bitcount;

	/* message length in bits, as required by the padding scheme */
	bitcount = context->count << 3;
	i = context->count & 63;
	/* 0x80 marker directly after the last data byte */
	context->buffer[i] = '\200';
	i++;
	if (i > 56) {
		/* no room for the 8 length bytes: flush an extra block */
		if (i < 64)
			memset(context->buffer + i, 0, 64-i);
		SHA1_Transform(context->state, context->buffer);
		i = 0;
	}
	if (i < 56) {
		memset(context->buffer + i, 0, 56-i);
	}
	/* store the bit count big-endian in bytes 56..63 */
	for (j = 7; j >= 0; j--) {
		context->buffer[56 + j] = bitcount & 0xFF;
		bitcount >>= 8;
	}
	SHA1_Transform(context->state, context->buffer);
	/* serialize the five state words most-significant byte first */
	for (i = 0; i < SHA1_DIGEST_SIZE; i++) {
		digest[i] = (uint8_t)
			((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
	}
}
diff --git a/sha1.h b/sha1.h
new file mode 100644
index 0000000..35fe4bb
--- /dev/null
+++ b/sha1.h
@@ -0,0 +1,15 @@
#ifndef REPREPRO_SHA1_H
#define REPREPRO_SHA1_H

/* Make the header self-contained: the declarations below use
 * uint32_t/uint64_t/uint8_t and size_t. */
#include <stddef.h>
#include <stdint.h>

/* Running state of a SHA-1 computation. */
struct SHA1_Context {
	uint32_t state[5];	/* the five 32-bit chaining variables */
	uint64_t count;		/* total bytes hashed so far */
	uint8_t buffer[64];	/* pending input, (count & 63) bytes used */
};
#define SHA1_DIGEST_SIZE 20

void SHA1Init(/*@out@*/struct SHA1_Context *context);
void SHA1Update(struct SHA1_Context *context, const uint8_t *data, const size_t len);
void SHA1Final(struct SHA1_Context *context, /*@out@*/uint8_t digest[SHA1_DIGEST_SIZE]);

#endif
diff --git a/sha256.c b/sha256.c
new file mode 100644
index 0000000..fe7eca6
--- /dev/null
+++ b/sha256.c
@@ -0,0 +1,274 @@
+/* sha256 implementation, taken (with minor modification) from sha256crypt.c,
+ which states:
+ Released into the Public Domain by Ulrich Drepper <drepper@redhat.com>.
+ Neglegible modifications by Bernhard R. Link, also in the public domain.
+*/
+
+#include <config.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include "sha256.h"
+
+#ifndef WORDS_BIGENDIAN
+# define SWAP(n) \
+ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
+#else
+# define SWAP(n) (n)
+#endif
+
+
+/* This array contains the bytes used to pad the buffer to the next
+ 64-byte boundary. (FIPS 180-2:5.1.1) */
+static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ... */ };
+
+
+/* Constants for SHA256 from FIPS 180-2:4.2.2. */
+static const uint32_t K[64] =
+ {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+ };
+
+
/* Process LEN bytes of BUFFER, accumulating context into CTX.
   It is assumed that LEN % 64 == 0.
   NOTE(review): BUFFER is read through a uint32_t pointer, so callers
   must pass 4-byte-aligned data (SHA256Update checks UNALIGNED_P
   before calling this directly) -- confirm for any new caller. */
static void
sha256_process_block (const void *buffer, size_t len, struct SHA256_Context *ctx)
{
  const uint32_t *words = buffer;
  size_t nwords = len / sizeof (uint32_t);
  uint32_t a = ctx->H[0];
  uint32_t b = ctx->H[1];
  uint32_t c = ctx->H[2];
  uint32_t d = ctx->H[3];
  uint32_t e = ctx->H[4];
  uint32_t f = ctx->H[5];
  uint32_t g = ctx->H[6];
  uint32_t h = ctx->H[7];

  /* First increment the byte count.  FIPS 180-2 specifies the possible
     length of the file up to 2^64 bits.  Here we only compute the
     number of bytes.  */
  ctx->total += len;

  /* Process all bytes in the buffer with 64 bytes in each round of
     the loop.  */
  while (nwords > 0)
    {
      uint32_t W[64];
      uint32_t a_save = a;
      uint32_t b_save = b;
      uint32_t c_save = c;
      uint32_t d_save = d;
      uint32_t e_save = e;
      uint32_t f_save = f;
      uint32_t g_save = g;
      uint32_t h_save = h;

      /* Operators defined in FIPS 180-2:4.1.2.  */
#define Ch(x, y, z) ((x & y) ^ (~x & z))
#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define S0(x) (CYCLIC (x, 2) ^ CYCLIC (x, 13) ^ CYCLIC (x, 22))
#define S1(x) (CYCLIC (x, 6) ^ CYCLIC (x, 11) ^ CYCLIC (x, 25))
#define R0(x) (CYCLIC (x, 7) ^ CYCLIC (x, 18) ^ (x >> 3))
#define R1(x) (CYCLIC (x, 17) ^ CYCLIC (x, 19) ^ (x >> 10))

      /* It is unfortunate that C does not provide an operator for
	 cyclic rotation.  Hope the C compiler is smart enough.  */
#define CYCLIC(w, s) ((w >> s) | (w << (32 - s)))

      /* Compute the message schedule according to FIPS 180-2:6.2.2 step 2.
	 SWAP converts each little-endian input word to the big-endian
	 order the schedule requires (identity on big-endian hosts). */
      for (unsigned int t = 0; t < 16; ++t)
	{
	  W[t] = SWAP (*words);
	  ++words;
	}
      for (unsigned int t = 16; t < 64; ++t)
	W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];

      /* The actual computation according to FIPS 180-2:6.2.2 step 3.  */
      for (unsigned int t = 0; t < 64; ++t)
	{
	  uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
	  uint32_t T2 = S0 (a) + Maj (a, b, c);
	  h = g;
	  g = f;
	  f = e;
	  e = d + T1;
	  d = c;
	  c = b;
	  b = a;
	  a = T1 + T2;
	}

      /* Add the starting values of the context according to FIPS 180-2:6.2.2
	 step 4.  */
      a += a_save;
      b += b_save;
      c += c_save;
      d += d_save;
      e += e_save;
      f += f_save;
      g += g_save;
      h += h_save;

      /* Prepare for the next round: each iteration consumed one
	 64-byte block, i.e. 16 of the input words. */
      nwords -= 16;
    }

  /* Put checksum in context given as argument.  */
  ctx->H[0] = a;
  ctx->H[1] = b;
  ctx->H[2] = c;
  ctx->H[3] = d;
  ctx->H[4] = e;
  ctx->H[5] = f;
  ctx->H[6] = g;
  ctx->H[7] = h;
}
+
+
+/* Initialize structure containing state of computation.
+ (FIPS 180-2:5.3.2) */
+void
+SHA256Init(struct SHA256_Context *ctx)
+{
+ ctx->H[0] = 0x6a09e667;
+ ctx->H[1] = 0xbb67ae85;
+ ctx->H[2] = 0x3c6ef372;
+ ctx->H[3] = 0xa54ff53a;
+ ctx->H[4] = 0x510e527f;
+ ctx->H[5] = 0x9b05688c;
+ ctx->H[6] = 0x1f83d9ab;
+ ctx->H[7] = 0x5be0cd19;
+
+ ctx->total = 0;
+ ctx->buflen = 0;
+}
+
+
/* Process the remaining bytes in the internal buffer and the usual
   prolog according to the standard and write the result to digest.
   Appends the 0x80/zero padding and the 64-bit big-endian *bit*
   length, hashes the final block(s), and serializes H[] big-endian.
 */
void
SHA256Final(struct SHA256_Context *ctx, uint8_t *digest)
{
  /* Take yet unprocessed bytes into account. */
  uint32_t bytes = ctx->buflen;
  uint32_t bitslow, bitshigh;
  size_t pad;
  int i;

  /* Now count remaining bytes. */
  ctx->total += bytes;

  /* pad so that 8 bytes remain before the next 64-byte boundary */
  pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
  memcpy (&ctx->buffer[bytes], fillbuf, pad);

  /* Put the 64-bit file length in *bits* at the end of the buffer.
     The assignment to the uint32_t bitslow intentionally keeps only
     the low 32 bits of total<<3; bitshigh carries the upper bits. */
  bitslow = ctx->total << 3;
  bitshigh = ctx->total >> 29;
  bitslow = SWAP(bitslow);
  memcpy(ctx->buffer + bytes + pad + 4, &bitslow, 4);
  bitshigh = SWAP(bitshigh);
  memcpy(ctx->buffer + bytes + pad, &bitshigh, 4);

  /* Process last bytes. */
  sha256_process_block (ctx->buffer, bytes + pad + 8, ctx);

  /* extract the digest most-significant byte first (endian-neutral) */
  for (i = 0; i < SHA256_DIGEST_SIZE; i++) {
	digest[i] = (uint8_t)
		((ctx->H[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
  }
}
+
+
/* Feed LEN bytes of BUFFER into the running SHA-256 computation.
   Partial blocks are accumulated in ctx->buffer (128 bytes). */
void
SHA256Update(struct SHA256_Context *ctx, const uint8_t *buffer, size_t len)
{
  /* When we already have some bits in our internal buffer concatenate
     both inputs first.  */
  if (ctx->buflen != 0)
    {
      size_t left_over = ctx->buflen;
      size_t add = 128 - left_over > len ? len : 128 - left_over;

      memcpy (&ctx->buffer[left_over], buffer, add);
      ctx->buflen += add;

      if (ctx->buflen > 64)
	{
	  /* hash all complete 64-byte blocks now in the buffer */
	  sha256_process_block (ctx->buffer, ctx->buflen & ~63, ctx);

	  ctx->buflen &= 63;
	  /* The regions in the following copy operation cannot overlap.  */
	  memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63],
		  ctx->buflen);
	}

      buffer = buffer + add;
      len -= add;
    }

  /* Process available complete blocks.  */
  if (len >= 64)
    {
/* To check alignment gcc has an appropriate operator.  Other
   compilers don't.  */
#if __GNUC__ >= 2
# define UNALIGNED_P(p) (((uintptr_t) p) % __alignof__ (uint32_t) != 0)
#else
# define UNALIGNED_P(p) (((uintptr_t) p) % sizeof (uint32_t) != 0)
#endif
      if (UNALIGNED_P (buffer))
	/* unaligned input: bounce each block through the (aligned)
	   context buffer.  Note the strict '>', so an exactly-64-byte
	   remainder is deferred to the tail handling below. */
	while (len > 64)
	  {
	    sha256_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx);
	    buffer = buffer + 64;
	    len -= 64;
	  }
      else
	{
	  sha256_process_block (buffer, len & ~63, ctx);
	  buffer = buffer + (len & ~63);
	  len &= 63;
	}
    }

  /* Move remaining bytes into internal buffer.  */
  if (len > 0)
    {
      size_t left_over = ctx->buflen;

      memcpy (&ctx->buffer[left_over], buffer, len);
      left_over += len;
      if (left_over >= 64)
	{
	  sha256_process_block (ctx->buffer, 64, ctx);
	  left_over -= 64;
	  memcpy (ctx->buffer, &ctx->buffer[64], left_over);
	}
      ctx->buflen = left_over;
    }
}
diff --git a/sha256.h b/sha256.h
new file mode 100644
index 0000000..50c1638
--- /dev/null
+++ b/sha256.h
@@ -0,0 +1,20 @@
#ifndef REPREPRO_SHA256_H
#define REPREPRO_SHA256_H

/* Make the header self-contained: the declarations below use
 * uint32_t/uint64_t/uint8_t and size_t. */
#include <stddef.h>
#include <stdint.h>

/* Structure to save state of computation between the single steps. */
struct SHA256_Context
{
	uint32_t H[8];		/* the eight 32-bit chaining variables */

	uint64_t total;		/* total number of bytes processed */
	uint32_t buflen;	/* bytes currently pending in buffer */
	char buffer[128];	/* NB: always correctly aligned for uint32_t. */
};

#define SHA256_DIGEST_SIZE 32

void SHA256Init(/*@out@*/struct SHA256_Context *context);
void SHA256Update(struct SHA256_Context *context, const uint8_t *data, size_t len);
void SHA256Final(struct SHA256_Context *context, /*@out@*/uint8_t digest[SHA256_DIGEST_SIZE]);

#endif
diff --git a/sha512.c b/sha512.c
new file mode 100644
index 0000000..1bd942e
--- /dev/null
+++ b/sha512.c
@@ -0,0 +1,309 @@
+/* sha512 implementation, taken (with minor modification) from sha512crypt.c,
+ which states:
+ Released into the Public Domain by Ulrich Drepper <drepper@redhat.com>.
+ Neglegible modifications by Bastian Germann which stem from reprepro's
+ sha256.c by Bernhard R. Link, also in the public domain.
+*/
+
+#include <config.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include "sha512.h"
+
+#ifndef WORDS_BIGENDIAN
+# define SWAP(n) \
+ (((n) << 56) \
+ | (((n) & 0xff00) << 40) \
+ | (((n) & 0xff0000) << 24) \
+ | (((n) & 0xff000000) << 8) \
+ | (((n) >> 8) & 0xff000000) \
+ | (((n) >> 24) & 0xff0000) \
+ | (((n) >> 40) & 0xff00) \
+ | ((n) >> 56))
+#else
+# define SWAP(n) (n)
+#endif
+
+
+/* This array contains the bytes used to pad the buffer to the next
+ 64-byte boundary. (FIPS 180-2:5.1.2) */
+static const unsigned char fillbuf[128] = { 0x80, 0 /* , 0, 0, ... */ };
+
+
+/* Constants for SHA512 from FIPS 180-2:4.2.3. */
+static const uint64_t K[80] =
+ {
+ UINT64_C (0x428a2f98d728ae22), UINT64_C (0x7137449123ef65cd),
+ UINT64_C (0xb5c0fbcfec4d3b2f), UINT64_C (0xe9b5dba58189dbbc),
+ UINT64_C (0x3956c25bf348b538), UINT64_C (0x59f111f1b605d019),
+ UINT64_C (0x923f82a4af194f9b), UINT64_C (0xab1c5ed5da6d8118),
+ UINT64_C (0xd807aa98a3030242), UINT64_C (0x12835b0145706fbe),
+ UINT64_C (0x243185be4ee4b28c), UINT64_C (0x550c7dc3d5ffb4e2),
+ UINT64_C (0x72be5d74f27b896f), UINT64_C (0x80deb1fe3b1696b1),
+ UINT64_C (0x9bdc06a725c71235), UINT64_C (0xc19bf174cf692694),
+ UINT64_C (0xe49b69c19ef14ad2), UINT64_C (0xefbe4786384f25e3),
+ UINT64_C (0x0fc19dc68b8cd5b5), UINT64_C (0x240ca1cc77ac9c65),
+ UINT64_C (0x2de92c6f592b0275), UINT64_C (0x4a7484aa6ea6e483),
+ UINT64_C (0x5cb0a9dcbd41fbd4), UINT64_C (0x76f988da831153b5),
+ UINT64_C (0x983e5152ee66dfab), UINT64_C (0xa831c66d2db43210),
+ UINT64_C (0xb00327c898fb213f), UINT64_C (0xbf597fc7beef0ee4),
+ UINT64_C (0xc6e00bf33da88fc2), UINT64_C (0xd5a79147930aa725),
+ UINT64_C (0x06ca6351e003826f), UINT64_C (0x142929670a0e6e70),
+ UINT64_C (0x27b70a8546d22ffc), UINT64_C (0x2e1b21385c26c926),
+ UINT64_C (0x4d2c6dfc5ac42aed), UINT64_C (0x53380d139d95b3df),
+ UINT64_C (0x650a73548baf63de), UINT64_C (0x766a0abb3c77b2a8),
+ UINT64_C (0x81c2c92e47edaee6), UINT64_C (0x92722c851482353b),
+ UINT64_C (0xa2bfe8a14cf10364), UINT64_C (0xa81a664bbc423001),
+ UINT64_C (0xc24b8b70d0f89791), UINT64_C (0xc76c51a30654be30),
+ UINT64_C (0xd192e819d6ef5218), UINT64_C (0xd69906245565a910),
+ UINT64_C (0xf40e35855771202a), UINT64_C (0x106aa07032bbd1b8),
+ UINT64_C (0x19a4c116b8d2d0c8), UINT64_C (0x1e376c085141ab53),
+ UINT64_C (0x2748774cdf8eeb99), UINT64_C (0x34b0bcb5e19b48a8),
+ UINT64_C (0x391c0cb3c5c95a63), UINT64_C (0x4ed8aa4ae3418acb),
+ UINT64_C (0x5b9cca4f7763e373), UINT64_C (0x682e6ff3d6b2b8a3),
+ UINT64_C (0x748f82ee5defb2fc), UINT64_C (0x78a5636f43172f60),
+ UINT64_C (0x84c87814a1f0ab72), UINT64_C (0x8cc702081a6439ec),
+ UINT64_C (0x90befffa23631e28), UINT64_C (0xa4506cebde82bde9),
+ UINT64_C (0xbef9a3f7b2c67915), UINT64_C (0xc67178f2e372532b),
+ UINT64_C (0xca273eceea26619c), UINT64_C (0xd186b8c721c0c207),
+ UINT64_C (0xeada7dd6cde0eb1e), UINT64_C (0xf57d4f7fee6ed178),
+ UINT64_C (0x06f067aa72176fba), UINT64_C (0x0a637dc5a2c898a6),
+ UINT64_C (0x113f9804bef90dae), UINT64_C (0x1b710b35131c471b),
+ UINT64_C (0x28db77f523047d84), UINT64_C (0x32caab7b40c72493),
+ UINT64_C (0x3c9ebe0a15c9bebc), UINT64_C (0x431d67c49c100d4c),
+ UINT64_C (0x4cc5d4becb3e42b6), UINT64_C (0x597f299cfc657e2a),
+ UINT64_C (0x5fcb6fab3ad6faec), UINT64_C (0x6c44198c4a475817)
+ };
+
+
/* Process LEN bytes of BUFFER, accumulating context into CTX.
   It is assumed that LEN % 128 == 0.
   NOTE(review): BUFFER is read through a uint64_t pointer, so callers
   must pass 8-byte-aligned data (SHA512Update checks UNALIGNED_P
   before calling this directly) -- confirm for any new caller. */
static void
sha512_process_block (const void *buffer, size_t len, struct SHA512_Context *ctx)
{
  const uint64_t *words = buffer;
  size_t nwords = len / sizeof (uint64_t);
  uint64_t a = ctx->H[0];
  uint64_t b = ctx->H[1];
  uint64_t c = ctx->H[2];
  uint64_t d = ctx->H[3];
  uint64_t e = ctx->H[4];
  uint64_t f = ctx->H[5];
  uint64_t g = ctx->H[6];
  uint64_t h = ctx->H[7];

  /* First increment the byte count.  FIPS 180-2 specifies the possible
     length of the file up to 2^128 bits.  Here we only compute the
     number of bytes.  Do a double word increment.  */
  ctx->total[0] += len;
  if (ctx->total[0] < len)
    ++ctx->total[1];

  /* Process all bytes in the buffer with 128 bytes in each round of
     the loop.  */
  while (nwords > 0)
    {
      uint64_t W[80];
      uint64_t a_save = a;
      uint64_t b_save = b;
      uint64_t c_save = c;
      uint64_t d_save = d;
      uint64_t e_save = e;
      uint64_t f_save = f;
      uint64_t g_save = g;
      uint64_t h_save = h;

      /* Operators defined in FIPS 180-2:4.1.2.  */
#define Ch(x, y, z) ((x & y) ^ (~x & z))
#define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define S0(x) (CYCLIC (x, 28) ^ CYCLIC (x, 34) ^ CYCLIC (x, 39))
#define S1(x) (CYCLIC (x, 14) ^ CYCLIC (x, 18) ^ CYCLIC (x, 41))
#define R0(x) (CYCLIC (x, 1) ^ CYCLIC (x, 8) ^ (x >> 7))
#define R1(x) (CYCLIC (x, 19) ^ CYCLIC (x, 61) ^ (x >> 6))

      /* It is unfortunate that C does not provide an operator for
	 cyclic rotation.  Hope the C compiler is smart enough.  */
#define CYCLIC(w, s) ((w >> s) | (w << (64 - s)))

      /* Compute the message schedule according to FIPS 180-2:6.3.2 step 2.
	 SWAP converts each little-endian input word to the big-endian
	 order the schedule requires (identity on big-endian hosts). */
      for (unsigned int t = 0; t < 16; ++t)
	{
	  W[t] = SWAP (*words);
	  ++words;
	}
      for (unsigned int t = 16; t < 80; ++t)
	W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16];

      /* The actual computation according to FIPS 180-2:6.3.2 step 3.  */
      for (unsigned int t = 0; t < 80; ++t)
	{
	  uint64_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t];
	  uint64_t T2 = S0 (a) + Maj (a, b, c);
	  h = g;
	  g = f;
	  f = e;
	  e = d + T1;
	  d = c;
	  c = b;
	  b = a;
	  a = T1 + T2;
	}

      /* Add the starting values of the context according to FIPS 180-2:6.3.2
	 step 4.  */
      a += a_save;
      b += b_save;
      c += c_save;
      d += d_save;
      e += e_save;
      f += f_save;
      g += g_save;
      h += h_save;

      /* Prepare for the next round: each iteration consumed one
	 128-byte block, i.e. 16 of the input words. */
      nwords -= 16;
    }

  /* Put checksum in context given as argument.  */
  ctx->H[0] = a;
  ctx->H[1] = b;
  ctx->H[2] = c;
  ctx->H[3] = d;
  ctx->H[4] = e;
  ctx->H[5] = f;
  ctx->H[6] = g;
  ctx->H[7] = h;
}
+
+
+/* Initialize structure containing state of computation.
+ (FIPS 180-2:5.3.3) */
+void
+SHA512Init (struct SHA512_Context *ctx)
+{
+ ctx->H[0] = UINT64_C (0x6a09e667f3bcc908);
+ ctx->H[1] = UINT64_C (0xbb67ae8584caa73b);
+ ctx->H[2] = UINT64_C (0x3c6ef372fe94f82b);
+ ctx->H[3] = UINT64_C (0xa54ff53a5f1d36f1);
+ ctx->H[4] = UINT64_C (0x510e527fade682d1);
+ ctx->H[5] = UINT64_C (0x9b05688c2b3e6c1f);
+ ctx->H[6] = UINT64_C (0x1f83d9abfb41bd6b);
+ ctx->H[7] = UINT64_C (0x5be0cd19137e2179);
+
+ ctx->total[0] = ctx->total[1] = 0;
+ ctx->buflen = 0;
+}
+
+
+/* Process the remaining bytes in the internal buffer and the usual
+ prolog according to the standard and write the result to digest.
+
+ IMPORTANT: On some systems it is required that digest is correctly
+ aligned for a 32 bits value.
+*/
+void
+SHA512Final (struct SHA512_Context *ctx, uint8_t *digest)
+{
+ /* Take yet unprocessed bytes into account. */
+ uint64_t bytes = ctx->buflen;
+ size_t pad;
+
+ /* Now count remaining bytes. */
+ ctx->total[0] += bytes;
+ if (ctx->total[0] < bytes)
+ ++ctx->total[1];
+
+ pad = bytes >= 112 ? 128 + 112 - bytes : 112 - bytes;
+ memcpy (&ctx->buffer[bytes], fillbuf, pad);
+
+ /* Put the 128-bit file length in *bits* at the end of the buffer. */
+ *(uint64_t *) &ctx->buffer[bytes + pad + 8] = SWAP (ctx->total[0] << 3);
+ *(uint64_t *) &ctx->buffer[bytes + pad] = SWAP ((ctx->total[1] << 3) |
+ (ctx->total[0] >> 61));
+
+ /* Process last bytes. */
+ sha512_process_block (ctx->buffer, bytes + pad + 16, ctx);
+
+ for (unsigned int i = 0; i < 8; ++i)
+ ((uint64_t *) digest)[i] = SWAP (ctx->H[i]);
+}
+
+
/* Feed LEN bytes of BUFFER into the running SHA-512 computation.
   Partial blocks are accumulated in ctx->buffer (256 bytes). */
void
SHA512Update (struct SHA512_Context *ctx, const uint8_t *buffer, size_t len)
{
  /* When we already have some bits in our internal buffer concatenate
     both inputs first.  */
  if (ctx->buflen != 0)
    {
      size_t left_over = ctx->buflen;
      size_t add = 256 - left_over > len ? len : 256 - left_over;

      memcpy (&ctx->buffer[left_over], buffer, add);
      ctx->buflen += add;

      if (ctx->buflen > 128)
	{
	  /* hash all complete 128-byte blocks now in the buffer */
	  sha512_process_block (ctx->buffer, ctx->buflen & ~127, ctx);

	  ctx->buflen &= 127;
	  /* The regions in the following copy operation cannot overlap.  */
	  memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~127],
		  ctx->buflen);
	}

      buffer = buffer + add;
      len -= add;
    }

  /* Process available complete blocks.  */
  if (len >= 128)
    {
#if !_STRING_ARCH_unaligned
/* To check alignment gcc has an appropriate operator.  Other
   compilers don't.  */
# if __GNUC__ >= 2
#  define UNALIGNED_P(p) (((uintptr_t) p) % __alignof__ (uint64_t) != 0)
# else
#  define UNALIGNED_P(p) (((uintptr_t) p) % sizeof (uint64_t) != 0)
# endif
      if (UNALIGNED_P (buffer))
	/* unaligned input: bounce each block through the (aligned)
	   context buffer.  Note the strict '>', so an exactly-128-byte
	   remainder is deferred to the tail handling below. */
	while (len > 128)
	  {
	    sha512_process_block (memcpy (ctx->buffer, buffer, 128), 128,
				  ctx);
	    buffer = buffer + 128;
	    len -= 128;
	  }
      else
#endif
	{
	  sha512_process_block (buffer, len & ~127, ctx);
	  buffer = buffer + (len & ~127);
	  len &= 127;
	}
    }

  /* Move remaining bytes into internal buffer.  */
  if (len > 0)
    {
      size_t left_over = ctx->buflen;

      memcpy (&ctx->buffer[left_over], buffer, len);
      left_over += len;
      if (left_over >= 128)
	{
	  sha512_process_block (ctx->buffer, 128, ctx);
	  left_over -= 128;
	  memcpy (ctx->buffer, &ctx->buffer[128], left_over);
	}
      ctx->buflen = left_over;
    }
}
diff --git a/sha512.h b/sha512.h
new file mode 100644
index 0000000..42f822e
--- /dev/null
+++ b/sha512.h
@@ -0,0 +1,20 @@
+#ifndef REPREPRO_SHA512_H
+#define REPREPRO_SHA512_H
+
+/* Structure to save state of computation between the single steps. */
+struct SHA512_Context
+{
+ uint64_t H[8];
+
+ uint64_t total[2];
+ uint64_t buflen;
+ char buffer[256]; /* NB: always correctly aligned for uint32_t. */
+};
+
+#define SHA512_DIGEST_SIZE 64
+
+void SHA512Init(/*@out@*/struct SHA512_Context *context);
+void SHA512Update(struct SHA512_Context *context, const uint8_t *data, size_t len);
+void SHA512Final(struct SHA512_Context *context, /*@out@*/uint8_t digest[SHA512_DIGEST_SIZE]);
+
+#endif
diff --git a/signature.c b/signature.c
new file mode 100644
index 0000000..93ff207
--- /dev/null
+++ b/signature.c
@@ -0,0 +1,570 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <time.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "signature_p.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "names.h"
+#include "chunks.h"
+#include "readtextfile.h"
+
+#ifdef HAVE_LIBGPGME
+gpgme_ctx_t context = NULL;
+
+retvalue gpgerror(gpg_error_t err) {
+ if (err != 0) {
+ fprintf(stderr, "gpgme gave error %s:%d: %s\n",
+ gpg_strsource(err), gpg_err_code(err),
+ gpg_strerror(err));
+ if (gpg_err_code(err) == GPG_ERR_ENOMEM)
+ return RET_ERROR_OOM;
+ else
+ return RET_ERROR_GPGME;
+ } else
+ return RET_OK;
+}
+
+/* Quick&dirty passphrase asking */
+static gpg_error_t signature_getpassphrase(UNUSED(void *hook), const char *uid_hint, UNUSED(const char *info), int prev_was_bad, int fd) {
+ char *msg;
+ const char *p;
+ int e = 0;
+
+ msg = mprintf("%s needs a passphrase\nPlease enter passphrase%s:",
+ (uid_hint!=NULL)?uid_hint:"key",
+ (prev_was_bad!=0)?" again":"");
+ if (msg == NULL)
+ return gpg_err_make(GPG_ERR_SOURCE_USER_1, GPG_ERR_ENOMEM);
+ p = getpass(msg);
+ if (write(fd, p, strlen(p)) < 0) {
+ e = errno;
+ }
+ if (write(fd, "\n", 1) < 0 && e == 0) {
+ e = errno;
+ }
+ if (e != 0) {
+ fprintf(stderr, "Error %d writing to fd %i: %s\n",
+ e, fd, strerror(e));
+ free(msg);
+ return RET_ERRNO(e);
+ }
+ free(msg);
+ return GPG_ERR_NO_ERROR;
+}
+#endif /* HAVE_LIBGPGME */
+
/* Set up the global gpgme context (once): check library and engine
 * versions, select the OpenPGP protocol, optionally install the
 * interactive passphrase callback, and enable ASCII-armor output.
 * Returns RET_NOTHING if already initialized; without libgpgme
 * support this is a no-op returning RET_OK. */
retvalue signature_init(bool allowpassphrase){
#ifdef HAVE_LIBGPGME
	gpg_error_t err;

	if (context != NULL)
		return RET_NOTHING;
	/* gpgme_check_version(NULL) must be called before any other
	 * gpgme function to initialize the library */
	gpgme_check_version(NULL);
	err = gpgme_engine_check_version(GPGME_PROTOCOL_OpenPGP);
	if (err != 0)
		return gpgerror(err);
	err = gpgme_new(&context);
	if (err != 0)
		return gpgerror(err);
	err = gpgme_set_protocol(context, GPGME_PROTOCOL_OpenPGP);
	if (err != 0)
		return gpgerror(err);
	if (allowpassphrase)
		gpgme_set_passphrase_cb(context, signature_getpassphrase,
				NULL);
	gpgme_set_armor(context, 1);
#endif /* HAVE_LIBGPGME */
	return RET_OK;
}
+
/* Release the global gpgme context, if one was created. */
void signatures_done(void) {
#ifdef HAVE_LIBGPGME
	if (context == NULL)
		return;
	gpgme_release(context);
	context = NULL;
#endif /* HAVE_LIBGPGME */
}
+
+#ifdef HAVE_LIBGPGME
/* retrieve a list of fingerprints of keys having signed (valid) or
 * which are mentioned in the signature (all). set broken if all signatures
 * was broken (hints to a broken file, as opposed to expired or whatever
 * else may make a signature invalid)). */
static retvalue checksigs(const char *filename, struct signatures **signatures_p, bool *broken) {
	gpgme_verify_result_t result;
	gpgme_signature_t s;
	bool had_valid = false, had_broken = false;
	size_t count;
	struct signatures *signatures;
	struct signature *sig;

	/* examine the outcome of the preceding gpgme_op_verify call
	 * on the global context */
	result = gpgme_op_verify_result(context);
	if (result == NULL) {
		fprintf(stderr,
"Internal error communicating with libgpgme: no result record!\n\n");
		return RET_ERROR_GPGME;
	}
	if (signatures_p != NULL) {
		/* allocate one struct with a trailing array sized for
		 * all signatures found */
		count = 0;
		for (s = result->signatures ; s != NULL ; s = s->next) {
			count++;
		}
		signatures = calloc(1, sizeof(struct signatures) +
				count * sizeof(struct signature));
		if (FAILEDTOALLOC(signatures))
			return RET_ERROR_OOM;
		signatures->count = count;
		signatures->validcount = 0;
		sig = signatures->signatures;
	} else {
		signatures = NULL;
		sig = NULL;
	}
	/* classify each signature into a signature_state */
	for (s = result->signatures ; s != NULL ; s = s->next) {
		enum signature_state state = sist_error;

		if (signatures_p != NULL) {
			sig->keyid = strdup(s->fpr);
			if (FAILEDTOALLOC(sig->keyid)) {
				signatures_free(signatures);
				return RET_ERROR_OOM;
			}
		}
		switch (gpg_err_code(s->status)) {
			case GPG_ERR_NO_ERROR:
				had_valid = true;
				state = sist_valid;
				if (signatures)
					signatures->validcount++;
				break;
			case GPG_ERR_KEY_EXPIRED:
				had_valid = true;
				if (verbose > 0)
					fprintf(stderr,
"Ignoring signature with '%s' on '%s', as the key has expired.\n",
						s->fpr, filename);
				state = sist_mostly;
				if (sig != NULL)
					sig->expired_key = true;
				break;
			case GPG_ERR_CERT_REVOKED:
				had_valid = true;
				if (verbose > 0)
					fprintf(stderr,
"Ignoring signature with '%s' on '%s', as the key is revoked.\n",
						s->fpr, filename);
				state = sist_mostly;
				if (sig != NULL)
					sig->revoced_key = true;
				break;
			case GPG_ERR_SIG_EXPIRED:
				had_valid = true;
				if (verbose > 0) {
					time_t timestamp = s->timestamp,
					      exp_timestamp = s->exp_timestamp;
					fprintf(stderr,
"Ignoring signature with '%s' on '%s', as the signature has expired.\n"
" signature created %s, expired %s\n",
						s->fpr, filename,
						ctime(&timestamp),
						ctime(&exp_timestamp));
				}
				state = sist_mostly;
				if (sig != NULL)
					sig->expired_signature = true;
				break;
			case GPG_ERR_BAD_SIGNATURE:
				had_broken = true;
				if (verbose > 0) {
					fprintf(stderr,
"WARNING: '%s' has a invalid signature with '%s'\n", filename, s->fpr);
				}
				state = sist_bad;
				break;
			case GPG_ERR_NO_PUBKEY:
				if (verbose > 0) {
					fprintf(stderr,
"Could not check validity of signature with '%s' in '%s' as public key missing!\n",
						s->fpr, filename);
				}
				state = sist_missing;
				break;
			case GPG_ERR_GENERAL:
				fprintf(stderr,
"gpgme returned an general error verifing signature with '%s' in '%s'!\n"
"Try running gpg --verify '%s' manually for hints what is happening.\n"
"If this does not print any errors, retry the command causing this message.\n",
						s->fpr, filename,
						filename);
				signatures_free(signatures);
				return RET_ERROR_GPGME;
			/* there sadly no more is a way to make sure we have
			 * all possible ones handled */
			default:
				break;
		}
		/* an unrecognized status code falls through with
		 * state still sist_error: refuse rather than guess */
		if (state == sist_error) {
			fprintf(stderr,
"Error checking signature (gpgme returned unexpected value %d)!\n"
"Please file a bug report, so reprepro can handle this in the future.\n",
				gpg_err_code(s->status));
			signatures_free(signatures);
			return RET_ERROR_GPGME;
		}
		if (sig != NULL) {
			sig->state = state;
			sig++;
		}
	}
	/* only report 'broken' when nothing at all verified: a mix of
	 * good and bad signatures is not treated as file corruption */
	if (broken != NULL && had_broken && ! had_valid)
		*broken = true;
	if (signatures_p != NULL)
		*signatures_p = signatures;
	return RET_OK;
}
+
static retvalue check_primary_keys(struct signatures *signatures) {
	/* Get the primary keys belonging to each signing key.
	This might also invalidate a signature previously believed
	valid if the primary key is expired */
	int i;

	for (i = 0 ; i < signatures->count ; i++) {
		gpg_error_t err;
		gpgme_key_t gpgme_key = NULL;
		gpgme_subkey_t subkey;
		struct signature *sig = &signatures->signatures[i];

		/* keys we could not check (or internal errors) keep their
		 * own keyid as "primary" keyid, as there is nothing to
		 * look up: */
		if (sig->state == sist_error || sig->state == sist_missing) {
			sig->primary_keyid = strdup(sig->keyid);
			if (FAILEDTOALLOC(sig->primary_keyid))
				return RET_ERROR_OOM;
			continue;
		}

		/* ask gpgme for the full key the signing (sub)key
		 * belongs to: */
		err = gpgme_get_key(context, sig->keyid, &gpgme_key, 0);
		if (err != 0) {
			fprintf(stderr,
"gpgme error %s:%d retrieving key '%s': %s\n",
					gpg_strsource(err),
					(int)gpg_err_code(err),
					sig->keyid, gpg_strerror(err));
			if (gpg_err_code(err) == GPG_ERR_ENOMEM)
				return RET_ERROR_OOM;
			else
				return RET_ERROR_GPGME;
		}
		assert (gpgme_key != NULL);
		/* the first "sub"key is the primary key */
		subkey = gpgme_key->subkeys;
		/* a revoked or expired primary key downgrades a signature
		 * previously counted as fully valid: */
		if (subkey->revoked) {
			sig->revoced_key = true;
			if (sig->state == sist_valid) {
				sig->state = sist_mostly;
				signatures->validcount--;
			}
		}
		if (subkey->expired) {
			sig->expired_key = true;
			if (sig->state == sist_valid) {
				sig->state = sist_mostly;
				signatures->validcount--;
			}
		}
		sig->primary_keyid = strdup(subkey->keyid);
		gpgme_key_unref(gpgme_key);
		if (FAILEDTOALLOC(sig->primary_keyid))
			return RET_ERROR_OOM;
	}
	return RET_OK;
}
+#endif /* HAVE_LIBGPGME */
+
+void signatures_free(struct signatures *signatures) {
+ int i;
+
+ if (signatures == NULL)
+ return;
+
+ for (i = 0 ; i < signatures->count ; i++) {
+ free(signatures->signatures[i].keyid);
+ free(signatures->signatures[i].primary_keyid);
+ }
+ free(signatures);
+}
+
+#ifdef HAVE_LIBGPGME
+static retvalue extract_signed_data(const char *buffer, size_t bufferlen, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures_p, bool *brokensignature) {
+ char *chunk;
+ gpg_error_t err;
+ gpgme_data_t dh, dh_gpg;
+ size_t plain_len;
+ char *plain_data;
+ retvalue r;
+ struct signatures *signatures = NULL;
+ bool foundbroken = false;
+
+ r = signature_init(false);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ err = gpgme_data_new_from_mem(&dh_gpg, buffer, bufferlen, 0);
+ if (err != 0)
+ return gpgerror(err);
+
+ err = gpgme_data_new(&dh);
+ if (err != 0) {
+ gpgme_data_release(dh_gpg);
+ return gpgerror(err);
+ }
+ err = gpgme_op_verify(context, dh_gpg, NULL, dh);
+ if (gpg_err_code(err) == GPG_ERR_NO_DATA) {
+ if (verbose > 5)
+ fprintf(stderr,
+"Data seems not to be signed trying to use directly....\n");
+ gpgme_data_release(dh);
+ gpgme_data_release(dh_gpg);
+ return RET_NOTHING;
+ } else {
+ if (err != 0) {
+ gpgme_data_release(dh_gpg);
+ gpgme_data_release(dh);
+ return gpgerror(err);
+ }
+ if (signatures_p != NULL || brokensignature != NULL) {
+ r = checksigs(filenametoshow,
+ (signatures_p!=NULL)?&signatures:NULL,
+ (brokensignature!=NULL)?&foundbroken:NULL);
+ if (RET_WAS_ERROR(r)) {
+ gpgme_data_release(dh_gpg);
+ gpgme_data_release(dh);
+ return r;
+ }
+ }
+ gpgme_data_release(dh_gpg);
+ plain_data = gpgme_data_release_and_get_mem(dh, &plain_len);
+ if (plain_data == NULL) {
+ fprintf(stderr,
+"(not yet fatal) ERROR: libgpgme failed to extract the plain data out of\n"
+"'%s'.\n"
+"While it did so in a way indicating running out of memory, experience says\n"
+"this also happens when gpg returns a error code it does not understand.\n"
+"To check this please try running gpg --verify '%s' manually.\n"
+"Continuing extracting it ignoring all signatures...",
+ filenametoshow, filenametoshow);
+ signatures_free(signatures);
+ return RET_NOTHING;
+ }
+ if (signatures != NULL) {
+ r = check_primary_keys(signatures);
+ if (RET_WAS_ERROR(r)) {
+ signatures_free(signatures);
+ return r;
+ }
+ }
+ }
+
+ if (FAILEDTOALLOC(plain_data))
+ r = RET_ERROR_OOM;
+ else {
+ size_t len;
+ const char *afterchanges;
+
+ chunk = malloc(plain_len + 1);
+ len = chunk_extract(chunk, plain_data, plain_len, false,
+ &afterchanges);
+ if (len == 0) {
+ fprintf(stderr,
+"Could only find spaces within '%s'!\n",
+ filenametoshow);
+ free(chunk);
+ r = RET_ERROR;
+ } else if (afterchanges != plain_data + plain_len) {
+ if (*afterchanges == '\0')
+ fprintf(stderr,
+"Unexpected \\0 character within '%s'!\n",
+ filenametoshow);
+ else
+ fprintf(stderr,
+"Unexpected data after ending empty line in '%s'!\n",
+ filenametoshow);
+ free(chunk);
+ r = RET_ERROR;
+ } else
+ *chunkread = chunk;
+ }
+#ifdef HAVE_GPGPME_FREE
+ gpgme_free(plain_data);
+#else
+ free(plain_data);
+#endif
+ if (RET_IS_OK(r)) {
+ if (signatures_p != NULL)
+ *signatures_p = signatures;
+ if (brokensignature != NULL)
+ *brokensignature = foundbroken;
+ } else {
+ signatures_free(signatures);
+ }
+ return r;
+}
+#endif /* HAVE_LIBGPGME */
+
+/* Read a single chunk from a file, that may be signed. */
+retvalue signature_readsignedchunk(const char *filename, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures_p, bool *brokensignature) {
+ char *chunk;
+ const char *startofchanges, *afterchunk;
+ const char *endmarker;
+ size_t chunklen, len;
+ retvalue r;
+
+ r = readtextfile(filename, filenametoshow, &chunk, &chunklen);
+ if (!RET_IS_OK(r))
+ return r;
+
+ if (chunklen == 0) {
+ fprintf(stderr, "Unexpected empty file '%s'!\n",
+ filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ }
+
+ startofchanges = chunk_getstart(chunk, chunklen, false);
+
+ /* fast-track unsigned chunks: */
+ if (startofchanges[0] != '-') {
+ const char *afterchanges;
+
+ len = chunk_extract(chunk, chunk, chunklen, false,
+ &afterchanges);
+
+ if (len == 0) {
+ fprintf(stderr,
+"Could only find spaces within '%s'!\n",
+ filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ }
+ if (*afterchanges != '\0') {
+ fprintf(stderr,
+"Error parsing '%s': Seems not to be signed but has spurious empty line.\n",
+ filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ }
+ if (verbose > 5 && strncmp(chunk, "Format:", 7) != 0
+ && strncmp(chunk, "Source:", 7) != 0)
+ fprintf(stderr,
+"Data seems not to be signed trying to use directly...\n");
+ assert (chunk[len] == '\0');
+ *chunkread = realloc(chunk, len + 1);
+ if (FAILEDTOALLOC(*chunkread))
+ *chunkread = chunk;
+ if (signatures_p != NULL)
+ *signatures_p = NULL;
+ if (brokensignature != NULL)
+ *brokensignature = false;
+ return RET_OK;
+ }
+
+#ifdef HAVE_LIBGPGME
+ r = extract_signed_data(chunk, chunklen, filenametoshow, chunkread,
+ signatures_p, brokensignature);
+ if (r != RET_NOTHING) {
+ free(chunk);
+ return r;
+ }
+#endif
+ /* We have no libgpgme, it failed, or could not find signature data,
+ * trying to extract it manually, ignoring signatures: */
+
+ if (strncmp(startofchanges, "-----BEGIN", 10) != 0) {
+ fprintf(stderr,
+"Strange content of '%s': First non-space character is '-',\n"
+"but it does not begin with '-----BEGIN'.\n", filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+#ifndef HAVE_LIBGPGME
+ } else {
+ fprintf(stderr,
+"Cannot check signatures from '%s' as compiled without support for libgpgme!\n"
+"Extracting the content manually without looking at the signature...\n", filenametoshow);
+#endif
+ }
+ startofchanges = chunk_over(startofchanges);
+
+ len = chunk_extract(chunk, startofchanges, chunklen - (startofchanges - chunk),
+ false, &afterchunk);
+
+ if (len == 0) {
+ fprintf(stderr, "Could not find any data within '%s'!\n",
+ filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ }
+
+ endmarker = strstr(chunk, "\n-----");
+ if (endmarker != NULL) {
+ endmarker++;
+ assert ((size_t)(endmarker-chunk) < len);
+ len = endmarker-chunk;
+ chunk[len] = '\0';
+ } else if (*afterchunk == '\0') {
+ fprintf(stderr,
+"ERROR: Could not find end marker of signed data within '%s'.\n"
+"Cannot determine what is data and what is not!\n",
+filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ } else if (strncmp(afterchunk, "-----", 5) != 0) {
+ fprintf(stderr, "ERROR: Spurious empty line within '%s'.\n"
+"Cannot determine what is data and what is not!\n",
+ filenametoshow);
+ free(chunk);
+ return RET_ERROR;
+ }
+
+ assert (chunk[len] == '\0');
+ if (signatures_p != NULL) {
+ /* pointer to structure with count 0 to make clear
+ * it is not unsigned */
+ *signatures_p = calloc(1, sizeof(struct signatures));
+ if (FAILEDTOALLOC(*signatures_p)) {
+ free(chunk);
+ return RET_ERROR_OOM;
+ }
+ }
+ *chunkread = realloc(chunk, len + 1);
+ if (FAILEDTOALLOC(*chunkread))
+ *chunkread = chunk;
+ if (brokensignature != NULL)
+ *brokensignature = false;
+ return RET_OK;
+}
+
diff --git a/signature.h b/signature.h
new file mode 100644
index 0000000..294229b
--- /dev/null
+++ b/signature.h
@@ -0,0 +1,66 @@
+#ifndef REPREPRO_SIGNATURE_H
+#define REPREPRO_SIGNATURE_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+
+/* does not need to be called if allowpassphrase if false,
+ * argument will only take effect if called the first time */
+retvalue signature_init(bool allowpassphrase);
+
+struct signature_requirement;
+void signature_requirements_free(/*@only@*/struct signature_requirement *);
+retvalue signature_requirement_add(struct signature_requirement **, const char *);
+void free_known_keys(void);
+
+retvalue signature_check(const struct signature_requirement *, const char *, const char *, const char *, size_t);
+retvalue signature_check_inline(const struct signature_requirement *, const char *, /*@out@*/char **);
+
+
+struct signatures {
+ int count, validcount;
+ struct signature {
+ char *keyid;
+ char *primary_keyid;
+ /* valid is only true if none of the others is true,
+ all may be false due to non-signing keys used for
+ signing or things like that */
+ enum signature_state {
+ /* internal error: */
+ sist_error=0,
+ /* key missing, can not be checked: */
+ sist_missing,
+ /* broken signature, content may be corrupt: */
+ sist_bad,
+ /* good signature, but may not sign or al: */
+ sist_invalid,
+ /* good signature, but check expire bits: */
+ sist_mostly,
+ /* good signature, no objections: */
+ sist_valid
+ } state;
+ /* subkey or primary key are expired */
+ bool expired_key;
+ /* signature is expired */
+ bool expired_signature;
+ /* key or primary key revoced */
+ bool revoced_key;
+ } signatures[];
+};
+void signatures_free(/*@null@*//*@only@*/struct signatures *);
+/* Read a single chunk from a file, that may be signed. */
+retvalue signature_readsignedchunk(const char *filename, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures, bool *brokensignature);
+
+struct signedfile;
+struct strlist;
+
+retvalue signature_startsignedfile(/*@out@*/struct signedfile **);
+void signedfile_write(struct signedfile *, const void *, size_t);
+/* generate signature in temporary file */
+retvalue signedfile_create(struct signedfile *, const char *, char **, char **, const struct strlist *, bool /*willcleanup*/);
+void signedfile_free(/*@only@*/struct signedfile *);
+
+void signatures_done(void);
+#endif
diff --git a/signature_check.c b/signature_check.c
new file mode 100644
index 0000000..c5e1aa1
--- /dev/null
+++ b/signature_check.c
@@ -0,0 +1,924 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <time.h>
+#include <string.h>
+#include <fcntl.h>
+#include "signature_p.h"
+#include "ignore.h"
+#include "chunks.h"
+#include "readtextfile.h"
+
+
+#ifdef HAVE_LIBGPGME
+
/* Parse one key specification out of a VerifyRelease condition.
 * Grammar per part: hex-keyid, optional '!' (accept despite problems),
 * optional '+' (accept any subkey of this primary key), optionally
 * followed by '|' introducing an alternative.
 * On success *next_key_p gets the upper-cased key id (caller frees)
 * and *condition_p is advanced past this part (and a trailing '|'). */
static retvalue parse_condition_part(bool *allow_subkeys_p, bool *allow_bad_p, const char *full_condition, const char **condition_p, /*@out@*/ char **next_key_p) {
	const char *key = *condition_p, *p;
	char *next_key, *q;
	size_t kl;

	*allow_bad_p = false;
	*allow_subkeys_p = false;

	/* skip leading whitespace */
	while (*key != '\0' && xisspace(*key))
		key++;
	if (*key == '\0') {
		fprintf(stderr,
"Error: unexpected end of VerifyRelease condition '%s'!\n",
				full_condition);
		return RET_ERROR;
	}

	/* scan the run of hex digits forming the key id */
	p = key;
	while ((*p >= 'A' && *p <= 'F') || (*p >= 'a' && *p <= 'f')
			|| (*p >= '0' && *p <= '9'))
		p++;
	if (*p != '\0' && !xisspace(*p) && *p != '|' && *p != '!' && *p != '+') {
		fprintf(stderr,
"Error: Unexpected character 0x%02hhx='%c' in VerifyRelease condition '%s'!\n",
				*p, *p, full_condition);
		return RET_ERROR;
	}
	kl = p - key;
	/* require at least a short (8-digit) key id */
	if (kl < 8) {
		fprintf(stderr,
"Error: Too short key id '%.*s' in VerifyRelease condition '%s'!\n",
				(int)kl, key, full_condition);
		return RET_ERROR;
	}
	next_key = strndup(key, kl);
	if (FAILEDTOALLOC(next_key))
		return RET_ERROR_OOM;
	key = p;
	/* normalize the copied id to upper case */
	for (q = next_key ; *q != '\0' ; q++) {
		if (*q >= 'a' && *q <= 'f')
			*q -= 'a' - 'A';
	}
	while (*key != '\0' && xisspace(*key))
		key++;
	/* optional '!': use the key even if revoked/expired/non-signing */
	if (*key == '!') {
		*allow_bad_p = true;
		key++;
	}
	while (*key != '\0' && xisspace(*key))
		key++;
	/* optional '+': accept any subkey of this primary key */
	if (*key == '+') {
		*allow_subkeys_p = true;
		key++;
	}
	while (*key != '\0' && xisspace(*key))
		key++;
	/* a hex digit here means two ids separated only by spaces */
	if ((*key >= 'A' && *key <= 'F')
			|| (*key >= 'a' && *key <= 'f')
			|| (*key >= '0' && *key <= '9')) {
		free(next_key);
		fprintf(stderr,
"Error: Space separated key-ids in VerifyRelease condition '%s'!\n"
"(Alternate keys can be separated with '|'. Do not put spaces in key-ids.)\n",
				full_condition);
		return RET_ERROR;
	}
	if (*key != '\0' && *key != '|') {
		free(next_key);
		fprintf(stderr,
"Error: Unexpected character 0x%02hhx='%c' in VerifyRelease condition '%s'!\n",
				*key, *key, full_condition);
		return RET_ERROR;
	}
	if (*key == '|')
		key++;
	*next_key_p = next_key;
	*condition_p = key;
	return RET_OK;
}
+
/* Cache of keys already retrieved via gpgme, so each key is looked up
 * at most once per run.  Singly-linked list rooted at known_keys. */
static struct known_key {
	struct known_key *next;
	/* subkeys, first is primary key */
	int count;
	struct known_subkey {
		/* full fingerprint or keyid */
		char *name;
		unsigned int name_len;
		/* true if revoked */
		bool revoked;
		/* true if expired */
		bool expired;
		/* false if invalid or cannot sign */
		bool cansign;
	} subkeys[];
} *known_keys = NULL;

/* One alternative inside a VerifyRelease condition: a specific
 * (sub)key that would satisfy the requirement. */
struct requested_key {
	/* pointer to the key in question */
	const struct known_key *key;
	/* which of those keys are requested, -1 for any (i.e. allow subkeys) */
	int subkey;
	/* allow some problems, if requested by the user */
	bool allow_bad;
};
+
+static retvalue found_key(struct known_key *k, int i, bool allow_subkeys, bool allow_bad, const char *full_condition, const struct known_key **key_found, int *index_found) {
+ if (!allow_bad && k->subkeys[i].revoked) {
+ fprintf(stderr,
+"VerifyRelease condition '%s' lists revoked key '%s'.\n"
+"(To use it anyway, append it with a '!' to force usage).\n",
+ full_condition, k->subkeys[i].name);
+ return RET_ERROR;
+ }
+ if (!allow_bad && k->subkeys[i].expired) {
+ fprintf(stderr,
+"VerifyRelease condition '%s' lists expired key '%s'.\n"
+"(To use it anyway, append it with a '!' to force usage).\n",
+ full_condition, k->subkeys[i].name);
+ return RET_ERROR;
+ }
+ if (!allow_bad && !k->subkeys[i].cansign) {
+ fprintf(stderr,
+"VerifyRelease condition '%s' lists non-signing key '%s'.\n"
+"(To use it anyway, append it with a '!' to force usage).\n",
+ full_condition, k->subkeys[i].name);
+ return RET_ERROR;
+ }
+ if (allow_subkeys) {
+ if (i != 0) {
+ fprintf(stderr,
+"VerifyRelease condition '%s' lists non-primary key '%s' with '+'.\n",
+ full_condition, k->subkeys[i].name);
+ return RET_ERROR;
+ }
+ *index_found = -1;
+ } else
+ *index_found = i;
+ *key_found = k;
+ return RET_OK;
+}
+
+/* name must already be upper-case */
+static retvalue load_key(const char *name, bool allow_subkeys, bool allow_bad, const char *full_condition, const struct known_key **key_found, int *index_found) {
+ gpg_error_t err;
+ gpgme_key_t gpgme_key = NULL;
+ gpgme_subkey_t subkey;
+ int found = -1;
+ struct known_key *k;
+ int i;
+ size_t l = strlen(name);
+
+ /* first look if this key was already retrieved: */
+ for (k = known_keys ; k != NULL ; k = k->next) {
+ for(i = 0 ; i < k->count ; i++) {
+ struct known_subkey *s = &k->subkeys[i];
+
+ if (s->name_len < l)
+ continue;
+ if (memcmp(name, s->name + (s->name_len - l), l) != 0)
+ continue;
+ return found_key(k, i, allow_subkeys, allow_bad,
+ full_condition,
+ key_found, index_found);
+ }
+ }
+ /* If not yet found, request it: */
+ err = gpgme_get_key(context, name, &gpgme_key, 0);
+ if ((gpg_err_code(err) == GPG_ERR_EOF) && gpgme_key == NULL) {
+ fprintf(stderr, "Error: unknown key '%s'!\n", name);
+ return RET_ERROR_MISSING;
+ }
+ if (err != 0) {
+ fprintf(stderr, "gpgme error %s:%d retrieving key '%s': %s\n",
+ gpg_strsource(err), (int)gpg_err_code(err),
+ name, gpg_strerror(err));
+ if (gpg_err_code(err) == GPG_ERR_ENOMEM)
+ return RET_ERROR_OOM;
+ else
+ return RET_ERROR_GPGME;
+ }
+ i = 0;
+ subkey = gpgme_key->subkeys;
+ while (subkey != NULL) {
+ subkey = subkey->next;
+ i++;
+ }
+ k = calloc(1, sizeof(struct known_key)
+ + i * sizeof(struct known_subkey));
+ if (FAILEDTOALLOC(k)) {
+ gpgme_key_unref(gpgme_key);
+ return RET_ERROR_OOM;
+ }
+ k->count = i;
+ k->next = known_keys;
+ known_keys = k;
+
+ subkey = gpgme_key->subkeys;
+ for (i = 0 ; i < k->count ; i++ , subkey = subkey->next) {
+ struct known_subkey *s = &k->subkeys[i];
+
+ assert (subkey != NULL);
+
+ s->revoked = subkey->revoked;
+ s->expired = subkey->expired;
+ s->cansign = subkey->can_sign && !subkey->invalid;
+ s->name = strdup(subkey->keyid);
+ if (FAILEDTOALLOC(s->name)) {
+ gpgme_key_unref(gpgme_key);
+ return RET_ERROR_OOM;
+ }
+ for (char *p = s->name ; *p != '\0' ; p++) {
+ if (*p >= 'a' && *p <= 'z')
+ *p -= 'a'-'A';
+ }
+ s->name_len = strlen(s->name);
+ if (memcmp(name, s->name + (s->name_len - l), l) == 0)
+ found = i;
+ }
+ assert (subkey == NULL);
+ gpgme_key_unref(gpgme_key);
+ if (found < 0) {
+ fprintf(stderr, "Error: not a valid key id '%s'!\n"
+"Use hex-igits from the end of the key as identifier\n", name);
+ return RET_ERROR;
+ }
+ return found_key(k, found, allow_subkeys, allow_bad,
+ full_condition, key_found, index_found);
+}
+
+static void free_known_key(/*@only@*/struct known_key *k) {
+ int i;
+
+ for (i = 0 ; i < k->count ; i++) {
+ free(k->subkeys[i].name);
+ }
+ free(k);
+}
+
+void free_known_keys(void) {
+ while (known_keys != NULL) {
+ struct known_key *k = known_keys;
+ known_keys = k->next;
+ free_known_key(k);
+ }
+ known_keys = NULL;
+}
+
/* This checks a Release.gpg/Release file pair. requirements is a list of
 * requirements. (as this Release file can be requested by multiple update
 * rules, there can be multiple requirements for one file) */

struct signature_requirement {
	/* next condition */
	struct signature_requirement *next;
	/* the original description for error messages */
	char *condition;
	/* an array of or-connected conditions */
	size_t num_keys;
	struct requested_key keys[];
};
/* allocation size for a requirement with room for n keys in the
 * flexible array member */
#define sizeof_requirement(n) (sizeof(struct signature_requirement) + (n) * sizeof(struct requested_key))
+
+void signature_requirements_free(struct signature_requirement *list) {
+ while (list != NULL) {
+ struct signature_requirement *p = list;
+ list = p->next;
+
+ free(p->condition);
+ free(p);
+ }
+}
+
/* Return true iff one of the signatures is perfectly good (status
 * GPG_ERR_NO_ERROR, key not expired) and was made by the requested
 * key (or, when req->subkey < 0, by any of its subkeys).  Anything
 * less than perfect is silently skipped here; key_good_enough does
 * the warn-and-maybe-accept pass afterwards. */
static bool key_good(const struct requested_key *req, const gpgme_signature_t signatures) {
	const struct known_key *k = req->key;
	gpgme_signature_t sig;

	for (sig = signatures ; sig != NULL ; sig = sig->next) {
		const char *fpr = sig->fpr;
		size_t l = strlen(sig->fpr);
		int i;
		/* while gpg reports the subkey of an key that is expired
		   to be expired to, it does not tell this in the signature,
		   so we use this here... */
		bool key_expired = false;

		if (req->subkey < 0) {
			/* any subkey is allowed: match by keyid suffix */
			for(i = 0 ; i < k->count ; i++) {
				const struct known_subkey *s = &k->subkeys[i];

				if (s->name_len > l)
					continue;
				if (memcmp(s->name, fpr + (l - s->name_len),
							s->name_len) != 0)
					continue;
				key_expired = k->subkeys[i].expired;
				break;
			}
			if (i >= k->count)
				continue;
		} else {
			const struct known_subkey *s;

			assert (req->subkey < k->count);
			s = &k->subkeys[req->subkey];
			if (memcmp(s->name, fpr + (l - s->name_len),
						s->name_len) != 0)
				continue;
			key_expired = k->subkeys[req->subkey].expired;
		}
		/* only accept perfectly good signatures and silently
		   ignore everything else. Those are warned about or
		   even accepted in the run with key_good_enough */
		if (gpg_err_code(sig->status) == GPG_ERR_NO_ERROR
				&& !key_expired)
			return true;
		/* we have to continue otherwise,
		   as another subkey might still follow */
		continue;
	}
	/* no valid signature with this key found */
	return false;
}
+
+static bool key_good_enough(const struct requested_key *req, const gpgme_signature_t signatures, const char *releasegpg, const char *release) {
+ const struct known_key *k = req->key;
+ gpgme_signature_t sig;
+
+ for (sig = signatures ; sig != NULL ; sig = sig->next) {
+ const char *fpr = sig->fpr;
+ size_t l = strlen(sig->fpr);
+ int i;
+ bool key_expired = false; /* dito */
+
+ if (req->subkey < 0) {
+ /* any subkey is allowed */
+ for(i = 0 ; i < k->count ; i++) {
+ const struct known_subkey *s = &k->subkeys[i];
+
+ if (s->name_len > l)
+ continue;
+ if (memcmp(s->name, fpr + (l - s->name_len),
+ s->name_len) != 0)
+ continue;
+ key_expired = k->subkeys[i].expired;
+ break;
+ }
+ if (i >= k->count)
+ continue;
+ } else {
+ const struct known_subkey *s;
+
+ assert (req->subkey < k->count);
+ s = &k->subkeys[req->subkey];
+ if (memcmp(s->name, fpr + (l - s->name_len),
+ s->name_len) != 0)
+ continue;
+ key_expired = k->subkeys[req->subkey].expired;
+ }
+ /* this key we look for. if it is acceptable, we are finished.
+ if it is not acceptable, we still have to look at the other
+ signatures, as a signature with another subkey is following
+ */
+ switch (gpg_err_code(sig->status)) {
+ case GPG_ERR_NO_ERROR:
+ if (! key_expired)
+ return true;
+ if (req->allow_bad && IGNORABLE(expiredkey)) {
+ if (verbose >= 0)
+ fprintf(stderr,
+"WARNING: valid signature in '%s' with parent-expired '%s' is accepted as requested!\n",
+ releasegpg, fpr);
+ return true;
+ }
+ fprintf(stderr,
+"Not accepting valid signature in '%s' with parent-EXPIRED '%s'\n", releasegpg, fpr);
+ if (verbose >= 0)
+ fprintf(stderr,
+"(To ignore it append a ! to the key and run reprepro with --ignore=expiredkey)\n");
+ /* not accepted */
+ continue;
+ case GPG_ERR_KEY_EXPIRED:
+ if (req->allow_bad && IGNORABLE(expiredkey)) {
+ if (verbose >= 0)
+ fprintf(stderr,
+"WARNING: valid signature in '%s' with expired '%s' is accepted as requested!\n",
+ releasegpg, fpr);
+ return true;
+ }
+ fprintf(stderr,
+"Not accepting valid signature in '%s' with EXPIRED '%s'\n", releasegpg, fpr);
+ if (verbose >= 0)
+ fprintf(stderr,
+"(To ignore it append a ! to the key and run reprepro with --ignore=expiredkey)\n");
+ /* not accepted */
+ continue;
+ case GPG_ERR_CERT_REVOKED:
+ if (req->allow_bad && IGNORABLE(revokedkey)) {
+ if (verbose >= 0)
+ fprintf(stderr,
+"WARNING: valid signature in '%s' with revoked '%s' is accepted as requested!\n",
+ releasegpg, fpr);
+ return RET_OK;
+ }
+ fprintf(stderr,
+"Not accepting valid signature in '%s' with REVOKED '%s'\n", releasegpg, fpr);
+ if (verbose >= 0)
+ fprintf(stderr,
+"(To ignore it append a ! to the key and run reprepro with --ignore=revokedkey)\n");
+ /* not accepted */
+ continue;
+ case GPG_ERR_SIG_EXPIRED:
+ if (req->allow_bad && IGNORABLE(expiredsignature)) {
+ if (verbose >= 0)
+ fprintf(stderr,
+"WARNING: valid but expired signature in '%s' with '%s' is accepted as requested!\n",
+ releasegpg, fpr);
+ return RET_OK;
+ }
+ fprintf(stderr,
+"Not accepting valid but EXPIRED signature in '%s' with '%s'\n", releasegpg, fpr);
+ if (verbose >= 0)
+ fprintf(stderr,
+"(To ignore it append a ! to the key and run reprepro with --ignore=expiredsignature)\n");
+ /* not accepted */
+ continue;
+ case GPG_ERR_BAD_SIGNATURE:
+ case GPG_ERR_NO_PUBKEY:
+ /* not accepted */
+ continue;
+ case GPG_ERR_GENERAL:
+ if (release == NULL)
+ fprintf(stderr,
+"gpgme returned an general error verifing signature with '%s' in '%s'!\n"
+"Try running gpg --verify '%s' manually for hints what is happening.\n"
+"If this does not print any errors, retry the command causing this message.\n",
+ fpr, releasegpg,
+ releasegpg);
+ else
+ fprintf(stderr,
+"gpgme returned an general error verifing signature with '%s' in '%s'!\n"
+"Try running gpg --verify '%s' '%s' manually for hints what is happening.\n"
+"If this does not print any errors, retry the command causing this message.\n",
+ fpr, releasegpg,
+ releasegpg, release);
+ continue;
+ /* there sadly no more is a way to make sure we have
+ * all possible ones handled */
+ default:
+ break;
+ }
+ fprintf(stderr,
+"Error checking signature (gpgme returned unexpected value %d)!\n"
+"Please file a bug report, so reprepro can handle this in the future.\n",
+ gpg_err_code(sig->status));
+ return false;
+ }
+ return false;
+}
+
+retvalue signature_requirement_add(struct signature_requirement **list_p, const char *condition) {
+ struct signature_requirement *req;
+ const char *full_condition = condition;
+ retvalue r;
+
+ r = signature_init(false);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ if (condition == NULL || strcmp(condition, "blindtrust") == 0)
+ return RET_NOTHING;
+
+ /* no need to add the same condition multiple times */
+ for (req = *list_p ; req != NULL ; req = req->next) {
+ if (strcmp(req->condition, condition) == 0)
+ return RET_NOTHING;
+ }
+
+ req = malloc(sizeof_requirement(1));
+ if (FAILEDTOALLOC(req))
+ return RET_ERROR_OOM;
+ req->next = NULL;
+ req->condition = strdup(condition);
+ if (FAILEDTOALLOC(req->condition)) {
+ free(req);
+ return RET_ERROR_OOM;
+ }
+ req->num_keys = 0;
+ do {
+ bool allow_subkeys, allow_bad;
+ char *next_key;
+
+ r = parse_condition_part(&allow_subkeys, &allow_bad,
+ full_condition, &condition, &next_key);
+ ASSERT_NOT_NOTHING(r);
+ if (RET_WAS_ERROR(r)) {
+ signature_requirements_free(req);
+ return r;
+ }
+ req->keys[req->num_keys].allow_bad = allow_bad;
+ r = load_key(next_key, allow_subkeys, allow_bad,
+ full_condition,
+ &req->keys[req->num_keys].key,
+ &req->keys[req->num_keys].subkey);
+ free(next_key);
+ if (RET_WAS_ERROR(r)) {
+ signature_requirements_free(req);
+ return r;
+ }
+ req->num_keys++;
+
+ if (*condition != '\0') {
+ struct signature_requirement *h;
+
+ h = realloc(req, sizeof_requirement(req->num_keys+1));
+ if (FAILEDTOALLOC(h)) {
+ signature_requirements_free(req);
+ return r;
+ }
+ req = h;
+ } else
+ break;
+ } while (true);
+ req->next = *list_p;
+ *list_p = req;
+ return RET_OK;
+}
+
+static void print_signatures(FILE *f, gpgme_signature_t s, const char *releasegpg) {
+ char timebuffer[20];
+ struct tm *tm;
+ time_t t;
+
+ if (s == NULL) {
+ fprintf(f, "gpgme reported no signatures in '%s':\n"
+"Either there are really none or something else is strange.\n"
+"One known reason for this effect is forgeting -b when signing.\n",
+ releasegpg);
+ return;
+ }
+
+ fprintf(f, "Signatures in '%s':\n", releasegpg);
+ for (; s != NULL ; s = s->next) {
+ t = s->timestamp; tm = localtime(&t);
+ strftime(timebuffer, 19, "%Y-%m-%d", tm);
+ fprintf(f, "'%s' (signed %s): ", s->fpr, timebuffer);
+ switch (gpg_err_code(s->status)) {
+ case GPG_ERR_NO_ERROR:
+ fprintf(f, "valid\n");
+ continue;
+ case GPG_ERR_KEY_EXPIRED:
+ fprintf(f, "expired key\n");
+ continue;
+ case GPG_ERR_CERT_REVOKED:
+ fprintf(f, "key revoced\n");
+ continue;
+ case GPG_ERR_SIG_EXPIRED:
+ t = s->exp_timestamp; tm = localtime(&t);
+ strftime(timebuffer, 19, "%Y-%m-%d", tm);
+ fprintf(f, "expired signature (since %s)\n",
+ timebuffer);
+ continue;
+ case GPG_ERR_BAD_SIGNATURE:
+ fprintf(f, "bad signature\n");
+ continue;
+ case GPG_ERR_NO_PUBKEY:
+ fprintf(f, "missing pubkey\n");
+ continue;
+ default:
+ fprintf(f, "unknown\n");
+ continue;
+ }
+ }
+}
+
/* Evaluate the result of the preceding gpgme_op_verify call against
 * every requirement in the list; each requirement must be satisfied
 * by at least one of its alternate keys, or RET_ERROR_BADSIG is
 * returned. */
static inline retvalue verify_signature(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename) {
	gpgme_verify_result_t result;
	int i;
	const struct signature_requirement *req;

	result = gpgme_op_verify_result(context);
	if (result == NULL) {
		fprintf(stderr,
"Internal error communicating with libgpgme: no result record!\n\n");
		return RET_ERROR_GPGME;
	}

	for (req = requirements ; req != NULL ; req = req->next) {
		bool fulfilled = false;

		/* check first for good signatures, and then for good enough
		   signatures, to not pester the user with warnings of one
		   of the alternate keys, if the last one is good enough */

		for (i = 0 ; (size_t)i < req->num_keys ; i++) {

			if (key_good(&req->keys[i], result->signatures)) {
				fulfilled = true;
				break;
			}
		}
		for (i = 0 ; !fulfilled && (size_t)i < req->num_keys ; i++) {

			if (key_good_enough(&req->keys[i], result->signatures,
						releasegpg, releasename)) {
				fulfilled = true;
				break;
			}
		}
		if (!fulfilled) {
			fprintf(stderr,
"ERROR: Condition '%s' not fulfilled for '%s'.\n",
					req->condition, releasegpg);
			print_signatures(stderr, result->signatures,
					releasegpg);
			return RET_ERROR_BADSIG;
		}
		if (verbose > 10) {
			fprintf(stdout, "Condition '%s' fulfilled for '%s'.\n",
					req->condition, releasegpg);
		}
	}
	if (verbose > 20)
		print_signatures(stdout, result->signatures, releasegpg);
	return RET_OK;
}
+
+retvalue signature_check(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename, const char *releasedata, size_t releaselen) {
+ gpg_error_t err;
+ int gpgfd;
+ gpgme_data_t dh, dh_gpg;
+
+ assert (requirements != NULL);
+
+ if (FAILEDTOALLOC(releasedata) || FAILEDTOALLOC(releasegpg))
+ return RET_ERROR_OOM;
+
+ assert (context != NULL);
+
+ /* Read the file and its signature into memory: */
+ gpgfd = open(releasegpg, O_RDONLY|O_NOCTTY);
+ if (gpgfd < 0) {
+ int e = errno;
+ fprintf(stderr, "Error opening '%s': %s\n",
+ releasegpg, strerror(e));
+ return RET_ERRNO(e);
+ }
+ err = gpgme_data_new_from_fd(&dh_gpg, gpgfd);
+ if (err != 0) {
+ (void)close(gpgfd);
+ fprintf(stderr, "Error reading '%s':\n", releasegpg);
+ return gpgerror(err);
+ }
+ err = gpgme_data_new_from_mem(&dh, releasedata, releaselen, 0);
+ if (err != 0) {
+ gpgme_data_release(dh_gpg);
+ return gpgerror(err);
+ }
+
+ /* Verify the signature */
+
+ err = gpgme_op_verify(context, dh_gpg, dh, NULL);
+ gpgme_data_release(dh_gpg);
+ gpgme_data_release(dh);
+ close(gpgfd);
+ if (err != 0) {
+ fprintf(stderr, "Error verifying '%s':\n", releasegpg);
+ return gpgerror(err);
+ }
+
+ return verify_signature(requirements, releasegpg, releasename);
+}
+
+/* Check the inline (clearsigned) signature of 'filename' and extract
+ * the signed chunk into *chunk_p (newly allocated).  A file without
+ * any signature is only accepted when no requirements are configured.
+ * Returns RET_OK on success, RET_ERROR_BADSIG or another error code
+ * otherwise. */
+retvalue signature_check_inline(const struct signature_requirement *requirements, const char *filename, char **chunk_p) {
+	gpg_error_t err;
+	gpgme_data_t dh, dh_gpg;
+	int fd;
+
+	fd = open(filename, O_RDONLY|O_NOCTTY);
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error opening '%s': %s\n",
+				filename, strerror(e));
+		return RET_ERRNO(e);
+	}
+	err = gpgme_data_new_from_fd(&dh_gpg, fd);
+	if (err != 0) {
+		(void)close(fd);
+		return gpgerror(err);
+	}
+
+	err = gpgme_data_new(&dh);
+	if (err != 0) {
+		(void)close(fd);
+		gpgme_data_release(dh_gpg);
+		return gpgerror(err);
+	}
+	err = gpgme_op_verify(context, dh_gpg, NULL, dh);
+	(void)close(fd);
+	if (gpg_err_code(err) == GPG_ERR_NO_DATA) {
+		/* no signature at all: re-read the file as plain text
+		 * and only accept it if nothing was required */
+		char *chunk; const char *n;
+		size_t len;
+		retvalue r;
+
+		gpgme_data_release(dh);
+		gpgme_data_release(dh_gpg);
+
+		r = readtextfile(filename, filename, &chunk, &len);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r))
+			return r;
+
+		assert (chunk[len] == '\0');
+		len = chunk_extract(chunk, chunk, len, false, &n);
+		if (chunk[0] == '-' || *n != '\0') {
+			fprintf(stderr,
+"Cannot parse '%s': found no signature but does not looks safe to be assumed unsigned, either.\n",
+				filename);
+			free(chunk);
+			return RET_ERROR;
+		}
+		if (requirements != NULL) {
+			free(chunk);
+			return RET_ERROR_BADSIG;
+		}
+		fprintf(stderr,
+"WARNING: No signature found in %s, assuming it is unsigned!\n",
+			filename);
+		assert (chunk[len] == '\0');
+		/* shrink to the used size; keep the big buffer if the
+		 * realloc fails, it is still valid */
+		*chunk_p = realloc(chunk, len+1);
+		if (FAILEDTOALLOC(*chunk_p))
+			*chunk_p = chunk;
+		return RET_OK;
+	} else {
+		char *plain_data, *chunk;
+		const char *n;
+		size_t plain_len, len;
+		bool malformed;
+		retvalue r;
+
+		if (err != 0) {
+			gpgme_data_release(dh_gpg);
+			gpgme_data_release(dh);
+			return gpgerror(err);
+		}
+		gpgme_data_release(dh_gpg);
+		plain_data = gpgme_data_release_and_get_mem(dh, &plain_len);
+		if (plain_data == NULL) {
+			fprintf(stderr,
+"Error: libgpgme failed to extract the plain data out of\n"
+"'%s'.\n"
+"While it did so in a way indicating running out of memory, experience says\n"
+"this also happens when gpg returns a error code it does not understand.\n"
+"To check this please try running gpg --verify '%s' manually.\n"
+"Continuing extracting it ignoring all signatures...",
+					filename, filename);
+			return RET_ERROR;
+		}
+		chunk = malloc(plain_len+1);
+		if (FAILEDTOALLOC(chunk)) {
+			/* fix: plain_data was leaked here before */
+#ifdef HAVE_GPGPME_FREE
+			gpgme_free(plain_data);
+#else
+			free(plain_data);
+#endif
+			return RET_ERROR_OOM;
+		}
+		len = chunk_extract(chunk, plain_data, plain_len, false, &n);
+		/* fix: n points into plain_data, so remember whether all
+		 * of it was consumed before that buffer is freed (the
+		 * old code compared against the freed pointer) */
+		malformed = plain_len != (size_t)(n - plain_data);
+#ifdef HAVE_GPGPME_FREE
+		gpgme_free(plain_data);
+#else
+		free(plain_data);
+#endif
+		assert (len <= plain_len);
+		if (malformed) {
+			fprintf(stderr,
+"Cannot parse '%s': extraced signed data looks malformed.\n",
+				filename);
+			r = RET_ERROR;
+		} else
+			r = verify_signature(requirements, filename, NULL);
+		if (RET_IS_OK(r)) {
+			*chunk_p = realloc(chunk, len+1);
+			if (FAILEDTOALLOC(*chunk_p))
+				*chunk_p = chunk;
+		} else
+			free(chunk);
+		return r;
+	}
+}
+#else /* HAVE_LIBGPGME */
+
+/* Stub used when built without libgpgme: signature checking is
+ * impossible, so always fail (the caller asserts requirements exist). */
+retvalue signature_check(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename, const char *releasedata, size_t releaselen) {
+	assert (requirements != NULL);
+
+	if (FAILEDTOALLOC(releasedata) || FAILEDTOALLOC(releasegpg))
+		return RET_ERROR_OOM;
+	/* fix: the message claimed the binary *was* compiled with
+	 * libgpgme support, which is the opposite of this branch */
+	fprintf(stderr,
+"ERROR: Cannot check signatures as this reprepro binary is not compiled with\n"
+"support for libgpgme.\n"); // TODO: "Only running external programs is supported.\n"
+	return RET_ERROR_GPGME;
+}
+
+/* Without libgpgme: only accept 'filename' when no signature
+ * requirements are configured.  Strips an armored signature block if
+ * one is found and returns the plain data in *chunk_p. */
+retvalue signature_check_inline(const struct signature_requirement *requirements, const char *filename, char **chunk_p) {
+	retvalue r;
+	char *chunk; size_t len;
+	const char *n;
+
+	if (requirements != NULL) {
+		/* fix: wording claimed libgpgme support was compiled in */
+		fprintf(stderr,
+"ERROR: Cannot check signatures as this reprepro binary is not compiled with\n"
+"support for libgpgme.\n");
+		return RET_ERROR_GPGME;
+	}
+	r = readtextfile(filename, filename, &chunk, &len);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (chunk[len] == '\0');
+
+	len = chunk_extract(chunk, chunk, len, false, &n);
+	if (len == 0) {
+		fprintf(stderr, "Could not find any data within '%s'!\n",
+				filename);
+		free(chunk);
+		return RET_ERROR;
+	}
+	if (chunk[0] == '-') {
+		/* looks like an armored signature block: the signed data
+		 * is the following chunk, up to the next ----- marker */
+		const char *endmarker;
+
+		if (len < 10 || memcmp(chunk, "-----BEGIN", 10) != 0) {
+			fprintf(stderr,
+"Strange content of '%s': First non-space character is '-',\n"
+"but it does not begin with '-----BEGIN'.\n", filename);
+			free(chunk);
+			return RET_ERROR;
+		}
+		len = chunk_extract(chunk, n, strlen(n), false, &n);
+
+		endmarker = strstr(chunk, "\n-----");
+		if (endmarker != NULL) {
+			endmarker++;
+			assert ((size_t)(endmarker-chunk) < len);
+			len = endmarker-chunk;
+			chunk[len] = '\0';
+		} else if (*n == '\0') {
+			fprintf(stderr,
+"ERROR: Could not find end marker of signed data within '%s'.\n"
+"Cannot determine what is data and what is not!\n",
+				filename);
+			free(chunk);
+			return RET_ERROR;
+		} else if (strncmp(n, "-----", 5) != 0) {
+			fprintf(stderr,
+"ERROR: Spurious empty line within '%s'.\n"
+"Cannot determine what is data and what is not!\n",
+				filename);
+			free(chunk);
+			return RET_ERROR;
+		}
+	} else {
+		if (*n != '\0') {
+			fprintf(stderr,
+"Cannot parse '%s': found no signature but does not looks safe to be assumed unsigned, either.\n",
+				filename);
+			/* fix: chunk was leaked on this error path */
+			free(chunk);
+			return RET_ERROR;
+		}
+		fprintf(stderr,
+"WARNING: No signature found in %s, assuming it is unsigned!\n",
+			filename);
+	}
+	assert (chunk[len] == '\0');
+	/* shrink to the used size; keep the old buffer on failure */
+	*chunk_p = realloc(chunk, len+1);
+	if (FAILEDTOALLOC(*chunk_p))
+		*chunk_p = chunk;
+	return RET_OK;
+}
+
+/* Release a requirement list.  In this build without libgpgme,
+ * signature_requirement_add below never stores anything into the list,
+ * so p is presumably always NULL here — and free(NULL) is a no-op. */
+void signature_requirements_free(/*@only@*/struct signature_requirement *p) {
+ free(p);
+}
+
+/* Without libgpgme only the special condition "blindtrust" (or none at
+ * all) can be accepted; anything else would need actual verification. */
+retvalue signature_requirement_add(UNUSED(struct signature_requirement **x), const char *condition) {
+	if (condition == NULL || strcmp(condition, "blindtrust") == 0)
+		return RET_NOTHING;
+
+	/* fix: the message claimed the binary *was* compiled with
+	 * libgpgme support, which is the opposite of this branch */
+	fprintf(stderr,
+"ERROR: Cannot check signatures as this reprepro binary is not compiled with\n"
+"support for libgpgme.\n"); // TODO: "Only running external programs is supported.\n"
+	return RET_ERROR_GPGME;
+}
+
+/* Nothing to do: without libgpgme no key information is ever cached. */
+void free_known_keys(void) {
+}
+
+#endif /* HAVE_LIBGPGME */
diff --git a/signature_p.h b/signature_p.h
new file mode 100644
index 0000000..4a2c78b
--- /dev/null
+++ b/signature_p.h
@@ -0,0 +1,18 @@
+/* Private declarations shared between signature.c and signedfile.c:
+ * the process-wide gpgme context and the gpgme error translator. */
+#ifndef REPREPRO_SIGNATURE_P_H
+#define REPREPRO_SIGNATURE_P_H
+
+#ifdef HAVE_LIBGPGME
+#include <gpg-error.h>
+#include <gpgme.h>
+
+/* the single shared gpgme context; presumably set up by
+ * signature_init() — confirm in signature.c */
+extern gpgme_ctx_t context;
+#endif
+
+#include "globals.h"
+#include "error.h"
+#include "signature.h"
+
+#ifdef HAVE_LIBGPGME
+/* translate a gpgme error code into a retvalue (printing a message) */
+retvalue gpgerror(gpg_error_t err);
+#endif
+#endif
diff --git a/signedfile.c b/signedfile.c
new file mode 100644
index 0000000..2dfa348
--- /dev/null
+++ b/signedfile.c
@@ -0,0 +1,502 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2009,2010,2012 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <time.h>
+#include <string.h>
+#include <fcntl.h>
+
+#include "signature_p.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "dirs.h"
+#include "names.h"
+#include "chunks.h"
+#include "release.h"
+#include "filecntl.h"
+#include "hooks.h"
+
+#ifdef HAVE_LIBGPGME
+/* Diagnose the situation where gpgme reported success but produced no
+ * signature: print a hint how to reproduce the problem with plain gpg.
+ * Returns RET_OK if a signature was created after all. */
+static retvalue check_signature_created(bool clearsign, bool willcleanup, /*@null@*/const struct strlist *options, const char *filename, const char *signaturename) {
+	gpgme_sign_result_t signresult;
+	char *uidoptions;
+	int i;
+
+	signresult = gpgme_op_sign_result(context);
+	if (signresult != NULL && signresult->signatures != NULL)
+		return RET_OK;
+	/* in an ideal world, this point is never reached.
+	 * Sadly it is and people are obviously confused by it,
+	 * so do some work to give helpful messages. */
+	if (options != NULL) {
+		assert (options->count > 0);
+		/* build " -u 'key1' -u 'key2' ..." for the suggestion */
+		uidoptions = mprintf(" -u '%s'", options->values[0]);
+		for (i = 1 ;
+		     uidoptions != NULL && i < options->count ;
+		     i++) {
+			char *u = mprintf("%s -u '%s'", uidoptions,
+					options->values[i]);
+			free(uidoptions);
+			uidoptions = u;
+		}
+		if (FAILEDTOALLOC(uidoptions))
+			return RET_ERROR_OOM;
+	} else
+		uidoptions = NULL;
+
+	if (signresult == NULL)
+		fputs(
+"Error: gpgme returned NULL unexpectedly for gpgme_op_sign_result\n", stderr);
+	else
+		fputs("Error: gpgme created no signature!\n", stderr);
+	fputs(
+"This most likely means gpg is confused or produces some error libgpgme is\n"
+"not able to understand. Try running\n", stderr);
+	if (willcleanup)
+		fprintf(stderr,
+"gpg %s --output 'some-other-file' %s 'some-file'\n",
+			(uidoptions==NULL)?"":uidoptions,
+			clearsign?"--clearsign":"--detach-sign");
+	else
+		fprintf(stderr,
+"gpg %s --output '%s' %s '%s'\n",
+			(uidoptions==NULL)?"":uidoptions,
+			signaturename,
+			clearsign?"--clearsign":"--detach-sign",
+			filename);
+	fputs(
+"for hints what this error might have been. (Sometimes just running\n"
+"it once manually seems also to help...)\n", stderr);
+	/* fix: uidoptions was leaked on this return before */
+	free(uidoptions);
+	return RET_ERROR_GPGME;
+}
+
+/* Write the signature collected in dh_gpg into the file
+ * 'signaturename' (which must not yet exist) and release dh_gpg. */
+static retvalue signature_to_file(gpgme_data_t dh_gpg, const char *signaturename) {
+ char *signature_data;
+ const char *p;
+ size_t signature_len;
+ ssize_t written;
+ int fd, e, ret;
+
+ signature_data = gpgme_data_release_and_get_mem(dh_gpg, &signature_len);
+ if (FAILEDTOALLOC(signature_data))
+ return RET_ERROR_OOM;
+ /* O_EXCL: the caller is expected to have removed a previous file */
+ fd = open(signaturename, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_NOFOLLOW, 0666);
+ if (fd < 0) {
+ free(signature_data);
+ return RET_ERRNO(errno);
+ }
+ p = signature_data;
+ /* write(2) may write less than requested, so loop: */
+ while (signature_len > 0) {
+ written = write(fd, p, signature_len);
+ if (written < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d writing to %s: %s\n",
+ e, signaturename,
+ strerror(e));
+ free(signature_data);
+ (void)close(fd);
+ return RET_ERRNO(e);
+ }
+ signature_len -= written;
+ p += written;
+ }
+/* NOTE(review): 'GPGPME' looks like a typo for 'GPGME'; if configure
+ * defines HAVE_GPGME_FREE this always takes the free() branch — verify
+ * against configure.ac before changing. */
+#ifdef HAVE_GPGPME_FREE
+ gpgme_free(signature_data);
+#else
+ free(signature_data);
+#endif
+ /* close() flushes; an error here means the file is incomplete */
+ ret = close(fd);
+ if (ret < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d writing to %s: %s\n",
+ e, signaturename,
+ strerror(e));
+ return RET_ERRNO(e);
+ }
+ if (verbose > 1) {
+ printf("Successfully created '%s'\n", signaturename);
+ }
+ return RET_OK;
+}
+
+/* Sign the data in dh (clearsigned or detached, depending on
+ * 'clearsign') and write the result to 'signaturename'. */
+static retvalue create_signature(bool clearsign, gpgme_data_t dh, /*@null@*/const struct strlist *options, const char *filename, const char *signaturename, bool willcleanup) {
+	gpg_error_t err;
+	gpgme_data_t dh_gpg;
+	retvalue r;
+
+	err = gpgme_data_new(&dh_gpg);
+	if (err != 0)
+		return gpgerror(err);
+	err = gpgme_op_sign(context, dh, dh_gpg,
+			clearsign?GPGME_SIG_MODE_CLEAR:GPGME_SIG_MODE_DETACH);
+	if (err != 0) {
+		/* fix: dh_gpg was leaked on this error path before */
+		gpgme_data_release(dh_gpg);
+		return gpgerror(err);
+	}
+	r = check_signature_created(clearsign, willcleanup,
+			options, filename, signaturename);
+	if (RET_WAS_ERROR(r)) {
+		gpgme_data_release(dh_gpg);
+		return r;
+	}
+	/* releases dh_gpg: */
+	return signature_to_file(dh_gpg, signaturename);
+}
+
+/* Create a detached signature (into 'signaturename') and a clearsigned
+ * copy (into 'clearsignfilename') of the in-memory data using libgpgme.
+ * 'options' lists the key ids to sign with; a single "yes"/"default"
+ * selects gpg's default key. */
+static retvalue signature_sign(const struct strlist *options, const char *filename, void *data, size_t datalen, const char *signaturename, const char *clearsignfilename, bool willcleanup) {
+	retvalue r;
+	int i;
+	gpg_error_t err;
+	gpgme_data_t dh;
+
+	assert (options != NULL && options->count > 0);
+	assert (options->values[0][0] != '!');
+
+	r = signature_init(false);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	gpgme_signers_clear(context);
+	if (options->count == 1 &&
+			(strcasecmp(options->values[0], "yes") == 0 ||
+			 strcasecmp(options->values[0], "default") == 0)) {
+		/* use default options */
+		options = NULL;
+	} else for (i = 0 ; i < options->count ; i++) {
+		const char *option = options->values[i];
+		gpgme_key_t key;
+
+		err = gpgme_op_keylist_start(context, option, 1);
+		if (err != 0)
+			return gpgerror(err);
+		err = gpgme_op_keylist_next(context, &key);
+		if (gpg_err_code(err) == GPG_ERR_EOF) {
+			fprintf(stderr,
+"Could not find any key matching '%s'!\n", option);
+			/* fix: terminate the pending key listing */
+			(void)gpgme_op_keylist_end(context);
+			return RET_ERROR;
+		}
+		if (err != 0) {
+			/* fix: other keylist errors were previously
+			 * ignored, leaving 'key' undefined */
+			(void)gpgme_op_keylist_end(context);
+			return gpgerror(err);
+		}
+		err = gpgme_signers_add(context, key);
+		gpgme_key_unref(key);
+		if (err != 0) {
+			gpgme_op_keylist_end(context);
+			return gpgerror(err);
+		}
+		gpgme_op_keylist_end(context);
+	}
+
+	err = gpgme_data_new_from_mem(&dh, data, datalen, 0);
+	if (err != 0) {
+		return gpgerror(err);
+	}
+
+	/* first the detached signature: */
+	r = create_signature(false, dh, options,
+			filename, signaturename, willcleanup);
+	if (RET_WAS_ERROR(r)) {
+		gpgme_data_release(dh);
+		return r;
+	}
+	/* rewind the data buffer for the second signing pass: */
+	i = gpgme_data_seek(dh, 0, SEEK_SET);
+	if (i < 0) {
+		int e = errno;
+		fprintf(stderr,
+"Error %d rewinding gpgme's data buffer to start: %s\n",
+			e, strerror(e));
+		gpgme_data_release(dh);
+		return RET_ERRNO(e);
+	}
+	/* then the clearsigned variant: */
+	r = create_signature(true, dh, options,
+			filename, clearsignfilename, willcleanup);
+	gpgme_data_release(dh);
+	if (RET_WAS_ERROR(r))
+		return r;
+	return RET_OK;
+}
+#endif /* HAVE_LIBGPGME */
+
+/* Sign 'filename' by running the external hook given as second value of
+ * options ('SignWith: !hook').  The hook is called as
+ *   command unsigned-file clearsign-file detached-file
+ * with an empty string for a file that is not requested.  Afterwards
+ * check that the requested files were actually created and non-empty. */
+static retvalue signature_with_extern(const struct strlist *options, const char *filename, const char *clearsignfilename, char **detachedfilename_p) {
+ const char *clearsign;
+ const char *detached;
+ struct stat s;
+ int status;
+ pid_t child, found;
+ const char *command;
+
+ assert (options->count == 2);
+ command = options->values[1];
+ /* empty string signals the hook that the file is not wanted: */
+ clearsign = (clearsignfilename == NULL)?"":clearsignfilename;
+ detached = (*detachedfilename_p == NULL)?"":*detachedfilename_p;
+
+ if (interrupted())
+ return RET_ERROR_INTERRUPTED;
+
+ if (lstat(filename, &s) != 0 || !S_ISREG(s.st_mode)) {
+ fprintf(stderr, "Internal error: lost unsigned file '%s'?!\n",
+ filename);
+ return RET_ERROR;
+ }
+
+ child = fork();
+ if (child == 0) {
+ /* Try to close all open fd but 0,1,2 */
+ closefrom(3);
+ sethookenvironment(NULL, NULL, NULL, NULL);
+ (void)execl(command, command, filename,
+ clearsign, detached, ENDOFARGUMENTS);
+ fprintf(stderr, "Error executing '%s' '%s' '%s' '%s': %s\n",
+ command, filename, clearsign, detached,
+ strerror(errno));
+ _exit(255);
+ }
+ if (child < 0) {
+ int e = errno;
+ fprintf(stderr, "Error forking: %d=%s!\n", e, strerror(e));
+ return RET_ERRNO(e);
+ }
+ errno = 0;
+ /* retry waitpid when interrupted by a signal: */
+ while ((found = waitpid(child, &status, 0)) < 0) {
+ int e = errno;
+ if (e != EINTR) {
+ fprintf(stderr,
+"Error %d waiting for signing-command child %ld: %s!\n",
+ e, (long)child, strerror(e));
+ return RET_ERRNO(e);
+ }
+ }
+ if (found != child) {
+ fprintf(stderr,
+"Confusing return value %ld from waitpid(%ld, ..., 0)", (long)found, (long)child);
+ return RET_ERROR;
+ }
+ if (!WIFEXITED(status)) {
+ fprintf(stderr,
+"Error: Signing-hook '%s' called with arguments '%s' '%s' '%s' terminated abnormally!\n",
+ command, filename, clearsign, detached);
+ return RET_ERROR;
+ }
+ if (WEXITSTATUS(status) != 0) {
+ fprintf(stderr,
+"Error: Signing-hook '%s' called with arguments '%s' '%s' '%s' returned with exit code %d!\n",
+ command, filename, clearsign, detached,
+ (int)(WEXITSTATUS(status)));
+ return RET_ERROR;
+ }
+ /* a requested clearsigned file must exist and be non-empty: */
+ if (clearsignfilename != NULL) {
+ if (lstat(clearsign, &s) != 0 || !S_ISREG(s.st_mode)) {
+ fprintf(stderr,
+"Error: Script '%s' did not generate '%s'!\n",
+ command, clearsign);
+ return RET_ERROR;
+ } else if (s.st_size == 0) {
+ fprintf(stderr,
+"Error: Script '%s' created an empty '%s' file!\n",
+ command, clearsign);
+ return RET_ERROR;
+ }
+ }
+ if (*detachedfilename_p != NULL) {
+ if (lstat(detached, &s) != 0 || !S_ISREG(s.st_mode)) {
+ /* no detached signature, no an error if there
+ * was a clearsigned file:*/
+ if (clearsignfilename == NULL) {
+ fprintf(stderr,
+"Error: Script '%s' did not generate '%s'!\n",
+ command, detached);
+ return RET_ERROR;
+ } else {
+ if (verbose > 1)
+ fprintf(stderr,
+"Ignoring legacy detached signature '%s' not generated by '%s'\n",
+ detached, command);
+ /* tell the caller not to install the file */
+ detached = NULL;
+ free(*detachedfilename_p);
+ *detachedfilename_p = NULL;
+ }
+ } else if (s.st_size == 0) {
+ fprintf(stderr,
+"Error: Script '%s' created an empty '%s' file!\n",
+ command, detached);
+ return RET_ERROR;
+ }
+ }
+ return RET_OK;
+}
+
+/* Accumulates the content of a to-be-signed file in memory until
+ * signedfile_create() writes it out and creates the signatures. */
+struct signedfile {
+ /* first error hit while buffering; reported by signedfile_create */
+ retvalue result;
+#define DATABUFFERUNITS (128ul * 1024ul)
+ /* bufferlen bytes used within buffer (allocated size buffersize) */
+ size_t bufferlen, buffersize;
+ char *buffer;
+};
+
+/* Allocate a new, empty signedfile with one DATABUFFERUNITS-sized
+ * buffer.  Returns RET_ERROR_OOM if memory is exhausted. */
+retvalue signature_startsignedfile(struct signedfile **out) {
+	struct signedfile *sf;
+
+	sf = zNEW(struct signedfile);
+	if (FAILEDTOALLOC(sf))
+		return RET_ERROR_OOM;
+	sf->buffer = malloc(DATABUFFERUNITS);
+	if (FAILEDTOALLOC(sf->buffer)) {
+		free(sf);
+		return RET_ERROR_OOM;
+	}
+	sf->bufferlen = 0;
+	sf->buffersize = DATABUFFERUNITS;
+	*out = sf;
+	return RET_OK;
+}
+
+/* Release a signedfile and its data buffer; a NULL argument is ok. */
+void signedfile_free(struct signedfile *f) {
+	if (f == NULL)
+		return;
+	free(f->buffer);
+	free(f);
+}
+
+
+/* store data into buffer */
+void signedfile_write(struct signedfile *f, const void *data, size_t len) {
+
+ /* no need to try anything if there already was an error */
+ if (RET_WAS_ERROR(f->result))
+ return;
+
+ if (len > f->buffersize - f->bufferlen) {
+ size_t blocks = (len + f->bufferlen)/DATABUFFERUNITS;
+ size_t newsize = (blocks + 1) * DATABUFFERUNITS;
+ char *newbuffer;
+
+ /* realloc is wasteful, but should not happen too often */
+ newbuffer = realloc(f->buffer, newsize);
+ if (FAILEDTOALLOC(newbuffer)) {
+ free(f->buffer);
+ f->buffer = NULL;
+ f->result = RET_ERROR_OOM;
+ return;
+ }
+ f->buffer = newbuffer;
+ f->buffersize = newsize;
+ assert (f->bufferlen < f->buffersize);
+ }
+ assert (len <= f->buffersize - f->bufferlen);
+ memcpy(f->buffer + f->bufferlen, data, len);
+ f->bufferlen += len;
+ assert (f->bufferlen <= f->buffersize);
+}
+
+/* Write the buffered data to 'newplainfilename' and, if signing options
+ * are configured, create the clearsigned and/or detached signature
+ * files.  When no signing is requested the signature file names are
+ * freed and reset so the caller does not try to install them. */
+retvalue signedfile_create(struct signedfile *f, const char *newplainfilename, char **newsignedfilename_p, char **newdetachedsignature_p, const struct strlist *options, bool willcleanup) {
+	size_t len, ofs;
+	int fd, ret;
+
+	/* report errors remembered from signedfile_write */
+	if (RET_WAS_ERROR(f->result))
+		return f->result;
+
+	/* write content to file */
+
+	assert (newplainfilename != NULL);
+
+	(void)dirs_make_parent(newplainfilename);
+	(void)unlink(newplainfilename);
+
+	fd = open(newplainfilename, O_WRONLY|O_CREAT|O_TRUNC|O_NOCTTY, 0666);
+	if (fd < 0) {
+		int e = errno;
+		fprintf(stderr, "Error creating file '%s': %s\n",
+				newplainfilename,
+				strerror(e));
+		return RET_ERRNO(e);
+	}
+	ofs = 0;
+	len = f->bufferlen;
+	/* write(2) may write less than requested, so loop: */
+	while (len > 0) {
+		ssize_t written;
+
+		written = write(fd, f->buffer + ofs, len);
+		if (written < 0) {
+			int e = errno;
+			fprintf(stderr, "Error %d writing to file '%s': %s\n",
+					e, newplainfilename,
+					strerror(e));
+			(void)close(fd);
+			return RET_ERRNO(e);
+		}
+		assert ((size_t)written <= len);
+		ofs += written;
+		len -= written;
+	}
+	ret = close(fd);
+	if (ret < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d writing to file '%s': %s\n",
+				e, newplainfilename,
+				strerror(e));
+		return RET_ERRNO(e);
+	}
+	/* now do the actual signing */
+	if (options != NULL && options->count > 0) {
+		retvalue r;
+		const char *newsigned = *newsignedfilename_p;
+		const char *newdetached = *newdetachedsignature_p;
+
+		/* make sure the new files do not already exist: */
+		if (unlink(newdetached) != 0 && errno != ENOENT) {
+			fprintf(stderr,
+"Could not remove '%s' to prepare replacement: %s\n",
+				newdetached, strerror(errno));
+			return RET_ERROR;
+		}
+		if (unlink(newsigned) != 0 && errno != ENOENT) {
+			fprintf(stderr,
+"Could not remove '%s' to prepare replacement: %s\n",
+				newsigned, strerror(errno));
+			return RET_ERROR;
+		}
+		/* if an hook is given, use that instead */
+		if (options->values[0][0] == '!')
+			r = signature_with_extern(options, newplainfilename,
+					newsigned, newdetachedsignature_p);
+		else
+#ifdef HAVE_LIBGPGME
+			r = signature_sign(options,
+					newplainfilename,
+					f->buffer, f->bufferlen,
+					newdetached, newsigned,
+					willcleanup);
+#else /* HAVE_LIBGPGME */
+		/* fix: braces added — the unconditional return here used
+		 * to be outside the else-branch, so even a successful
+		 * '!hook' run returned RET_ERROR_GPGME; also fixed the
+		 * "creature" typo and the unbalanced parenthesis */
+		{
+			fputs(
+"ERROR: Cannot create signatures as this reprepro binary is not compiled\n"
+"with support for libgpgme. (Only external signing using 'Signwith: !hook'\n"
+"is supported.)\n", stderr);
+			return RET_ERROR_GPGME;
+		}
+#endif
+		if (RET_WAS_ERROR(r))
+			return r;
+	} else {
+		/* no signatures requested */
+		free(*newsignedfilename_p);
+		*newsignedfilename_p = NULL;
+		free(*newdetachedsignature_p);
+		*newdetachedsignature_p = NULL;
+	}
+	return RET_OK;
+}
diff --git a/sizes.c b/sizes.c
new file mode 100644
index 0000000..b10ccab
--- /dev/null
+++ b/sizes.c
@@ -0,0 +1,256 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2011 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include "error.h"
+#include "strlist.h"
+#include "distribution.h"
+#include "database.h"
+#include "database_p.h"
+#include "files.h"
+#include "sizes.h"
+
+/* book-keeping for one distribution while walking the references
+ * database */
+struct distribution_sizes {
+ struct distribution_sizes *next;
+ /* codename as matched against reference entries; either borrowed
+ * from struct distribution or pointing at the owned copy v */
+ const char *codename;
+ /* owned name copy for distributions discovered on the fly */
+ char *v;
+ size_t codename_len;
+ struct {
+ /* total size of all referenced files / of files referenced
+ * by no other distribution */
+ unsigned long long all, onlyhere;
+ } this, withsnapshots;
+ /* per-file flags: already counted for this distribution
+ * (resp. only for one of its snapshots)? */
+ bool seen, seensnapshot;
+};
+
+/* Free a whole list of distribution_sizes including the owned name
+ * copies. */
+static void distribution_sizes_freelist(struct distribution_sizes *ds) {
+	struct distribution_sizes *next;
+
+	for (; ds != NULL ; ds = next) {
+		next = ds->next;
+		free(ds->v);
+		free(ds);
+	}
+}
+
+/* Does the reference entry (data, len) belong to distribution 'dist'?
+ * The codename must be followed by '=', '|' or ' '; '=' marks a
+ * snapshot reference (reported via *snapshot_p). */
+static bool fromdist(struct distribution_sizes *dist, const char *data, size_t len, bool *snapshot_p) {
+	char separator;
+
+	if (len <= dist->codename_len)
+		return false;
+	separator = data[dist->codename_len];
+	switch (separator) {
+		case '=':
+			*snapshot_p = true;
+			break;
+		case '|':
+		case ' ':
+			*snapshot_p = false;
+			break;
+		default:
+			return false;
+	}
+	return memcmp(data, dist->codename, dist->codename_len) == 0;
+}
+
+/* Walk all entries of the references database (sorted by file key) and
+ * accumulate file sizes per distribution in ds; *all_p and *onlyall_p
+ * receive the grand totals.  If 'specific' is false, distributions not
+ * yet listed are added on the fly (their name is shown with a trailing
+ * '*').  'onlyhere' sums are reduced again once a second referencing
+ * distribution shows up for a file. */
+static retvalue count_sizes(struct cursor *cursor, bool specific, struct distribution_sizes *ds, unsigned long long *all_p, unsigned long long *onlyall_p) {
+	const char *key, *data;
+	size_t len;
+	char *last_file = NULL;
+	unsigned long long filesize = 0;
+	bool onlyone = true;
+	struct distribution_sizes *last_dist;
+	struct distribution_sizes *s;
+	bool snapshot;
+	unsigned long long all = 0, onlyall = 0;
+
+	while (cursor_nexttempdata(rdb_references, cursor,
+				&key, &data, &len)) {
+		/* entries are grouped by file key: a new key resets the
+		 * per-file state */
+		if (last_file == NULL || strcmp(last_file, key) != 0) {
+			if (last_file != NULL) {
+				free(last_file);
+				for (s = ds ; s != NULL ; s = s->next) {
+					s->seen = false;
+					s->seensnapshot = false;
+				}
+			}
+			last_file = strdup(key);
+			if (FAILEDTOALLOC(last_file))
+				return RET_ERROR_OOM;
+			onlyone = true;
+			filesize = 0;
+			last_dist = NULL;
+		}
+		/* skip "u|" and "s=" prefixes of the reference data */
+		if (data[0] == 'u' && data[1] == '|') {
+			data += 2;
+			len -= 2;
+		} else if (data[0] == 's' && data[1] == '=') {
+			data += 2;
+			len -= 2;
+		}
+		if (last_dist != NULL &&
+		    fromdist(last_dist, data, len, &snapshot)) {
+			/* same distribution again */
+			if (!snapshot && !last_dist->seen) {
+				last_dist->seen = true;
+				last_dist->this.all += filesize;
+				if (onlyone)
+					last_dist->this.onlyhere += filesize;
+			}
+			continue;
+		}
+		s = ds;
+		while (s != NULL && !fromdist(s, data, len, &snapshot))
+			s = s->next;
+		if (s == NULL) {
+			/* referenced by a distribution not (yet) listed:
+			 * the file is no longer exclusive */
+			if (onlyone && last_dist != NULL) {
+				if (!last_dist->seen)
+					last_dist->this.onlyhere -= filesize;
+				last_dist->withsnapshots.onlyhere -= filesize;
+			}
+			if (last_dist != NULL)
+				onlyall -= filesize;
+			onlyone = false;
+			if (!specific) {
+				struct distribution_sizes **s_p = &ds->next;
+				const char *p;
+
+				p = data;
+				while (*p != '\0' && *p != ' ' && *p != '|'
+						&& *p != '=')
+					p++;
+				if (*p == '\0')
+					continue;
+				while (*s_p != NULL)
+					s_p = &(*s_p)->next;
+				s = zNEW(struct distribution_sizes);
+				if (FAILEDTOALLOC(s)) {
+					free(last_file);
+					return RET_ERROR_OOM;
+				}
+				*s_p = s;
+				s->v = strndup(data, (p-data) + 1);
+				/* fix: this tested FAILEDTOALLOC(s)
+				 * instead of the freshly allocated s->v,
+				 * so an OOM here dereferenced NULL */
+				if (FAILEDTOALLOC(s->v)) {
+					free(last_file);
+					return RET_ERROR_OOM;
+				}
+				/* mark on-the-fly additions with a '*' */
+				s->v[p-data] = '*';
+				s->codename = s->v;
+				s->codename_len = p-data;
+				snapshot = *p == '=';
+			} else
+				/* last_dist not changed on purpose */
+				continue;
+		}
+		/* found it to belong to distribution s */
+		if (s->seen) {
+			assert (last_dist != NULL);
+			assert (!onlyone);
+			continue;
+		}
+		if (s->seensnapshot && !snapshot) {
+			s->seen = true;
+			s->this.all += filesize;
+			assert (last_dist != NULL);
+			assert (!onlyone);
+			continue;
+		}
+		/* distribution seen for this file the first time */
+		if (last_dist != NULL) {
+			if (onlyone) {
+				last_dist->withsnapshots.onlyhere -= filesize;
+				if (last_dist->seen)
+					last_dist->this.onlyhere -= filesize;
+				onlyone = false;
+			}
+			assert (filesize != 0);
+		} else {
+			/* and this is the first time
+			 * we are interested in the file */
+			filesize = files_getsize(key);
+			assert (filesize != 0);
+			if (onlyone)
+				onlyall += filesize;
+			all += filesize;
+		}
+		last_dist = s;
+		if (snapshot) {
+			s->seensnapshot = true;
+		} else {
+			s->seen = true;
+			last_dist->this.all += filesize;
+			if (onlyone)
+				last_dist->this.onlyhere += filesize;
+		}
+		last_dist->withsnapshots.all += filesize;
+		if (onlyone)
+			last_dist->withsnapshots.onlyhere += filesize;
+	}
+	free(last_file);
+	*all_p = all;
+	*onlyall_p = onlyall;
+	return RET_OK;
+}
+
+/* Implementation of the 'sizes' command: print, for each selected
+ * distribution, the total size of its referenced files, the size of
+ * files only referenced by it, and both again including snapshots. */
+retvalue sizes_distributions(struct distribution *alldistributions, bool specific) {
+ struct cursor *cursor;
+ retvalue result, r;
+ struct distribution_sizes *ds = NULL, **lds = &ds, *s;
+ struct distribution *d;
+ unsigned long long all = 0, onlyall = 0;
+
+ /* one counting slot per selected distribution */
+ for (d = alldistributions ; d != NULL ; d = d->next) {
+ if (!d->selected)
+ continue;
+ s = zNEW(struct distribution_sizes);
+ if (FAILEDTOALLOC(s)) {
+ distribution_sizes_freelist(ds);
+ return RET_ERROR_OOM;
+ }
+ s->codename = d->codename;
+ s->codename_len = strlen(d->codename);
+ *lds = s;
+ lds = &s->next;
+ }
+ if (ds == NULL)
+ return RET_NOTHING;
+ r = table_newglobalcursor(rdb_references, true, &cursor);
+ if (!RET_IS_OK(r)) {
+ distribution_sizes_freelist(ds);
+ return r;
+ }
+ result = count_sizes(cursor, specific, ds, &all, &onlyall);
+ r = cursor_close(rdb_references, cursor);
+ RET_ENDUPDATE(result, r);
+ if (RET_IS_OK(result)) {
+ printf("%-15s %13s %13s %13s %13s\n",
+ "Codename", "Size", "Only", "Size(+s)",
+ "Only(+s)");
+ for (s = ds ; s != NULL ; s = s->next) {
+ printf("%-15s %13llu %13llu %13llu %13llu\n",
+ s->codename,
+ s->this.all,
+ s->this.onlyhere,
+ s->withsnapshots.all,
+ s->withsnapshots.onlyhere);
+ }
+ /* summary row, only when restricted to specific
+ * distributions and more than one slot exists */
+ if (specific && ds->next != NULL)
+ printf("%-15s %13s %13s %13llu %13llu\n",
+ "<all selected> ",
+ "", "",
+ all, onlyall);
+ }
+ distribution_sizes_freelist(ds);
+ return result;
+}
diff --git a/sizes.h b/sizes.h
new file mode 100644
index 0000000..f982f7a
--- /dev/null
+++ b/sizes.h
@@ -0,0 +1,6 @@
+#ifndef REPREPRO_SIZES_H
+#define REPREPRO_SIZES_H
+
+/* print disk usage per selected distribution (the 'sizes' command) */
+retvalue sizes_distributions(struct distribution * /*all*/, bool /* specific */);
+
+#endif
diff --git a/sourcecheck.c b/sourcecheck.c
new file mode 100644
index 0000000..8182639
--- /dev/null
+++ b/sourcecheck.c
@@ -0,0 +1,482 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2010,2011,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "error.h"
+#include "distribution.h"
+#include "trackingt.h"
+#include "package.h"
+#include "sourcecheck.h"
+
+/* This is / will be the implementation of the
+ * unusedsources
+ * withoutsource
+ * reportcruft
+ * removecruft (to be implemented)
+ * commands.
+ *
+ * Currently those only work with tracking enabled, but
+ * are in this file as the implementation without tracking
+ * will need similar infrastructure */
+
+
+/* TODO: some tree might be more efficient, check how bad the comparisons are here */
+/* A list of source package names, kept sorted by name, each carrying a
+ * (unsorted) list of versions seen for that source.  The first version
+ * node is embedded in the info_source itself; additional versions hang
+ * off version.next.  'used' marks versions some binary package refers to. */
+struct info_source {
+	struct info_source *next;
+	char *name;
+	struct info_source_version {
+		struct info_source_version *next;
+		char *version;
+		bool used;
+	} version;
+};
+
+/* Free a whole info_source list including all version nodes.
+ * The first version node is embedded in the info_source, so only its
+ * string is freed, not the node itself. */
+static void free_source_info(struct info_source *s) {
+	while (s != NULL) {
+		struct info_source *h = s;
+		s = s->next;
+
+		/* free the chained (heap-allocated) extra versions */
+		while (h->version.next != NULL) {
+			struct info_source_version *v = h->version.next;
+			h->version.next = v->next;
+			free(v->version);
+			free(v);
+		}
+		free(h->version.version);
+		free(h->name);
+		free(h);
+	}
+}
+
+/* Collect every (source name, version) pair present in the source targets
+ * of distribution d into a name-sorted info_source list stored in *out.
+ * Exploits that packages are iterated in sorted order: 'last' remembers
+ * the previous insertion point so the list is usually extended without
+ * rescanning from the root.
+ * Returns RET_OK with *out set, RET_NOTHING if nothing was found, or an
+ * error (in which case the partial list is freed). */
+static retvalue collect_source_versions(struct distribution *d, struct info_source **out) {
+	struct info_source *root = NULL, *last = NULL;
+	struct target *t;
+	struct package_cursor cursor;
+	retvalue result = RET_NOTHING, r;
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		if (t->architecture != architecture_source)
+			continue;
+		r = package_openiterator(t, true, true, &cursor);
+		if (RET_WAS_ERROR(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+		while (package_next(&cursor)) {
+			/* into != NULL: where a new node must be linked in;
+			 * into == NULL: name already present (at 'last') */
+			struct info_source **into = NULL;
+			struct info_source_version *v;
+
+			r = package_getversion(&cursor.current);
+			if (!RET_IS_OK(r)) {
+				RET_UPDATE(result, r);
+				continue;
+			}
+			/* try to continue searching from the last hit */
+			if (last != NULL) {
+				int c;
+				c = strcmp(cursor.current.name, last->name);
+				if (c < 0) {
+					/* start at the beginning */
+					last = NULL;
+				} else while (c > 0) {
+					into = &last->next;
+					if (last->next == NULL)
+						break;
+					last = last->next;
+					c = strcmp(cursor.current.name, last->name);
+					if (c == 0) {
+						into = NULL;
+						break;
+					}
+				}
+			}
+			/* if into != NULL, place there,
+			 * if last != NULL, already found */
+			if (last == NULL) {
+				/* full search from the root */
+				into = &root;
+				while ((last = *into) != NULL) {
+					int c;
+					c = strcmp(cursor.current.name, last->name);
+					if (c == 0) {
+						into = NULL;
+						break;
+					}
+					if (c < 0)
+						break;
+					into = &last->next;
+				}
+			}
+			if (into != NULL) {
+				/* new source name: allocate and link node */
+				last = zNEW(struct info_source);
+				if (FAILEDTOALLOC(last)) {
+					result = RET_ERROR_OOM;
+					break;
+				}
+				last->name = strdup(cursor.current.name);
+				if (FAILEDTOALLOC(last->name)) {
+					free(last);
+					result = RET_ERROR_OOM;
+					break;
+				}
+				last->version.version = package_dupversion(
+						&cursor.current);
+				if (FAILEDTOALLOC(last->version.version)) {
+					result = RET_ERROR_OOM;
+					free(last->name);
+					free(last);
+					break;
+				}
+				last->next = *into;
+				*into = last;
+				RET_UPDATE(result, RET_OK);
+				continue;
+			}
+			assert (last != NULL);
+			assert (strcmp(cursor.current.name, last->name)==0);
+
+			/* known name: append version if not yet recorded */
+			v = &last->version;
+			while (strcmp(v->version, cursor.current.version) != 0) {
+				if (v->next == NULL) {
+					v->next = zNEW(struct info_source_version);
+					if (FAILEDTOALLOC(v->next)) {
+						result = RET_ERROR_OOM;
+						break;
+					}
+					v = v->next;
+					v->version = package_dupversion(
+							&cursor.current);
+					if (FAILEDTOALLOC(v->version)) {
+						result = RET_ERROR_OOM;
+						break;
+					}
+					RET_UPDATE(result, RET_OK);
+					break;
+				}
+				v = v->next;
+			}
+		}
+		r = package_closeiterator(&cursor);
+		if (RET_WAS_ERROR(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+	}
+	if (RET_IS_OK(result))
+		*out = root;
+	else {
+		assert (result != RET_NOTHING || root == NULL);
+		free_source_info(root);
+	}
+	return result;
+}
+
+/* Walk all non-source targets of d.  For every binary package whose
+ * (source, sourceversion) is found in 'sources', mark that version used;
+ * for every binary whose source is missing, invoke 'action' (if non-NULL)
+ * with the package and 'privdata'.
+ * NOTE: the name search relies on 'sources' being sorted by name, but
+ * scans linearly from the head for each package. */
+static retvalue process_binaries(struct distribution *d, struct info_source *sources, retvalue (*action)(struct package *, void *), void *privdata) {
+	struct target *t;
+	struct package_cursor cursor;
+	retvalue result = RET_NOTHING, r;
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		if (t->architecture == architecture_source)
+			continue;
+		r = package_openiterator(t, true, true, &cursor);
+		if (RET_WAS_ERROR(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+		while (package_next(&cursor)) {
+			struct info_source *s;
+			struct info_source_version *v;
+
+			r = package_getsource(&cursor.current);
+			if (!RET_IS_OK(r)) {
+				RET_UPDATE(result, r);
+				continue;
+			}
+			const char *source = cursor.current.source;
+			const char *version = cursor.current.sourceversion;
+
+			/* advance past names sorting before 'source' */
+			s = sources;
+			while (s != NULL && strcmp(s->name, source) < 0) {
+				s = s->next;
+			}
+			if (s != NULL && strcmp(source, s->name) == 0) {
+				v = &s->version;
+				while (v != NULL && strcmp(version, v->version) != 0)
+					v = v->next;
+			} else
+				v = NULL;
+			if (v != NULL) {
+				v->used = true;
+			} else if (action != NULL) {
+				/* binary has no matching source version */
+				r = action(&cursor.current, privdata);
+				RET_UPDATE(result, r);
+			}
+		}
+		r = package_closeiterator(&cursor);
+		if (RET_WAS_ERROR(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+	}
+	return result;
+}
+
+/* tracking_foreach_ro callback: print "<codename> <source> <version>" for
+ * tracked packages that still reference source files ('s') but no binary
+ * files ('b' = binary, 'a' = all/arch-independent).  Files with refcount 0
+ * are ignored. */
+static retvalue listunusedsources(struct distribution *d, const struct trackedpackage *pkg) {
+	bool hasbinary = false, hassource = false;
+	int i;
+
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		if (pkg->refcounts[i] == 0)
+			continue;
+		if (pkg->filetypes[i] == 's')
+			hassource = true;
+		if (pkg->filetypes[i] == 'b')
+			hasbinary = true;
+		if (pkg->filetypes[i] == 'a')
+			hasbinary = true;
+	}
+	if (hassource && ! hasbinary) {
+		printf("%s %s %s\n", d->codename, pkg->sourcename,
+				pkg->sourceversion);
+		return RET_OK;
+	}
+	return RET_NOTHING;
+}
+
+/* 'unusedsources' command: for each selected distribution that carries
+ * source packages, list source packages no binary package refers to.
+ * Uses tracking data when available, otherwise collects source versions
+ * and matches them against all binaries. */
+retvalue unusedsources(struct distribution *alldistributions) {
+	struct distribution *d;
+	retvalue result = RET_NOTHING, r;
+
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+		if (!atomlist_in(&d->architectures, architecture_source))
+			continue;
+		if (d->tracking != dt_NONE) {
+			/* fast path: tracking database already knows
+			 * which files are referenced */
+			r = tracking_foreach_ro(d, listunusedsources);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				return r;
+			continue;
+		}
+		struct info_source *sources = NULL;
+		const struct info_source *s;
+		const struct info_source_version *v;
+
+		/* NOTE(review): errors from collecting are silently
+		 * skipped here (not folded into result) — confirm intended */
+		r = collect_source_versions(d, &sources);
+		if (!RET_IS_OK(r))
+			continue;
+
+		/* mark all versions some binary refers to */
+		r = process_binaries(d, sources, NULL, NULL);
+		RET_UPDATE(result, r);
+		for (s = sources ; s != NULL ; s = s->next) {
+			for (v = &s->version ; v != NULL ; v = v->next) {
+				if (v->used)
+					continue;
+				printf("%s %s %s\n", d->codename,
+						s->name, v->version);
+			}
+		}
+		free_source_info(sources);
+	}
+	return result;
+}
+
+/* tracking_foreach_ro callback: for tracked packages that have referenced
+ * binary files but no source files, print one line per binary filekey:
+ * "<codename> <source> <version> <filekey>". */
+static retvalue listsourcemissing(struct distribution *d, const struct trackedpackage *pkg) {
+	bool hasbinary = false, hassource = false;
+	int i;
+
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		if (pkg->refcounts[i] == 0)
+			continue;
+		if (pkg->filetypes[i] == 's')
+			hassource = true;
+		if (pkg->filetypes[i] == 'b')
+			hasbinary = true;
+		if (pkg->filetypes[i] == 'a')
+			hasbinary = true;
+	}
+	if (hasbinary && ! hassource) {
+		/* second pass: emit each referenced binary file */
+		for (i = 0 ; i < pkg->filekeys.count ; i++) {
+			if (pkg->refcounts[i] == 0)
+				continue;
+			if (pkg->filetypes[i] != 'b' && pkg->filetypes[i] != 'a')
+				continue;
+			printf("%s %s %s %s\n", d->codename, pkg->sourcename,
+					pkg->sourceversion,
+					pkg->filekeys.values[i]);
+		}
+		return RET_OK;
+	}
+	return RET_NOTHING;
+}
+
+/* process_binaries action: print "<codename> <source> <sourceversion>
+ * <filekey>" for a binary package whose source is missing.
+ * The assert presumes a binary package always has exactly one filekey
+ * (its .deb/.udeb). */
+static retvalue listmissing(struct package *package, UNUSED(void*data)) {
+	retvalue r;
+	struct strlist list;
+
+	r = package->target->getfilekeys(package->control, &list);
+	if (!RET_IS_OK(r))
+		return r;
+	assert (list.count == 1);
+	printf("%s %s %s %s\n", package->target->distribution->codename,
+			package->source, package->sourceversion, list.values[0]);
+	strlist_done(&list);
+	return RET_OK;
+}
+
+/* 'sourcemissing' command: list binary packages whose source package is
+ * not present, per selected distribution.  Distributions without a source
+ * architecture are skipped with a warning (unless quiet). */
+retvalue sourcemissing(struct distribution *alldistributions) {
+	struct distribution *d;
+	retvalue result = RET_NOTHING, r;
+
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+		if (!atomlist_in(&d->architectures, architecture_source)) {
+			if (verbose >= 0)
+				fprintf(stderr,
+"Not processing distribution '%s', as it has no source packages.\n",
+						d->codename);
+			continue;
+		}
+		if (d->tracking != dt_NONE) {
+			/* tracking data gives the answer directly */
+			r = tracking_foreach_ro(d, listsourcemissing);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				return r;
+		} else {
+			struct info_source *sources = NULL;
+
+			r = collect_source_versions(d, &sources);
+			if (!RET_IS_OK(r))
+				continue;
+
+			/* print every binary without matching source */
+			r = process_binaries(d, sources, listmissing, NULL);
+			RET_UPDATE(result, r);
+			free_source_info(sources);
+		}
+
+	}
+	return result;
+}
+
+/* tracking_foreach_ro callback for 'reportcruft': report tracked packages
+ * that are one-sided — binaries without source, or source without
+ * binaries — based on the referenced file types. */
+static retvalue listcruft(struct distribution *d, const struct trackedpackage *pkg) {
+	bool hasbinary = false, hassource = false;
+	int i;
+
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		if (pkg->refcounts[i] == 0)
+			continue;
+		if (pkg->filetypes[i] == 's')
+			hassource = true;
+		if (pkg->filetypes[i] == 'b')
+			hasbinary = true;
+		if (pkg->filetypes[i] == 'a')
+			hasbinary = true;
+	}
+	if (hasbinary && ! hassource) {
+		printf("binaries-without-source %s %s %s\n", d->codename,
+				pkg->sourcename, pkg->sourceversion);
+		return RET_OK;
+	} else if (hassource && ! hasbinary) {
+		printf("source-without-binaries %s %s %s\n", d->codename,
+				pkg->sourcename, pkg->sourceversion);
+		return RET_OK;
+	}
+	return RET_NOTHING;
+}
+
+/* process_binaries action for 'reportcruft': print each missing
+ * (source, sourceversion) pair only once.  'data' points to the head of
+ * an info_source list used purely as a seen-set (only the embedded first
+ * version node is used per entry). */
+static retvalue listmissingonce(struct package *package, void *data) {
+	struct info_source **already = data;
+	struct info_source *s;
+
+	/* already reported? */
+	for (s = *already ; s != NULL ; s = s->next) {
+		if (strcmp(s->name, package->source) != 0)
+			continue;
+		if (strcmp(s->version.version, package->sourceversion) != 0)
+			continue;
+		return RET_NOTHING;
+	}
+	s = zNEW(struct info_source);
+	if (FAILEDTOALLOC(s))
+		return RET_ERROR_OOM;
+	s->name = strdup(package->source);
+	s->version.version = strdup(package->sourceversion);
+	if (FAILEDTOALLOC(s->name) || FAILEDTOALLOC(s->version.version)) {
+		free(s->name);
+		free(s->version.version);
+		free(s);
+		return RET_ERROR_OOM;
+	}
+	/* prepend to the seen-set (no ordering needed here) */
+	s->next = *already;
+	*already = s;
+	printf("binaries-without-source %s %s %s\n",
+			package->target->distribution->codename,
+			package->source, package->sourceversion);
+	return RET_OK;
+}
+
+/* 'reportcruft' command: report both binaries-without-source and
+ * source-without-binaries for each selected distribution; via tracking
+ * data when enabled, otherwise by cross-matching collected source
+ * versions against all binaries. */
+retvalue reportcruft(struct distribution *alldistributions) {
+	struct distribution *d;
+	retvalue result = RET_NOTHING, r;
+
+	for (d = alldistributions ; d != NULL ; d = d->next) {
+		if (!d->selected)
+			continue;
+		if (!atomlist_in(&d->architectures, architecture_source)) {
+			if (verbose >= 0)
+				fprintf(stderr,
+"Not processing distribution '%s', as it has no source packages.\n",
+						d->codename);
+			continue;
+		}
+		if (d->tracking != dt_NONE) {
+			r = tracking_foreach_ro(d, listcruft);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				return r;
+			continue;
+		}
+		struct info_source *sources = NULL;
+		/* 'list' is the dedup set filled by listmissingonce */
+		struct info_source *list = NULL;
+		const struct info_source *s;
+		const struct info_source_version *v;
+
+		r = collect_source_versions( d, &sources);
+		if (!RET_IS_OK(r))
+			continue;
+
+		/* marks used versions and reports binaries lacking source */
+		r = process_binaries( d, sources,
+				listmissingonce, &list);
+		RET_UPDATE(result, r);
+		for (s = sources ; s != NULL ; s = s->next) {
+			for (v = &s->version ; v != NULL ; v = v->next) {
+				if (v->used)
+					continue;
+				printf("source-without-binaries %s %s %s\n",
+						d->codename, s->name, v->version);
+			}
+		}
+		free_source_info(list);
+		free_source_info(sources);
+	}
+	return result;
+}
diff --git a/sourcecheck.h b/sourcecheck.h
new file mode 100644
index 0000000..5bd2015
--- /dev/null
+++ b/sourcecheck.h
@@ -0,0 +1,8 @@
+#ifndef REPREPRO_SOURCECHECK_H
+#define REPREPRO_SOURCECHECK_H
+
+retvalue unusedsources(struct distribution *);
+retvalue sourcemissing(struct distribution *);
+retvalue reportcruft(struct distribution *);
+
+#endif
diff --git a/sourceextraction.c b/sourceextraction.c
new file mode 100644
index 0000000..d28458a
--- /dev/null
+++ b/sourceextraction.c
@@ -0,0 +1,716 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#ifdef HAVE_LIBARCHIVE
+#include <archive.h>
+#include <archive_entry.h>
+#if ARCHIVE_VERSION_NUMBER < 3000000
+#define archive_read_free archive_read_finish
+#endif
+#endif
+
+#include "error.h"
+#include "filecntl.h"
+#include "chunks.h"
+#include "uncompression.h"
+#include "sourceextraction.h"
+
+/* State for extracting Section/Priority from a source package's files.
+ * difffile/tarfile/debiantarfile hold the caller's file indices (-1 if
+ * absent) together with each file's detected compression; section_p and
+ * priority_p point to where results are to be stored (may be NULL). */
+struct sourceextraction {
+	bool failed, completed;
+	int difffile, tarfile, debiantarfile;
+	enum compression diffcompression, tarcompression, debiancompression;
+	/*@null@*/ char **section_p, **priority_p;
+};
+
+/* Allocate a fresh extraction state.  section_p/priority_p may be NULL
+ * if the respective field is not wanted.  Returns NULL on allocation
+ * failure. */
+struct sourceextraction *sourceextraction_init(char **section_p, char **priority_p) {
+	struct sourceextraction *n;
+
+	n = zNEW(struct sourceextraction);
+	if (FAILEDTOALLOC(n))
+		return n;
+	/* -1 means: no such file registered (yet) */
+	n->difffile = -1;
+	n->tarfile = -1;
+	n->debiantarfile = -1;
+	n->section_p = section_p;
+	n->priority_p = priority_p;
+	return n;
+}
+
+/* Discard an extraction state without evaluating its result. */
+void sourceextraction_abort(struct sourceextraction *e) {
+	free(e);
+}
+
+/* with must be a string constant, no pointer! */
+#define endswith(name, len, with) (len >= sizeof(with) && memcmp(name+(len+1-sizeof(with)), with, sizeof(with)-1) == 0)
+
+/* register a file part of this source */
+void sourceextraction_setpart(struct sourceextraction *e, int i, const char *basefilename) {
+ size_t bl = strlen(basefilename);
+ enum compression c;
+
+ if (e->failed)
+ return;
+
+ c = compression_by_suffix(basefilename, &bl);
+
+ if (endswith(basefilename, bl, ".dsc"))
+ return;
+ else if (endswith(basefilename, bl, ".asc"))
+ return;
+ else if (endswith(basefilename, bl, ".diff")) {
+ e->difffile = i;
+ e->diffcompression = c;
+ return;
+ } else if (endswith(basefilename, bl, ".debian.tar")) {
+ e->debiantarfile = i;
+ e->debiancompression = c;
+ return;
+ } else if (endswith(basefilename, bl, ".tar")) {
+ e->tarfile = i;
+ e->tarcompression = c;
+ return;
+ } else {
+ // TODO: errormessage
+ e->failed = true;
+ }
+}
+
+/* return the next needed file */
+/* Return the index (*ofs_p) of the next file the extraction wants to
+ * inspect, preferring .diff over .debian.tar over .tar.  Returns false
+ * when nothing (more) is needed or the needed decompression/libarchive
+ * support is unavailable. */
+bool sourceextraction_needs(struct sourceextraction *e, int *ofs_p) {
+	if (e->failed || e->completed)
+		return false;
+	if (e->difffile >= 0) {
+		if (!uncompression_supported(e->diffcompression))
+			// TODO: errormessage
+			return false;
+		*ofs_p = e->difffile;
+		return true;
+	} else if (e->debiantarfile >= 0) {
+#ifdef HAVE_LIBARCHIVE
+		if (!uncompression_supported(e->debiancompression))
+			return false;
+		*ofs_p = e->debiantarfile;
+		return true;
+#else
+		/* tar members can only be read with libarchive */
+		return false;
+#endif
+	} else if (e->tarfile >= 0) {
+#ifdef HAVE_LIBARCHIVE
+		if (!uncompression_supported(e->tarcompression))
+			return false;
+		*ofs_p = e->tarfile;
+		return true;
+#else
+		return false;
+#endif
+	} else
+		return false;
+}
+
+/* Scan a dpkg-source style .diff for a hunk creating x/debian/control and
+ * pull Section/Priority values out of the added lines.
+ * Sets *found_p and returns RET_OK on a definite answer (found or not),
+ * RET_NOTHING when the diff deviates from the expected format (so the
+ * caller has to fall back to other means), or an error.
+ * NOTE(review): uses GCC nested functions ('auto inline') sharing the
+ * parse buffer state — not portable beyond GCC-compatible compilers. */
+static retvalue parsediff(struct compressedfile *f, /*@null@*/char **section_p, /*@null@*/char **priority_p, bool *found_p) {
+	size_t destlength, lines_in, lines_out;
+	const char *p, *s; char *garbage;
+#define BUFSIZE 4096
+	char buffer[BUFSIZE];
+	int bytes_read, used = 0, filled = 0;
+
+	/* read the next line into the buffer and point p at it;
+	 * strips \n and any trailing \r; false on EOF/error/overlong line */
+	auto inline bool u_getline(void);
+	inline bool u_getline(void) {
+		do {
+			if (filled - used > 0) {
+				char *n;
+
+				p = buffer + used;
+				n = memchr(p, '\n', filled - used);
+				if (n != NULL) {
+					used += 1 + (n - p);
+					*n = '\0';
+					while (--n >= p && *n == '\r')
+						*n = '\0';
+					return true;
+				}
+			} else { assert (filled == used);
+				filled = 0;
+				used = 0;
+			}
+			if (filled == BUFSIZE) {
+				if (used == 0)
+					/* overlong line */
+					return false;
+				memmove(buffer, buffer + used, filled - used);
+				filled -= used;
+				used = 0;
+			}
+			bytes_read = uncompress_read(f, buffer + filled,
+					BUFSIZE - filled);
+			if (bytes_read <= 0)
+				return false;
+			filled += bytes_read;
+		} while (true);
+	}
+	/* return the first character of the next line and skip the rest of
+	 * that line without buffering it ('\0' on EOF or empty line) */
+	auto inline char u_overlinegetchar(void);
+	inline char u_overlinegetchar(void) {
+		const char *n;
+		char ch;
+
+		if (filled - used > 0) {
+			ch = buffer[used];
+		} else { assert (filled == used);
+			used = 0;
+			bytes_read = uncompress_read(f, buffer, BUFSIZE);
+			if (bytes_read <= 0) {
+				filled = 0;
+				return '\0';
+			}
+			filled = bytes_read;
+			ch = buffer[0];
+		}
+		if (ch == '\n')
+			return '\0';
+
+		/* over rest of the line */
+		n = memchr(buffer + used, '\n', filled - used);
+		if (n != NULL) {
+			used = 1 + (n - buffer);
+			return ch;
+		}
+		used = 0;
+		filled = 0;
+		/* need to read more to get to the end of the line */
+		do { /* these lines can be long */
+			bytes_read = uncompress_read(f, buffer, BUFSIZE);
+			if (bytes_read <= 0)
+				return false;
+			n = memchr(buffer, '\n', bytes_read);
+		} while (n == NULL);
+		used = 1 + (n - buffer);
+		filled = bytes_read;
+		return ch;
+	}
+
+	/* we are assuming the exact format dpkg-source generates here... */
+
+	if (!u_getline()) {
+		/* empty or strange file */
+		*found_p = false;
+		return RET_OK;
+	}
+	if (memcmp(p, "diff ", 4) == 0) {
+		/* one exception is allowing diff lines,
+		 * as diff -ru adds them ... */
+		if (!u_getline()) {
+			/* strange file */
+			*found_p = false;
+			return RET_OK;
+		}
+	}
+	if (unlikely(memcmp(p, "--- ", 4) != 0))
+		return RET_NOTHING;
+	if (!u_getline())
+		/* so short a file? */
+		return RET_NOTHING;
+	if (unlikely(memcmp(p, "+++ ", 4) != 0))
+		return RET_NOTHING;
+	p += 4;
+	s = strchr(p, '/');
+	if (unlikely(s == NULL))
+		return RET_NOTHING;
+	s++;
+	/* another exception to allow diff output directly:
+	 * +++ lines might have garbage after a tab... */
+	garbage = strchr(s, '\t');
+	if (garbage != NULL)
+		*garbage = '\0';
+	/* remember the length of the leading "dir/" prefix: all files in
+	 * the diff must share it for the debian/control match to be sound */
+	destlength = s - p;
+	/* ignore all files that are not x/debian/control */
+	while (strcmp(s, "debian/control") != 0) {
+		if (unlikely(interrupted()))
+			return RET_ERROR_INTERRUPTED;
+		if (!u_getline())
+			return RET_NOTHING;
+		/* skip over every hunk of the current (uninteresting) file,
+		 * counting the old (lines_in) and new (lines_out) lines */
+		while (memcmp(p, "@@ -", 4) == 0) {
+			if (unlikely(interrupted()))
+				return RET_ERROR_INTERRUPTED;
+			p += 4;
+			while (*p != ',' && *p != ' ') {
+				if (unlikely(*p == '\0'))
+					return RET_NOTHING;
+				p++;
+			}
+			if (*p == ' ')
+				lines_in = 1;
+			else {
+				p++;
+				lines_in = 0;
+				while (*p >= '0' && *p <= '9') {
+					lines_in = 10*lines_in + (*p-'0');
+					p++;
+				}
+			}
+			while (*p == ' ')
+				p++;
+			if (unlikely(*(p++) != '+'))
+				return RET_NOTHING;
+			while (*p >= '0' && *p <= '9')
+				p++;
+			if (*p == ',') {
+				p++;
+				lines_out = 0;
+				while (*p >= '0' && *p <= '9') {
+					lines_out = 10*lines_out + (*p-'0');
+					p++;
+				}
+			} else if (*p == ' ')
+				lines_out = 1;
+			else
+				return RET_NOTHING;
+			while (*p == ' ')
+				p++;
+			if (unlikely(*p != '@'))
+				return RET_NOTHING;
+
+			/* consume the hunk body, checking the +/-/context
+			 * counts agree with the header */
+			while (lines_in > 0 || lines_out > 0) {
+				char ch;
+
+				ch = u_overlinegetchar();
+				switch (ch) {
+					case '+':
+						if (unlikely(lines_out == 0))
+							return RET_NOTHING;
+						lines_out--;
+						break;
+					case ' ':
+						if (unlikely(lines_out == 0))
+							return RET_NOTHING;
+						lines_out--;
+						/* no break */
+						__attribute__ ((fallthrough));
+					case '-':
+						if (unlikely(lines_in == 0))
+							return RET_NOTHING;
+						lines_in--;
+						break;
+					default:
+						return RET_NOTHING;
+				}
+			}
+			if (!u_getline()) {
+				*found_p = false;
+				/* nothing found successfully */
+				return RET_OK;
+			}
+		}
+		if (memcmp(p, "\\ No newline at end of file", 27) == 0) {
+			if (!u_getline()) {
+				/* nothing found successfully */
+				*found_p = false;
+				return RET_OK;
+			}
+		}
+		if (memcmp(p, "diff ", 4) == 0) {
+			if (!u_getline()) {
+				/* strange file, but nothing explicitly wrong */
+				*found_p = false;
+				return RET_OK;
+			}
+		}
+		if (unlikely(memcmp(p, "--- ", 4) != 0))
+			return RET_NOTHING;
+		if (!u_getline())
+			return RET_NOTHING;
+		if (unlikely(memcmp(p, "+++ ", 4) != 0))
+			return RET_NOTHING;
+		p += 4;
+		s = strchr(p, '/');
+		if (unlikely(s == NULL))
+			return RET_NOTHING;
+		/* another exception to allow diff output directly:
+		 * +++ lines might have garbage after a tab... */
+		garbage = strchr(s, '\t');
+		if (garbage != NULL)
+			*garbage = '\0';
+		/* if it does not always have the same directory, then
+		 * we cannot be sure it has no debian/control, so we
+		 * have to fail... */
+		s++;
+		if (s != p + destlength)
+			return RET_NOTHING;
+	}
+	/* found debian/control */
+	if (!u_getline())
+		return RET_NOTHING;
+	if (unlikely(memcmp(p, "@@ -", 4) != 0))
+		return RET_NOTHING;
+	p += 4;
+	p++;
+	while (*p != ',' && *p != ' ') {
+		if (unlikely(*p == '\0'))
+			return RET_NOTHING;
+		p++;
+	}
+	if (*p == ',') {
+		p++;
+		while (*p >= '0' && *p <= '9')
+			p++;
+	}
+	while (*p == ' ')
+		p++;
+	if (unlikely(*(p++) != '+'))
+		return RET_NOTHING;
+	if (*(p++) != '1' || *(p++) != ',') {
+		/* a diff not starting at the first line (or not being
+		 * more than one line) is not yet supported */
+		return RET_NOTHING;
+	}
+	lines_out = 0;
+	while (*p >= '0' && *p <= '9') {
+		lines_out = 10*lines_out + (*p-'0');
+		p++;
+	}
+	while (*p == ' ')
+		p++;
+	if (unlikely(*p != '@'))
+		return RET_NOTHING;
+	/* walk the added/context lines of debian/control looking for
+	 * Section:/Priority: until the first empty line (end of the
+	 * first paragraph, i.e. the source stanza) */
+	while (lines_out > 0) {
+		if (unlikely(interrupted()))
+			return RET_ERROR_INTERRUPTED;
+		if (!u_getline())
+			return RET_NOTHING;
+
+		switch (*(p++)) {
+			case '-':
+				break;
+			default:
+				return RET_NOTHING;
+			case ' ':
+			case '+':
+				if (unlikely(lines_out == 0))
+					return RET_NOTHING;
+				lines_out--;
+				if (section_p != NULL &&
+						strncasecmp(p, "Section:", 8) == 0) {
+					p += 8;
+					while (*p == ' ' || *p == '\t')
+						p++;
+					s = p;
+					while (*s != ' ' && *s != '\t' &&
+					       *s != '\0' && *s != '\r')
+						s++;
+					if (s == p)
+						return RET_NOTHING;
+					*section_p = strndup(p, s-p);
+					if (FAILEDTOALLOC(*section_p))
+						return RET_ERROR_OOM;
+					while (*s == ' ' || *s == '\t' ||
+					       *s == '\r')
+						s++;
+					if (*s != '\0')
+						return RET_NOTHING;
+					continue;
+				}
+				if (priority_p != NULL &&
+						strncasecmp(p, "Priority:", 9) == 0) {
+					p += 9;
+					while (*p == ' ' || *p == '\t')
+						p++;
+					s = p;
+					while (*s != ' ' && *s != '\t' &&
+					       *s != '\0' && *s != '\r')
+						s++;
+					if (s == p)
+						return RET_NOTHING;
+					*priority_p = strndup(p, s-p);
+					if (FAILEDTOALLOC(*priority_p))
+						return RET_ERROR_OOM;
+					while (*s == ' ' || *s == '\t' ||
+					       *s == '\r')
+						s++;
+					if (*s != '\0')
+						return RET_NOTHING;
+					continue;
+				}
+				if (*p == '\0') {
+					/* end of control data, we are
+					 * finished */
+					*found_p = true;
+					return RET_OK;
+				}
+				break;
+		}
+	}
+	/* cannot yet handle a .diff not containing the full control */
+	return RET_NOTHING;
+}
+
+#ifdef HAVE_LIBARCHIVE
+/* Read the debian/control entry currently positioned in 'tar' and extract
+ * Section/Priority from its first paragraph into e.
+ * Files larger than 10MiB (or empty) are ignored (RET_NOTHING).
+ * NOTE(review): 'size' is size_t but archive_entry_size() returns a
+ * signed 64-bit value; a negative return would wrap instead of hitting
+ * the <= 0 check — confirm harmless. */
+static retvalue read_source_control_file(struct sourceextraction *e, struct archive *tar, struct archive_entry *entry) {
+	// TODO: implement...
+	size_t size, len, controllen;
+	ssize_t got;
+	char *buffer;
+	const char *aftercontrol;
+
+	size = archive_entry_size(entry);
+	if (size <= 0)
+		return RET_NOTHING;
+	if (size > 10*1024*1024)
+		return RET_NOTHING;
+	buffer = malloc(size+2);
+	if (FAILEDTOALLOC(buffer))
+		return RET_ERROR_OOM;
+	len = 0;
+	/* request size+1 bytes so an entry longer than advertised is
+	 * detected by len exceeding size */
+	while ((got = archive_read_data(tar, buffer+len, ((size_t)size+1)-len)) > 0
+			&& !interrupted()) {
+		len += got;
+		if (len > size) {
+			free(buffer);
+			return RET_NOTHING;
+		}
+	}
+	if (unlikely(interrupted())) {
+		free(buffer);
+		return RET_ERROR_INTERRUPTED;
+	}
+	if (got < 0) {
+		free(buffer);
+		return RET_NOTHING;
+	}
+	buffer[len] = '\0';
+	// TODO: allow a saved .diff for this file applied here
+
+	/* isolate the first control paragraph (in place) */
+	controllen = chunk_extract(buffer, buffer, len, true, &aftercontrol);
+	if (controllen == 0) {
+		free(buffer);
+		return RET_NOTHING;
+	}
+
+	/* missing fields are simply left unset */
+	if (e->section_p != NULL)
+		(void)chunk_getvalue(buffer, "Section", e->section_p);
+	if (e->priority_p != NULL)
+		(void)chunk_getvalue(buffer, "Priority", e->priority_p);
+	free(buffer);
+	return RET_OK;
+}
+
+/* libarchive open callback: the compressedfile is already open, nothing
+ * to do. */
+static int compressedfile_open(UNUSED(struct archive *a), UNUSED(void *v)) {
+	return ARCHIVE_OK;
+}
+
+/* libarchive close callback: closing is handled by the caller via
+ * uncompress_close/uncompress_abort, so this is a no-op. */
+static int compressedfile_close(UNUSED(struct archive *a), UNUSED(void *v)) {
+	return ARCHIVE_OK;
+}
+
+/* libarchive read callback: feed decompressed bytes from the
+ * compressedfile.  NOTE(review): the static buffer makes this
+ * non-reentrant — fine only while a single archive is read at a time. */
+static ssize_t compressedfile_read(UNUSED(struct archive *a), void *d, const void **buffer_p) {
+	struct compressedfile *f = d;
+	// TODO malloc buffer instead
+	static char mybuffer[4096];
+
+	*buffer_p = mybuffer;
+	return uncompress_read(f, mybuffer, 4096);
+}
+
+/* Open tar file 'filename' (compression c) via libarchive, look for a
+ * debian/control member (either top-level or one directory deep) and,
+ * if found, extract Section/Priority into e and set *found_p.
+ * Returns RET_NOTHING for non-regular files, errors on archive problems. */
+static retvalue parse_tarfile(struct sourceextraction *e, const char *filename, enum compression c, /*@out@*/bool *found_p) {
+	struct archive *tar;
+	struct archive_entry *entry;
+	struct compressedfile *file;
+	int a;
+	retvalue r, r2;
+
+	/* While an .tar, especially an .orig.tar can be very ugly
+	 * (they should be pristine upstream tars, so dpkg-source works around
+	 * a lot of ugliness),
+	 * we are looking for debian/control. This is unlikely to be in an ugly
+	 * upstream tar verbatimly. */
+
+	if (!isregularfile(filename))
+		return RET_NOTHING;
+
+	tar = archive_read_new();
+	if (FAILEDTOALLOC(tar))
+		return RET_ERROR_OOM;
+	archive_read_support_format_tar(tar);
+	archive_read_support_format_gnutar(tar);
+
+	/* decompression is done by our own layer, not libarchive */
+	r = uncompress_open(&file, filename, c);
+	if (!RET_IS_OK(r)) {
+		archive_read_free(tar);
+		return r;
+	}
+
+	a = archive_read_open(tar, file, compressedfile_open,
+			compressedfile_read, compressedfile_close);
+	if (a != ARCHIVE_OK) {
+		int err = archive_errno(tar);
+		if (err != -EINVAL && err != 0)
+			fprintf(stderr,
+"Error %d trying to extract control information from %s:\n" "%s\n",
+				err, filename, archive_error_string(tar));
+		else
+			fprintf(stderr,
+"Error trying to extract control information from %s:\n" "%s\n",
+				filename, archive_error_string(tar));
+		archive_read_free(tar);
+		uncompress_abort(file);
+		return RET_ERROR;
+	}
+	while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) {
+		const char *name = archive_entry_pathname(entry);
+		const char *s;
+		bool iscontrol;
+
+		/* strip a leading "./" */
+		if (name[0] == '.' && name[1] == '/')
+			name += 2;
+		s = strchr(name, '/');
+		if (s == NULL)
+			// TODO: is this already enough to give up totally?
+			iscontrol = false;
+		else
+			iscontrol = strcmp(s+1, "debian/control") == 0 ||
+				strcmp(name, "debian/control") == 0;
+
+		if (iscontrol) {
+			r = read_source_control_file(e, tar, entry);
+			archive_read_free(tar);
+			r2 = uncompress_error(file);
+			RET_UPDATE(r, r2);
+			uncompress_abort(file);
+			*found_p = true;
+			return r;
+		}
+		a = archive_read_data_skip(tar);
+		if (a != ARCHIVE_OK) {
+			int err = archive_errno(tar);
+			printf("Error %d skipping %s within %s: %s\n",
+					err, name, filename,
+					archive_error_string(tar));
+			archive_read_free(tar);
+			if (err == 0 || err == -EINVAL)
+				r = RET_ERROR;
+			else
+				r = RET_ERRNO(err);
+			r2 = uncompress_error(file);
+			RET_UPDATE(r, r2);
+			uncompress_abort(file);
+			return r;
+		}
+		/* NOTE(review): this early return leaks 'tar' and 'file'
+		 * (no archive_read_free/uncompress_abort) — confirm */
+		if (interrupted())
+			return RET_ERROR_INTERRUPTED;
+	}
+	if (a != ARCHIVE_EOF) {
+		int err = archive_errno(tar);
+		fprintf(stderr, "Error %d reading %s: %s\n",
+				err, filename, archive_error_string(tar));
+		archive_read_free(tar);
+		if (err == 0 || err == -EINVAL)
+			r = RET_ERROR;
+		else
+			r = RET_ERRNO(err);
+		r2 = uncompress_error(file);
+		RET_UPDATE(r, r2);
+		uncompress_abort(file);
+		return r;
+	}
+	/* clean EOF without finding debian/control */
+	archive_read_free(tar);
+	*found_p = false;
+	return uncompress_close(file);
+}
+#endif
+
+/* full file name of requested files ready to analyse */
+/* full file name of requested files ready to analyse */
+/* Analyse the file most recently requested via sourceextraction_needs():
+ * first the .diff (cheapest), then .debian.tar, then the plain .tar.
+ * Sets e->completed once debian/control was found, e->failed on problems
+ * (a merely unreadable file yields RET_NOTHING, not a hard error). */
+retvalue sourceextraction_analyse(struct sourceextraction *e, const char *fullfilename) {
+	retvalue r;
+	bool found;
+
+#ifndef HAVE_LIBARCHIVE
+	/* without libarchive only the .diff path can have been requested */
+	assert (e->difffile >= 0);
+#endif
+	if (e->difffile >= 0) {
+		struct compressedfile *f;
+
+		assert (uncompression_supported(e->diffcompression));
+		/* mark as consumed so the next request moves on */
+		e->difffile = -1;
+
+		r = uncompress_open(&f, fullfilename, e->diffcompression);
+		if (!RET_IS_OK(r)) {
+			e->failed = true;
+			/* being unable to read a file is no hard error... */
+			return RET_NOTHING;
+		}
+		r = parsediff(f, e->section_p, e->priority_p, &found);
+		if (RET_IS_OK(r))  {
+			if (!found)
+				r = uncompress_close(f);
+			else {
+				/* not read to the end: check for pending
+				 * decompressor errors, then abort */
+				r = uncompress_error(f);
+				uncompress_abort(f);
+			}
+		} else {
+			uncompress_abort(f);
+		}
+		if (!RET_IS_OK(r))
+			e->failed = true;
+		else if (found)
+			/* do not look in the tar, we found debian/control */
+			e->completed = true;
+		return r;
+	}
+
+#ifdef HAVE_LIBARCHIVE
+	if (e->debiantarfile >= 0) {
+		e->debiantarfile = -1;
+		r = parse_tarfile(e, fullfilename, e->debiancompression,
+				&found);
+		if (!RET_IS_OK(r))
+			e->failed = true;
+		else if (found)
+			/* do not look in the tar, we found debian/control */
+			e->completed = true;
+		return r;
+	}
+#endif
+
+	/* if it's not the diff nor the .debian.tar, look into the .tar file: */
+	assert (e->tarfile >= 0);
+	e->tarfile = -1;
+
+#ifdef HAVE_LIBARCHIVE
+	r = parse_tarfile(e, fullfilename, e->tarcompression, &found);
+	if (!RET_IS_OK(r))
+		e->failed = true;
+	else if (found)
+		/* do not look in the tar, we found debian/control */
+		e->completed = true;
+	return r;
+#else
+	return RET_NOTHING;
+#endif
+}
+
+/* Free the extraction state; RET_OK if debian/control was found and the
+ * requested fields were extracted, RET_NOTHING otherwise. */
+retvalue sourceextraction_finish(struct sourceextraction *e) {
+	if (e->completed) {
+		free(e);
+		return RET_OK;
+	}
+	free(e);
+	return RET_NOTHING;
+}
diff --git a/sourceextraction.h b/sourceextraction.h
new file mode 100644
index 0000000..e41bd35
--- /dev/null
+++ b/sourceextraction.h
@@ -0,0 +1,21 @@
+#ifndef REPREPRO_SOURCEEXTRACTION_H
+#define REPREPRO_SOURCEEXTRACTION_H
+
+struct sourceextraction;
+
+/*@NULL@*/struct sourceextraction *sourceextraction_init(/*@null@*/char **section_p, /*@null@*/char **priority_p);
+
+void sourceextraction_abort(/*@only@*/struct sourceextraction *);
+
+/* register a file part of this source */
+void sourceextraction_setpart(struct sourceextraction *, int , const char *);
+
+/* return the next needed file */
+bool sourceextraction_needs(struct sourceextraction *, /*@out@*/int *);
+
+/* full file name of requested files ready to analyse */
+retvalue sourceextraction_analyse(struct sourceextraction *, const char *);
+
+retvalue sourceextraction_finish(/*@only@*/struct sourceextraction *);
+
+#endif
diff --git a/sources.c b/sources.c
new file mode 100644
index 0000000..1e7efcb
--- /dev/null
+++ b/sources.c
@@ -0,0 +1,733 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <assert.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+#include <strings.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "chunks.h"
+#include "sources.h"
+#include "names.h"
+#include "dirs.h"
+#include "dpkgversions.h"
+#include "override.h"
+#include "tracking.h"
+#include "signature.h"
+#include "package.h"
+
+/* parse a "<md5sum> <size> <filename>" line, extracting the filename
+ * (only the filename is returned; hash and size are merely skipped) */
+static retvalue calc_parsefileline(const char *fileline, /*@out@*/char **filename) {
+	const char *p;
+	size_t len;
+	char *filen;
+
+	assert (fileline != NULL);
+	if (*fileline == '\0')
+		return RET_NOTHING;
+
+	/* skip leading whitespace, then the md5sum token itself */
+	p = fileline + strspn(fileline, " \t");
+	if (*p == '\0')
+		return RET_NOTHING;
+	p += strcspn(p, " \t");
+	if (*p == '\0') {
+		fprintf(stderr, "Expecting more data after md5sum!\n");
+		return RET_ERROR;
+	}
+	/* skip the size token */
+	p += strspn(p, " \t");
+	p += strcspn(p, " \t");
+	if (*p == '\0') {
+		fprintf(stderr, "Expecting more data after size!\n");
+		return RET_ERROR;
+	}
+	/* the filename is the next whitespace-delimited token */
+	p += strspn(p, " \t");
+	len = strcspn(p, " \t");
+
+	filen = strndup(p, len);
+	if (FAILEDTOALLOC(filen))
+		return RET_ERROR_OOM;
+	*filename = filen;
+	return RET_OK;
+}
+
+/* turn the lines of a 'Files:' field into a list of their basenames.
+ * On error basenames is deallocated; an empty input yields RET_NOTHING
+ * (with basenames initialized but empty). */
+static retvalue getBasenames(const struct strlist *filelines, /*@out@*/struct strlist *basenames) {
+	retvalue result;
+	int i;
+
+	assert (filelines != NULL && basenames != NULL);
+
+	result = strlist_init_n(filelines->count, basenames);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = RET_NOTHING;
+	for (i = 0 ; i < filelines->count ; i++) {
+		const char *fileline = filelines->values[i];
+		char *basefilename;
+
+		result = calc_parsefileline(fileline, &basefilename);
+		if (result == RET_NOTHING) {
+			fprintf(stderr, "Malformed Files: line '%s'!\n",
+					fileline);
+			result = RET_ERROR;
+		}
+		if (RET_WAS_ERROR(result))
+			break;
+		result = strlist_add(basenames, basefilename);
+		if (RET_WAS_ERROR(result))
+			break;
+		result = RET_OK;
+	}
+	if (RET_WAS_ERROR(result))
+		strlist_done(basenames);
+	else
+		assert (filelines->count == basenames->count);
+	return result;
+}
+
+/* get_version implementation for source targets: extract the
+ * mandatory 'Version' field from a control chunk */
+retvalue sources_getversion(const char *control, char **version) {
+	retvalue r = chunk_getvalue(control, "Version", version);
+
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n",
+				control);
+		return RET_ERROR;
+	}
+	return r;
+}
+
+/* get_architecture implementation for source targets: everything in a
+ * source index has the pseudo-architecture 'source', the chunk is ignored */
+retvalue sources_getarchitecture(UNUSED(const char *chunk), architecture_t *architecture_p) {
+	*architecture_p = architecture_source;
+	return RET_OK;
+}
+
+/* get_installdata implementation for source targets: compute everything
+ * needed to add the package to target t — the control chunk to store
+ * (with 'Directory' rewritten to point into our pool), the pool filekeys
+ * and the checksums of the files with their origin directory prefixed. */
+retvalue sources_getinstalldata(const struct target *t, struct package *package, char **control, struct strlist *filekeys, struct checksumsarray *origfiles) {
+	retvalue r;
+	char *origdirectory, *directory, *mychunk;
+	struct strlist myfilekeys;
+	struct strlist filelines[cs_hashCOUNT]; /* one list per hash type */
+	struct checksumsarray files;
+	enum checksumtype cs;
+	bool gothash = false; /* at least one checksum field found? */
+	const char *chunk = package->control;
+
+	assert (package->architecture == architecture_source);
+
+	/* collect the file list of each hash type ('Files',
+	 * 'Checksums-Sha1', ...); a missing one is tolerated as long
+	 * as at least one of them is present */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (source_checksum_names[cs] != NULL);
+		r = chunk_getextralinelist(chunk, source_checksum_names[cs],
+				&filelines[cs]);
+		if (r == RET_NOTHING)
+			strlist_init(&filelines[cs]);
+		else if (RET_WAS_ERROR(r)) {
+			/* release the lists already retrieved */
+			while (cs-- > cs_md5sum) {
+				strlist_done(&filelines[cs]);
+			}
+			return r;
+		} else
+			gothash = true;
+	}
+	if (!gothash) {
+		fprintf(stderr,
+"Missing 'Files' (or 'SHA1' or ...) entry in '%s'!\n",
+				chunk);
+		for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++)
+			strlist_done(&filelines[cs]);
+		return RET_ERROR;
+	}
+	/* merge the per-hash line lists into one checksums array */
+	r = checksumsarray_parse(&files, filelines, package->name);
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		strlist_done(&filelines[cs]);
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = chunk_getvalue(chunk, "Directory", &origdirectory);
+	if (r == RET_NOTHING) {
+/* Flat repositories can come without this, TODO: add warnings in other cases
+		fprintf(stderr, "Missing 'Directory' entry in '%s'!\n", chunk);
+		r = RET_ERROR;
+*/
+		/* no 'Directory': treat file names as relative to "." */
+		origdirectory = strdup(".");
+		if (FAILEDTOALLOC(origdirectory))
+			r = RET_ERROR_OOM;
+	}
+	if (RET_WAS_ERROR(r)) {
+		checksumsarray_done(&files);
+		return r;
+	}
+
+	/* reject names with characters forbidden in pool file names */
+	r = propersourcename(package->name);
+	assert (r != RET_NOTHING);
+	if (RET_IS_OK(r))
+		r = properfilenames(&files.names);
+	if (RET_WAS_ERROR(r)) {
+		fprintf(stderr,
+"Forbidden characters in source package '%s'!\n", package->name);
+		free(origdirectory);
+		checksumsarray_done(&files);
+		return r;
+	}
+
+	/* filekeys = pool directory of this source + file basenames */
+	directory = calc_sourcedir(t->component, package->name);
+	if (FAILEDTOALLOC(directory))
+		r = RET_ERROR_OOM;
+	else
+		r = calc_dirconcats(directory, &files.names, &myfilekeys);
+	if (RET_WAS_ERROR(r)) {
+		free(directory);
+		free(origdirectory);
+		checksumsarray_done(&files);
+		return r;
+	}
+	/* origfiles keep the directory from the source index instead */
+	r = calc_inplacedirconcats(origdirectory, &files.names);
+	free(origdirectory);
+	if (!RET_WAS_ERROR(r)) {
+		char *n;
+
+		/* normalize the chunk and make 'Directory' point into
+		 * our pool */
+		n = chunk_normalize(chunk, "Package", package->name);
+		if (FAILEDTOALLOC(n))
+			mychunk = NULL;
+		else
+			mychunk = chunk_replacefield(n,
+					"Directory", directory, true);
+		free(n);
+		if (FAILEDTOALLOC(mychunk))
+			r = RET_ERROR_OOM;
+	}
+	free(directory);
+	if (RET_WAS_ERROR(r)) {
+		strlist_done(&myfilekeys);
+		checksumsarray_done(&files);
+		return r;
+	}
+	*control = mychunk;
+	strlist_move(filekeys, &myfilekeys);
+	checksumsarray_move(origfiles, &files);
+	return RET_OK;
+}
+
+/* get_filekeys implementation for source targets: compute the list of
+ * filekeys ("<Directory>/<basename>") from a source control chunk.
+ * Unlike sources_getinstalldata, a missing 'Directory' is an error here. */
+retvalue sources_getfilekeys(const char *chunk, struct strlist *filekeys) {
+	char *origdirectory;
+	struct strlist basenames;
+	retvalue r;
+	struct strlist filelines;
+
+
+	/* Read the directory given there */
+	r = chunk_getvalue(chunk, "Directory", &origdirectory);
+	if (r == RET_NOTHING) {
+		//TODO: check if it is even text and do not print
+		//of looking binary??
+		fprintf(stderr, "Does not look like source control: '%s'\n",
+				chunk);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* only the md5 'Files' field is consulted for the names here */
+	r = chunk_getextralinelist(chunk, "Files", &filelines);
+	if (r == RET_NOTHING) {
+		//TODO: check if it is even text and do not print
+		//of looking binary??
+		fprintf(stderr, "Does not look like source control: '%s'\n",
+				chunk);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(origdirectory);
+		return r;
+	}
+	r = getBasenames(&filelines, &basenames);
+	strlist_done(&filelines);
+	if (RET_WAS_ERROR(r)) {
+		free(origdirectory);
+		return r;
+	}
+
+	/* prefix each basename with the directory */
+	r = calc_dirconcats(origdirectory, &basenames, filekeys);
+	free(origdirectory);
+	strlist_done(&basenames);
+	return r;
+}
+
+/* get_checksums implementation for source targets: collect the
+ * checksums of all files of the package, names prefixed with the
+ * package's 'Directory'. */
+retvalue sources_getchecksums(const char *chunk, struct checksumsarray *out) {
+	char *origdirectory;
+	struct checksumsarray a;
+	retvalue r;
+	struct strlist filelines[cs_hashCOUNT]; /* one list per hash type */
+	enum checksumtype cs;
+
+	/* Read the directory given there */
+	r = chunk_getvalue(chunk, "Directory", &origdirectory);
+	if (!RET_IS_OK(r))
+		return r;
+
+	/* 'Files' (md5) is mandatory, the stronger hashes may be absent */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (source_checksum_names[cs] != NULL);
+		r = chunk_getextralinelist(chunk, source_checksum_names[cs],
+				&filelines[cs]);
+		if (r == RET_NOTHING) {
+			if (cs == cs_md5sum) {
+				fprintf(stderr,
+"Missing 'Files' entry in '%s'!\n",
+					chunk);
+				r = RET_ERROR;
+			} else
+				strlist_init(&filelines[cs]);
+		}
+		if (RET_WAS_ERROR(r)) {
+			/* release the lists already retrieved */
+			while (cs-- > cs_md5sum) {
+				strlist_done(&filelines[cs]);
+			}
+			free(origdirectory);
+			return r;
+		}
+	}
+	r = checksumsarray_parse(&a, filelines, "source chunk");
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		strlist_done(&filelines[cs]);
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(origdirectory);
+		return r;
+	}
+
+	/* prefix each file name with the directory */
+	r = calc_inplacedirconcats(origdirectory, &a.names);
+	free(origdirectory);
+	if (RET_WAS_ERROR(r)) {
+		checksumsarray_done(&a);
+		return r;
+	}
+	checksumsarray_move(out, &a);
+	return RET_OK;
+}
+
+/* do_reoverride implementation for source targets: apply the dsc
+ * override data for packagename (if any) to controlchunk.
+ * RET_NOTHING when there is no override entry for this package. */
+retvalue sources_doreoverride(const struct target *target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) {
+	const struct overridedata *data;
+	struct fieldtoadd *fields;
+	char *chunk;
+	retvalue r;
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	data = override_search(target->distribution->overrides.dsc,
+			packagename);
+	if (data == NULL)
+		/* nothing to override for this package */
+		return RET_NOTHING;
+
+	r = override_allreplacefields(data, &fields);
+	if (!RET_IS_OK(r))
+		return r;
+	chunk = chunk_replacefields(controlchunk, fields,
+			"Directory", true);
+	addfield_free(fields);
+	if (FAILEDTOALLOC(chunk))
+		return RET_ERROR_OOM;
+	*newcontrolchunk = chunk;
+	return RET_OK;
+}
+
+/* do_retrack implementation for source targets: record all files of
+ * this source package in the tracking database under sourcename. */
+retvalue sources_retrack(const char *sourcename, const char *chunk, trackingdb tracks) {
+	retvalue r;
+	char *sourceversion;
+	struct trackedpackage *pkg;
+	struct strlist filekeys;
+	int i;
+
+	//TODO: eliminate duplicate code!
+	assert(sourcename!=NULL);
+
+	if (interrupted())
+		return RET_ERROR_INTERRUPTED;
+
+	r = chunk_getvalue(chunk, "Version", &sourceversion);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n",
+				chunk);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+
+	r = sources_getfilekeys(chunk, &filekeys);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Malformed source control:'%s'\n", chunk);
+		r = RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r)) {
+		free(sourceversion);
+		return r;
+	}
+
+	/* look up (or create) the tracking entry for name/version */
+	r = tracking_getornew(tracks, sourcename, sourceversion, &pkg);
+	free(sourceversion);
+	if (RET_WAS_ERROR(r)) {
+		strlist_done(&filekeys);
+		return r;
+	}
+
+	// TODO: error handling is suboptimal here.
+	// is there a way to again remove old additions (esp. references)
+	// where something fails?
+	/* hand each filekey over; the slots are NULLed so strlist_done
+	 * will not free what the tracking entry now holds */
+	for (i = 0 ; i < filekeys.count ; i++) {
+		r = trackedpackage_addfilekey(tracks, pkg,
+				ft_SOURCE, filekeys.values[i], true);
+		filekeys.values[i] = NULL;
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&filekeys);
+			trackedpackage_free(pkg);
+			return r;
+		}
+	}
+	strlist_done(&filekeys);
+	return tracking_save(tracks, pkg);
+}
+
+/* get_sourceandversion implementation for source targets: the source
+ * name is the package name itself, the version the 'Version' field. */
+retvalue sources_getsourceandversion(const char *chunk, const char *packagename, char **source, char **version) {
+	char *v, *n;
+	retvalue r;
+
+	//TODO: eliminate duplicate code!
+	assert(packagename!=NULL);
+
+	r = chunk_getvalue(chunk, "Version", &v);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n",
+				chunk);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	n = strdup(packagename);
+	if (FAILEDTOALLOC(n)) {
+		free(v);
+		return RET_ERROR_OOM;
+	}
+	*source = n;
+	*version = v;
+	return RET_OK;
+}
+
+/****************************************************************/
+
+static inline retvalue getvalue(const char *filename, const char *chunk, const char *field, char **value) {
+ retvalue r;
+
+ r = chunk_getvalue(chunk, field, value);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Missing '%s' field in %s!\n",
+ field, filename);
+ r = RET_ERROR;
+ }
+ return r;
+}
+
+static inline retvalue checkvalue(const char *filename, const char *chunk, const char *field) {
+ retvalue r;
+
+ r = chunk_checkfield(chunk, field);
+ if (r == RET_NOTHING) {
+ fprintf(stderr, "Cannot find '%s' field in %s!\n",
+ field, filename);
+ r = RET_ERROR;
+ }
+ return r;
+}
+
+/* like chunk_getvalue, but a missing field just sets *value to NULL */
+static inline retvalue getvalue_n(const char *chunk, const char *field, char **value) {
+	retvalue r = chunk_getvalue(chunk, field, value);
+
+	if (r == RET_NOTHING)
+		*value = NULL;
+	return r;
+}
+
+/* read a .dsc file into *dsc (see sources.h): extracts the signed
+ * control chunk, the mandatory Source/Version fields, optional
+ * Section/Priority, and all file checksum lists.  Some fields of *dsc
+ * may already be set when an error is returned. */
+retvalue sources_readdsc(struct dsc_headers *dsc, const char *filename, const char *filenametoshow, bool *broken) {
+	retvalue r;
+	struct strlist filelines[cs_hashCOUNT]; /* one list per hash type */
+	enum checksumtype cs;
+
+	r = signature_readsignedchunk(filename, filenametoshow,
+			&dsc->control, NULL, broken);
+	if (RET_WAS_ERROR(r)) {
+		return r;
+	}
+	if (verbose > 100) {
+		fprintf(stderr, "Extracted control chunk from '%s': '%s'\n",
+				filenametoshow, dsc->control);
+	}
+
+	/* first look for fields that should be there */
+
+	r = chunk_getname(dsc->control, "Source", &dsc->name, false);
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Source' field in %s!\n",
+				filenametoshow);
+		return RET_ERROR;
+	}
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* This is needed and cannot be ignored unless
+	 * sources_complete is changed to not need it */
+	r = checkvalue(filenametoshow, dsc->control, "Format");
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = checkvalue(filenametoshow, dsc->control, "Maintainer");
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	r = getvalue(filenametoshow, dsc->control, "Version", &dsc->version);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* optional fields: left NULL when absent */
+	r = getvalue_n(dsc->control, SECTION_FIELDNAME, &dsc->section);
+	if (RET_WAS_ERROR(r))
+		return r;
+	r = getvalue_n(dsc->control, PRIORITY_FIELDNAME, &dsc->priority);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* 'Files' (md5) is mandatory, the other hash fields may be absent */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (source_checksum_names[cs] != NULL);
+		r = chunk_getextralinelist(dsc->control,
+				source_checksum_names[cs], &filelines[cs]);
+		if (r == RET_NOTHING) {
+			if (cs == cs_md5sum) {
+				fprintf(stderr,
+"Missing 'Files' field in '%s'!\n",
+					filenametoshow);
+				r = RET_ERROR;
+			} else
+				strlist_init(&filelines[cs]);
+		}
+		if (RET_WAS_ERROR(r)) {
+			/* release the lists already retrieved */
+			while (cs-- > cs_md5sum) {
+				strlist_done(&filelines[cs]);
+			}
+			return r;
+		}
+	}
+	r = checksumsarray_parse(&dsc->files, filelines, filenametoshow);
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		strlist_done(&filelines[cs]);
+	}
+	return r;
+}
+
+/* release everything sources_readdsc stored in *dsc */
+void sources_done(struct dsc_headers *dsc) {
+	checksumsarray_done(&dsc->files);
+	free(dsc->name);
+	free(dsc->version);
+	free(dsc->section);
+	free(dsc->priority);
+	free(dsc->control);
+}
+
+/* build the Sources index chunk for an included .dsc: the name is
+ * stored as 'Package' (the 'Source' field is deleted), and Directory,
+ * Files/Checksums, Section, Priority and override data are added.
+ * section and priority must already be decided by the caller. */
+retvalue sources_complete(const struct dsc_headers *dsc, const char *directory, const struct overridedata *override, const char *section, const char *priority, char **newcontrol) {
+	retvalue r;
+	struct fieldtoadd *replace;
+	char *newchunk, *newchunk2;
+	char *newfilelines, *newsha1lines, *newsha256lines;
+
+	assert(section != NULL && priority != NULL);
+
+	newchunk2 = chunk_normalize(dsc->control, "Package", dsc->name);
+	if (FAILEDTOALLOC(newchunk2))
+		return RET_ERROR_OOM;
+
+	/* regenerate the file list lines for all hash types */
+	r = checksumsarray_genfilelist(&dsc->files,
+			&newfilelines, &newsha1lines, &newsha256lines);
+	if (RET_WAS_ERROR(r)) {
+		free(newchunk2);
+		return r;
+	}
+	assert (newfilelines != NULL);
+	/* chain up the field edits; each *_new returns NULL on OOM
+	 * (aodfield_new presumably skips the field when its data is
+	 * NULL — see chunkedit.c to confirm) */
+	replace = aodfield_new("Checksums-Sha256", newsha256lines, NULL);
+	if (!FAILEDTOALLOC(replace))
+		replace = aodfield_new("Checksums-Sha1", newsha1lines, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = deletefield_new("Source", replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = addfield_new("Files", newfilelines, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = addfield_new("Directory", directory, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = deletefield_new("Status", replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = addfield_new(SECTION_FIELDNAME, section, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = addfield_new(PRIORITY_FIELDNAME, priority, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = override_addreplacefields(override, replace);
+	if (FAILEDTOALLOC(replace)) {
+		free(newsha256lines);
+		free(newsha1lines);
+		free(newfilelines);
+		free(newchunk2);
+		return RET_ERROR_OOM;
+	}
+
+	newchunk = chunk_replacefields(newchunk2, replace, "Files", true);
+	free(newsha256lines);
+	free(newsha1lines);
+	free(newfilelines);
+	free(newchunk2);
+	addfield_free(replace);
+	if (FAILEDTOALLOC(newchunk)) {
+		return RET_ERROR_OOM;
+	}
+
+	*newcontrol = newchunk;
+
+	return RET_OK;
+}
+
+/* update Checksums */
+/* regenerate the 'Files'/'Checksums-*' fields of chunk from the
+ * checksums in c (one entry per filekey; names are the filekeys'
+ * basenames). */
+retvalue sources_complete_checksums(const char *chunk, const struct strlist *filekeys, struct checksums **c, char **out) {
+	struct fieldtoadd *replace;
+	char *newchunk;
+	char *newfilelines, *newsha1lines, *newsha256lines;
+	struct checksumsarray checksums;
+	retvalue r;
+	int i;
+
+	/* fake a checksumarray... */
+	checksums.checksums = c;
+	checksums.names.count = filekeys->count;
+	checksums.names.values = nzNEW(filekeys->count, char *);
+	if (FAILEDTOALLOC(checksums.names.values))
+		return RET_ERROR_OOM;
+	/* the name slots only borrow pointers into filekeys, hence only
+	 * the array itself is freed below (no strlist_done) */
+	for (i = 0 ; i < filekeys->count ; i++) {
+		checksums.names.values[i] = (char*)
+			dirs_basename(filekeys->values[i]);
+	}
+
+	r = checksumsarray_genfilelist(&checksums,
+			&newfilelines, &newsha1lines, &newsha256lines);
+	free(checksums.names.values);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (newfilelines != NULL);
+	replace = aodfield_new("Checksums-Sha256", newsha256lines, NULL);
+	if (!FAILEDTOALLOC(replace))
+		replace = aodfield_new("Checksums-Sha1", newsha1lines, replace);
+	if (!FAILEDTOALLOC(replace))
+		replace = addfield_new("Files", newfilelines, replace);
+	if (FAILEDTOALLOC(replace)) {
+		free(newsha256lines);
+		free(newsha1lines);
+		free(newfilelines);
+		return RET_ERROR_OOM;
+	}
+	newchunk = chunk_replacefields(chunk, replace, "Files", true);
+	free(newsha256lines);
+	free(newsha1lines);
+	free(newfilelines);
+	addfield_free(replace);
+	if (FAILEDTOALLOC(newchunk))
+		return RET_ERROR_OOM;
+
+	*out = newchunk;
+	return RET_OK;
+}
+
+/* return "<name>_<version>.dsc", with any epoch stripped from version */
+char *calc_source_basename(const char *name, const char *version) {
+	const char *colon = strchr(version, ':');
+
+	if (colon != NULL)
+		version = colon + 1;
+	return mprintf("%s_%s.dsc", name, version);
+}
+
+/* pool directory of a source package: "pool/<component>/<prefix>/<name>",
+ * where <prefix> is "lib<x>" for "lib..." names, the first letter otherwise */
+char *calc_sourcedir(component_t component, const char *sourcename) {
+
+	assert (*sourcename != '\0');
+
+	if (strncmp(sourcename, "lib", 3) == 0 && sourcename[3] != '\0')
+		return mprintf("pool/%s/lib%c/%s",
+				atoms_components[component],
+				sourcename[3], sourcename);
+	else if (*sourcename != '\0')
+		return mprintf("pool/%s/%c/%s",
+				atoms_components[component],
+				sourcename[0], sourcename);
+	else
+		return NULL;
+}
+
+/* pool filekey of a file belonging to the given source package,
+ * same layout as calc_sourcedir plus the file name */
+char *calc_filekey(component_t component, const char *sourcename, const char *filename) {
+	if (strncmp(sourcename, "lib", 3) == 0 && sourcename[3] != '\0')
+		return mprintf("pool/%s/lib%c/%s/%s",
+				atoms_components[component],
+				sourcename[3], sourcename, filename);
+	else if (*sourcename != '\0')
+		return mprintf("pool/%s/%c/%s/%s",
+				atoms_components[component],
+				sourcename[0], sourcename, filename);
+	else
+		return NULL;
+}
+
+/* pool directory for byhand files of a source package:
+ * "<sourcedir>/<name>_<version>_byhand" */
+char *calc_byhanddir(component_t component, const char *sourcename, const char *version) {
+	if (strncmp(sourcename, "lib", 3) == 0 && sourcename[3] != '\0')
+		return mprintf("pool/%s/lib%c/%s/%s_%s_byhand",
+				atoms_components[component],
+				sourcename[3], sourcename,
+				sourcename, version);
+	else if (*sourcename != '\0')
+		return mprintf("pool/%s/%c/%s/%s_%s_byhand",
+				atoms_components[component],
+				sourcename[0], sourcename,
+				sourcename, version);
+	else
+		return NULL;
+}
diff --git a/sources.h b/sources.h
new file mode 100644
index 0000000..62dba6e
--- /dev/null
+++ b/sources.h
@@ -0,0 +1,48 @@
+#ifndef REPREPRO_SOURCES_H
+#define REPREPRO_SOURCES_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_TARGET_H
+#include "target.h"
+#endif
+
+/* Functions for the target.h-stuff: */
+/* (implementations of the method types declared in target.h,
+ * for targets of architecture 'source') */
+get_version sources_getversion;
+get_installdata sources_getinstalldata;
+get_architecture sources_getarchitecture;
+get_filekeys sources_getfilekeys;
+get_checksums sources_getchecksums;
+do_reoverride sources_doreoverride;
+do_retrack sources_retrack;
+get_sourceandversion sources_getsourceandversion;
+complete_checksums sources_complete_checksums;
+
+/* Functions for checkindsc.c and incoming.c: */
+/* parsed header fields of a .dsc file */
+struct dsc_headers {
+	char *name, *version;
+	/* the control chunk extracted by signature_readsignedchunk */
+	char *control;
+	/* the files listed, together with their checksums */
+	struct checksumsarray files;
+	/* normally not in a .dsc file: */
+	/*@null@*/ char *section, *priority;
+};
+
+/* read contents of filename into sources_readdsc.
+ * - broken is like signature_readsignedchunk
+ * - does not follow retvalue conventions, some fields may be set even when
+ *   error returned
+ * - no checks for sanity of values, left to the caller */
+retvalue sources_readdsc(struct dsc_headers *, const char *filename, const char *filenametoshow, bool *broken);
+
+void sources_done(struct dsc_headers *);
+
+struct overridedata;
+retvalue sources_complete(const struct dsc_headers *, const char *directory, const struct overridedata *override, const char *section, const char *priority, char **newcontrol);
+
+/* "<name>_<version without epoch>.dsc" */
+char *calc_source_basename(const char *name, const char *version);
+/* pool directory of a source package within a component */
+char *calc_sourcedir(component_t, const char *sourcename);
+/* pool filekey of a file of the given source package */
+char *calc_filekey(component_t, const char *sourcename, const char *filename);
+char *calc_byhanddir(component_t, const char *sourcename, const char *version);
+#endif
diff --git a/strlist.c b/strlist.c
new file mode 100644
index 0000000..1899be2
--- /dev/null
+++ b/strlist.c
@@ -0,0 +1,283 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2005,2007 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "error.h"
+#include "strlist.h"
+
+/* true iff the list contains a string equal to element */
+bool strlist_in(const struct strlist *strlist, const char *element) {
+	int i;
+
+	assert(strlist != NULL);
+
+	for (i = 0 ; i < strlist->count ; i++) {
+		if (strcmp(strlist->values[i], element) == 0)
+			return true;
+	}
+	return false;
+}
+/* index of the first string equal to element, or -1 if none */
+int strlist_ofs(const struct strlist *strlist, const char *element) {
+	int i;
+
+	assert(strlist != NULL);
+
+	for (i = 0 ; i < strlist->count ; i++) {
+		if (strcmp(strlist->values[i], element) == 0)
+			return i;
+	}
+	return -1;
+}
+
+/* true iff every string of subset is also in strlist; otherwise, when
+ * missing is non-NULL, *missing is set to the first string not found */
+bool strlist_subset(const struct strlist *strlist, const struct strlist *subset, const char **missing) {
+	int i;
+
+	assert(subset != NULL);
+
+	for (i = 0 ; i < subset->count ; i++) {
+		if (strlist_in(strlist, subset->values[i]))
+			continue;
+		if (missing != NULL)
+			*missing = subset->values[i];
+		return false;
+	}
+	return true;
+}
+
+/* initialize an empty list with room for startsize entries.
+ * At least one slot is always allocated, so ->values is never NULL
+ * on success.  (The old `startsize > 0` test after the clamp to 1 was
+ * always true, leaving an unreachable `values = NULL` branch.) */
+retvalue strlist_init_n(int startsize, struct strlist *strlist) {
+	assert(strlist != NULL && startsize >= 0);
+
+	if (startsize == 0)
+		startsize = 1;
+	strlist->count = 0;
+	strlist->size = startsize;
+	strlist->values = malloc(startsize*sizeof(char *));
+	if (FAILEDTOALLOC(strlist->values))
+		return RET_ERROR_OOM;
+	return RET_OK;
+}
+
+/* create a one-element list owning value; value is freed on OOM */
+retvalue strlist_init_singleton(char *value, struct strlist *strlist) {
+	assert(strlist != NULL);
+
+	strlist->count = 1;
+	strlist->size = 1;
+	strlist->values = NEW(char *);
+	if (FAILEDTOALLOC(strlist->values)) {
+		free(value);
+		return RET_ERROR_OOM;
+	}
+	strlist->values[0] = value;
+	return RET_OK;
+}
+
+/* initialize an empty list without allocating anything */
+void strlist_init(struct strlist *strlist) {
+	assert(strlist != NULL);
+
+	strlist->count = 0;
+	strlist->size = 0;
+	strlist->values = NULL;
+}
+
+/* free all strings of the list and the value array itself; the list
+ * is left empty and re-usable.
+ * (The old code NULLed ->values but kept the stale ->count, so a
+ * second strlist_done on a non-empty list dereferenced NULL; resetting
+ * the counters makes the call idempotent.) */
+void strlist_done(struct strlist *strlist) {
+	int i;
+
+	assert(strlist != NULL);
+
+	for (i = 0 ; i < strlist->count ; i++)
+		free(strlist->values[i]);
+	free(strlist->values);
+	strlist->values = NULL;
+	strlist->count = 0;
+	strlist->size = 0;
+}
+
+/* append element; the list takes ownership (element is freed on OOM).
+ * Fix: the old code did `size += 8` *before* realloc; after a failed
+ * realloc the recorded size overstated the actual capacity, so a later
+ * strlist_add would write past the allocation.  The size is now only
+ * updated once the allocation succeeded. */
+retvalue strlist_add(struct strlist *strlist, char *element) {
+	char **v;
+
+	assert(strlist != NULL && element != NULL);
+
+	if (strlist->count >= strlist->size) {
+		int newsize = strlist->size + 8;
+
+		v = realloc(strlist->values, newsize*sizeof(char *));
+		if (FAILEDTOALLOC(v)) {
+			free(element);
+			return RET_ERROR_OOM;
+		}
+		strlist->size = newsize;
+		strlist->values = v;
+	}
+
+	strlist->values[strlist->count++] = element;
+	return RET_OK;
+}
+
+/* like strlist_add, but the list stores its own copy of todup */
+retvalue strlist_add_dup(struct strlist *strlist, const char *todup) {
+	char *copy = strdup(todup);
+
+	if (FAILEDTOALLOC(copy))
+		return RET_ERROR_OOM;
+	return strlist_add(strlist, copy);
+}
+
+/* prepend element to the list; the list takes ownership (element is
+ * freed on OOM).
+ * Fix: as in strlist_add, the old code grew ->size *before* realloc,
+ * so a failed realloc left a size larger than the real capacity and a
+ * later insertion could write out of bounds; the bookkeeping is now
+ * only updated after realloc succeeded. */
+retvalue strlist_include(struct strlist *strlist, char *element) {
+	char **v;
+
+	assert(strlist != NULL && element != NULL);
+
+	if (strlist->count >= strlist->size) {
+		v = realloc(strlist->values,
+				(strlist->size + 1)*sizeof(char *));
+		if (FAILEDTOALLOC(v)) {
+			free(element);
+			return RET_ERROR_OOM;
+		}
+		strlist->size += 1;
+		strlist->values = v;
+	}
+	arrayinsert(char *, strlist->values, 0, strlist->count);
+	strlist->count++;
+	strlist->values[0] = element;
+	return RET_OK;
+}
+
+/* print the strings space-separated to file;
+ * RET_ERROR if any write failed, RET_OK otherwise */
+retvalue strlist_fprint(FILE *file, const struct strlist *strlist) {
+	retvalue result = RET_OK;
+	int i;
+
+	assert(strlist != NULL);
+	assert(file != NULL);
+
+	for (i = 0 ; i < strlist->count ; i++) {
+		if (i > 0 && fputc(' ', file) == EOF)
+			result = RET_ERROR;
+		if (fputs(strlist->values[i], file) == EOF)
+			result = RET_ERROR;
+	}
+	return result;
+}
+
+/* replace the contents of dest with those from orig, which get emptied */
+void strlist_move(struct strlist *dest, struct strlist *orig) {
+
+	assert(dest != NULL && orig != NULL);
+
+	if (dest == orig)
+		return;
+
+	*dest = *orig;
+	orig->count = 0;
+	orig->size = 0;
+	orig->values = NULL;
+}
+
+/* append element unless an equal string is already there; ownership
+ * of element is taken either way (it is freed when discarded) */
+retvalue strlist_adduniq(struct strlist *strlist, char *element) {
+	// TODO: is there something better feasible?
+	if (!strlist_in(strlist, element))
+		return strlist_add(strlist, element);
+	free(element);
+	return RET_OK;
+}
+
+/* true iff the two lists have at least one string in common */
+bool strlist_intersects(const struct strlist *a, const struct strlist *b) {
+	int i;
+
+	for (i = a->count - 1 ; i >= 0 ; i--) {
+		if (strlist_in(b, a->values[i]))
+			return true;
+	}
+	return false;
+}
+
+/* concatenate <prefix> <values separated by infix> <suffix>;
+ * returns NULL on allocation failure.
+ * Fix: the old loop copied the suffix only in its last iteration, so
+ * for an empty list the suffix was never written and the length assert
+ * fired (with NDEBUG the result simply lacked the suffix).  The suffix
+ * is now appended unconditionally after the loop. */
+char *strlist_concat(const struct strlist *list, const char *prefix, const char *infix, const char *suffix) {
+	size_t l, prefix_len, infix_len, suffix_len, line_len;
+	char *c, *n;
+	int i;
+
+	prefix_len = strlen(prefix);
+	infix_len = strlen(infix);
+	suffix_len = strlen(suffix);
+
+	/* total length: prefix + all values + infix between them + suffix */
+	l = prefix_len + suffix_len;
+	for (i = 0 ; i < list->count ; i++)
+		l += strlen(list->values[i]);
+	if (list->count > 0)
+		l += (list->count-1)*infix_len;
+	c = malloc(l + 1);
+	if (FAILEDTOALLOC(c))
+		return NULL;
+	memcpy(c, prefix, prefix_len);
+	n = c + prefix_len;
+	for (i = 0 ; i < list->count ; i++) {
+		if (i > 0) {
+			memcpy(n, infix, infix_len);
+			n += infix_len;
+		}
+		line_len = strlen(list->values[i]);
+		memcpy(n, list->values[i], line_len);
+		n += line_len;
+	}
+	memcpy(n, suffix, suffix_len);
+	n += suffix_len;
+	assert ((size_t)(n-c) == l);
+	*n = '\0';
+	return c;
+}
+
+/* remove (and free) all strings equal to element, compacting the list */
+void strlist_remove(struct strlist *strlist, const char *element) {
+	int kept, i;
+
+	assert(strlist != NULL);
+	assert(element != NULL);
+
+	kept = 0;
+	for (i = 0 ; i < strlist->count ; i++) {
+		char *value = strlist->values[i];
+
+		if (strcmp(value, element) == 0) {
+			free(value);
+			continue;
+		}
+		strlist->values[kept++] = value;
+	}
+	strlist->count = kept;
+}
diff --git a/strlist.h b/strlist.h
new file mode 100644
index 0000000..8a14cc8
--- /dev/null
+++ b/strlist.h
@@ -0,0 +1,50 @@
+#ifndef REPREPRO_STRLIST_H
+#define REPREPRO_STRLIST_H
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_GLOBALS_H
+#include "globals.h"
+#warning "What's hapening here?"
+#endif
+
+/* a growable array of malloc'd strings; the list owns its strings
+ * unless noted otherwise */
+struct strlist {
+	char **values;
+	/* count = entries in use, size = entries allocated */
+	int count, size;
+};
+
+void strlist_init(/*@out@*/struct strlist *);
+retvalue strlist_init_n(int /*startsize*/, /*@out@*/struct strlist *);
+retvalue strlist_init_singleton(/*@only@*/char *, /*@out@*/struct strlist *);
+void strlist_done(/*@special@*/struct strlist *strlist) /*@releases strlist->values @*/;
+
+/* add a string, will get property of the strlist and free'd by it */
+retvalue strlist_add(struct strlist *, /*@only@*/char *);
+/* include a string at the beginning, otherwise like strlist_add */
+retvalue strlist_include(struct strlist *, /*@only@*/char *);
+/* append a string if not already in the list, else free it
+ * (note: appends at the end; the list is not kept sorted) */
+retvalue strlist_adduniq(struct strlist *, /*@only@*/char *);
+/* like strlist_add, but strdup it first */
+retvalue strlist_add_dup(struct strlist *strlist, const char *todup);
+
+/* print a space separated list of elements */
+retvalue strlist_fprint(FILE *, const struct strlist *);
+
+/* replace the contents of dest with those from orig, which get emptied */
+void strlist_move(/*@out@*/struct strlist *dest, /*@special@*/struct strlist *orig) /*@releases orig->values @*/;
+
+bool strlist_in(const struct strlist *, const char *);
+/* index of the first occurrence, or -1 if not in the list */
+int strlist_ofs(const struct strlist *, const char *);
+
+bool strlist_intersects(const struct strlist *, const struct strlist *);
+/* if missing != NULL And subset no subset of strlist, set *missing to the first missing one */
+bool strlist_subset(const struct strlist *, const struct strlist * /*subset*/, const char ** /*missing_p*/);
+
+/* concatenate <prefix> <values separated by infix> <suffix> */
+char *strlist_concat(const struct strlist *, const char * /*prefix*/, const char * /*infix*/, const char * /*suffix*/);
+
+/* remove all strings equal to the argument */
+void strlist_remove(struct strlist *, const char *);
+#endif
diff --git a/target.c b/target.c
new file mode 100644
index 0000000..0984fc4
--- /dev/null
+++ b/target.c
@@ -0,0 +1,1245 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2008,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <strings.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "chunks.h"
+#include "database.h"
+#include "reference.h"
+#include "binaries.h"
+#include "sources.h"
+#include "names.h"
+#include "dirs.h"
+#include "dpkgversions.h"
+#include "tracking.h"
+#include "log.h"
+#include "files.h"
+#include "descriptions.h"
+#include "package.h"
+#include "target.h"
+
+static char *calc_identifier(const char *codename, component_t component, architecture_t architecture, packagetype_t packagetype) {
+ assert (strchr(codename, '|') == NULL);
+ assert (codename != NULL); assert (atom_defined(component));
+ assert (atom_defined(architecture));
+ assert (atom_defined(packagetype));
+ if (packagetype == pt_udeb)
+ return mprintf("u|%s|%s|%s", codename,
+ atoms_components[component],
+ atoms_architectures[architecture]);
+ else if (packagetype == pt_ddeb)
+ return mprintf("d|%s|%s|%s", codename,
+ atoms_components[component],
+ atoms_architectures[architecture]);
+ else
+ return mprintf("%s|%s|%s", codename,
+ atoms_components[component],
+ atoms_architectures[architecture]);
+}
+
+
/* Allocate and initialize a target describing one
 * (component, architecture, packagetype) part of a distribution.
 * Takes ownership of directory (freed on failure, too); distribution and
 * exportmode are only referenced. The function pointers supply the
 * packagetype specific implementations (binary vs source handling) used
 * by the generic code. On success *d is set and RET_OK returned. */
static retvalue target_initialize(/*@dependant@*/struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype, get_version getversion, get_installdata getinstalldata, get_architecture getarchitecture, get_filekeys getfilekeys, get_checksums getchecksums, get_sourceandversion getsourceandversion, do_reoverride doreoverride, do_retrack doretrack, complete_checksums docomplete, /*@null@*//*@only@*/char *directory, /*@dependent@*/const struct exportmode *exportmode, bool readonly, bool noexport, /*@out@*/struct target **d) {
	struct target *t;

	assert(exportmode != NULL);
	/* directory is allocated by the caller (usually via mprintf);
	 * NULL means that allocation already failed there */
	if (FAILEDTOALLOC(directory))
		return RET_ERROR_OOM;

	t = zNEW(struct target);
	if (FAILEDTOALLOC(t)) {
		free(directory);
		return RET_ERROR_OOM;
	}
	t->relativedirectory = directory;
	t->exportmode = exportmode;
	t->distribution = distribution;
	assert (atom_defined(component));
	t->component = component;
	assert (atom_defined(architecture));
	t->architecture = architecture;
	assert (atom_defined(packagetype));
	t->packagetype = packagetype;
	/* the identifier is the primary key naming this target's
	 * databases and reference entries */
	t->identifier = calc_identifier(distribution->codename,
			component, architecture, packagetype);
	if (FAILEDTOALLOC(t->identifier)) {
		(void)target_free(t);
		return RET_ERROR_OOM;
	}
	t->getversion = getversion;
	t->getinstalldata = getinstalldata;
	t->getarchitecture = getarchitecture;
	t->getfilekeys = getfilekeys;
	t->getchecksums = getchecksums;
	t->getsourceandversion = getsourceandversion;
	t->doreoverride = doreoverride;
	t->doretrack = doretrack;
	t->completechecksums = docomplete;
	t->readonly = readonly;
	t->noexport = noexport;
	*d = t;
	return RET_OK;
}
+
+static const char *dist_component_name(component_t component, /*@null@*/const char *fakecomponentprefix) {
+ const char *c = atoms_components[component];
+ size_t len;
+
+ if (fakecomponentprefix == NULL)
+ return c;
+ len = strlen(fakecomponentprefix);
+ if (strncmp(c, fakecomponentprefix, len) != 0)
+ return c;
+ if (c[len] != '/')
+ return c;
+ return c + len + 1;
+}
+
/* Create the target holding the .udeb (debian-installer) packages of one
 * component/architecture; its index lives below
 * <component>/debian-installer/binary-<architecture>. */
retvalue target_initialize_ubinary(struct distribution *d, component_t component, architecture_t architecture, const struct exportmode *exportmode, bool readonly, bool noexport, const char *fakecomponentprefix, struct target **target) {
	return target_initialize(d, component, architecture, pt_udeb,
			binaries_getversion,
			binaries_getinstalldata,
			binaries_getarchitecture,
			binaries_getfilekeys, binaries_getchecksums,
			binaries_getsourceandversion,
			ubinaries_doreoverride, binaries_retrack,
			binaries_complete_checksums,
			mprintf("%s/debian-installer/binary-%s",
				dist_component_name(component,
					fakecomponentprefix),
				atoms_architectures[architecture]),
			exportmode, readonly, noexport, target);
}
/* Create the target holding the .ddeb (debug) packages of one
 * component/architecture; its index lives below
 * <component>/debug/binary-<architecture>. */
retvalue target_initialize_dbinary(struct distribution *d, component_t component, architecture_t architecture, const struct exportmode *exportmode, bool readonly, bool noexport, const char *fakecomponentprefix, struct target **target) {
	return target_initialize(d, component, architecture, pt_ddeb,
			binaries_getversion,
			binaries_getinstalldata,
			binaries_getarchitecture,
			binaries_getfilekeys, binaries_getchecksums,
			binaries_getsourceandversion,
			/* we use the main overrides */
			binaries_doreoverride, binaries_retrack,
			binaries_complete_checksums,
			/* FIXME: we don't know what the Debian archive layout
			 * is going to look like yet, so take a guess based
			 * on udebs */
			mprintf("%s/debug/binary-%s",
				dist_component_name(component,
					fakecomponentprefix),
				atoms_architectures[architecture]),
			exportmode, readonly, noexport, target);
}
/* Create the target holding the regular .deb packages of one
 * component/architecture; its index lives below
 * <component>/binary-<architecture>. */
retvalue target_initialize_binary(struct distribution *d, component_t component, architecture_t architecture, const struct exportmode *exportmode, bool readonly, bool noexport, const char *fakecomponentprefix, struct target **target) {
	return target_initialize(d, component, architecture, pt_deb,
			binaries_getversion,
			binaries_getinstalldata,
			binaries_getarchitecture,
			binaries_getfilekeys, binaries_getchecksums,
			binaries_getsourceandversion,
			binaries_doreoverride, binaries_retrack,
			binaries_complete_checksums,
			mprintf("%s/binary-%s",
				dist_component_name(component,
					fakecomponentprefix),
				atoms_architectures[architecture]),
			exportmode, readonly, noexport, target);
}
+
/* Create the target holding the source (.dsc) packages of one component;
 * its index lives below <component>/source. The architecture is always
 * the pseudo architecture 'source'. */
retvalue target_initialize_source(struct distribution *d, component_t component, const struct exportmode *exportmode, bool readonly, bool noexport, const char *fakecomponentprefix, struct target **target) {
	return target_initialize(d, component, architecture_source, pt_dsc,
			sources_getversion,
			sources_getinstalldata,
			sources_getarchitecture,
			sources_getfilekeys, sources_getchecksums,
			sources_getsourceandversion,
			sources_doreoverride, sources_retrack,
			sources_complete_checksums,
			mprintf("%s/source", dist_component_name(component,
					fakecomponentprefix)),
			exportmode, readonly, noexport, target);
}
+
+retvalue target_free(struct target *target) {
+ retvalue result = RET_OK;
+
+ if (target == NULL)
+ return RET_OK;
+ if (target->packages != NULL) {
+ result = target_closepackagesdb(target);
+ } else
+ result = RET_OK;
+ if (target->wasmodified && !target->noexport) {
+ fprintf(stderr,
+"Warning: database '%s' was modified but no index file was exported.\n"
+"Changes will only be visible after the next 'export'!\n",
+ target->identifier);
+ }
+
+ target->distribution = NULL;
+ free(target->identifier);
+ free(target->relativedirectory);
+ free(target);
+ return result;
+}
+
/* Open the packages database of a target (stored in target->packages).
 * Opening read-write is refused for targets belonging to a read-only
 * distribution. Must not be called while the database is already open. */
retvalue target_initpackagesdb(struct target *target, bool readonly) {
	retvalue r;

	if (!readonly && target->readonly) {
		fprintf(stderr,
"Error trying to open '%s' read-write in read-only distribution '%s'\n",
				target->identifier,
				target->distribution->codename);
		return RET_ERROR;
	}

	assert (target->packages == NULL);
	/* fallback for NDEBUG builds: if it is somehow already open,
	 * treat that as success instead of opening twice */
	if (target->packages != NULL)
		return RET_OK;
	r = database_openpackages(target->identifier, readonly,
			&target->packages);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r)) {
		/* make sure no caller sees a half-initialized handle */
		target->packages = NULL;
		return r;
	}
	return r;
}
+
+/* this closes databases... */
+retvalue target_closepackagesdb(struct target *target) {
+ retvalue r;
+
+ if (target->packages == NULL) {
+ fprintf(stderr, "Internal Warning: Double close!\n");
+ r = RET_OK;
+ } else {
+ r = table_close(target->packages);
+ target->packages = NULL;
+ }
+ return r;
+}
+
+/* Remove a package from the given target. */
+retvalue package_remove(struct package *old, struct logger *logger, struct trackingdata *trackingdata) {
+ struct strlist files;
+ retvalue result, r;
+ char *key;
+
+ assert (old->target != NULL && old->target->packages != NULL);
+
+ (void)package_getversion(old);
+ if (verbose >= 15)
+ fprintf(stderr, "trace: package_remove(old.name=%s, old.version=%s, old.target.identifier=%s) called.\n",
+ old->name, old->version, old->target->identifier);
+ r = old->target->getfilekeys(old->control, &files);
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ if (trackingdata != NULL) {
+ (void)package_getsource(old);
+ }
+ if (verbose > 0)
+ printf("removing '%s=%s' from '%s'...\n",
+ old->name, old->version, old->target->identifier);
+ key = package_primarykey(old->name, old->version);
+ result = table_deleterecord(old->target->packages, key, false);
+ free(key);
+ if (RET_IS_OK(result)) {
+ old->target->wasmodified = true;
+ if (trackingdata != NULL && old->source != NULL
+ && old->sourceversion != NULL) {
+ r = trackingdata_remove(trackingdata,
+ old->source, old->sourceversion, &files);
+ RET_UPDATE(result, r);
+ }
+ if (trackingdata == NULL)
+ old->target->staletracking = true;
+ if (logger != NULL)
+ logger_log(logger, old->target, old->name,
+ NULL,
+ (old->version == NULL)
+ ? "#unparseable#"
+ : old->version,
+ NULL, &files,
+ NULL, NULL);
+ r = references_delete(old->target->identifier, &files, NULL);
+ RET_UPDATE(result, r);
+ }
+ strlist_done(&files);
+ return result;
+}
+
+/* Remove a package from the given target. */
+retvalue target_removepackage(struct target *target, struct logger *logger, const char *name, const char *version, struct trackingdata *trackingdata) {
+ struct package old;
+ retvalue r;
+
+ assert(target != NULL && target->packages != NULL && name != NULL);
+ if (verbose >= 15)
+ fprintf(stderr, "trace: target_removepackage(target.identifier=%s, name=%s, version=%s) called.\n",
+ target->identifier, name, version);
+
+ r = package_get(target, name, version, &old);
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ else if (r == RET_NOTHING) {
+ if (verbose >= 10) {
+ if (version == NULL)
+ fprintf(stderr, "Could not find '%s' in '%s'...\n",
+ name, target->identifier);
+ else
+ fprintf(stderr, "Could not find '%s=%s' in '%s'...\n",
+ name, version, target->identifier);
+ }
+ return RET_NOTHING;
+ }
+ r = package_remove(&old, logger, trackingdata);
+ package_done(&old);
+ return r;
+}
+
/* Like target_removepackage, but delete the package record the given
 * cursor currently points at (so it can be used while iterating over
 * all packages of a target without invalidating the iteration). */
retvalue package_remove_by_cursor(struct package_cursor *tc, struct logger *logger, struct trackingdata *trackingdata) {
	struct target * const target = tc->target;
	struct package *old = &tc->current;
	struct strlist files;
	retvalue result, r;

	assert (target != NULL && target->packages != NULL);
	assert (target == old->target);

	if (logger != NULL || verbose > 0) {
		/* only needed for the messages/log entry below */
		(void)package_getversion(old);
	}
	r = old->target->getfilekeys(old->control, &files);
	if (RET_WAS_ERROR(r)) {
		return r;
	}
	if (trackingdata != NULL) {
		/* source name/version are needed to update tracking below */
		(void)package_getsource(old);
	}
	if (verbose > 0)
		printf("removing '%s=%s' from '%s'...\n",
				old->name, old->version, old->target->identifier);
	result = cursor_delete(target->packages, tc->cursor, old->name, old->version);
	if (RET_IS_OK(result)) {
		old->target->wasmodified = true;
		if (trackingdata != NULL && old->source != NULL
				&& old->sourceversion != NULL) {
			r = trackingdata_remove(trackingdata,
					old->source, old->sourceversion, &files);
			RET_UPDATE(result, r);
		}
		if (trackingdata == NULL)
			old->target->staletracking = true;
		if (logger != NULL)
			logger_log(logger, old->target, old->name,
					NULL,
					(old->version == NULL)
					? "#unparseable"
					: old->version,
					NULL, &files,
					NULL, NULL);
		/* the files are no longer needed by this target */
		r = references_delete(old->target->identifier, &files, NULL);
		RET_UPDATE(result, r);
	}
	strlist_done(&files);
	return result;
}
+
+static retvalue archive_package(struct target *target, const struct package *package, const struct strlist *files, /*@null@*/const char *causingrule, /*@null@*/const char *suitefrom) {
+ struct strlist filekeys;
+ struct target *archive_target;
+ struct trackingdata trackingdata;
+ trackingdb tracks = NULL;
+ bool close_database, close_trackingdb = false;
+ retvalue result, r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: archive_package(target.identifier=%s, package->name=%s, package->version=%s) called.\n",
+ target->identifier, package->name, package->version);
+
+ if (target->distribution->archive != NULL) {
+ archive_target = distribution_gettarget(target->distribution->archive, target->component,
+ target->architecture, target->packagetype);
+ if (archive_target == NULL) {
+ fprintf(stderr,
+"Warning: Cannot archive '%s=%s' from '%s' to '%s' since '%s' has no matching component/architecture/packagetype.\n",
+ package->name, package->version, target->distribution->codename,
+ target->distribution->archive->codename,
+ target->distribution->archive->codename);
+ } else {
+ close_database = archive_target->packages == NULL;
+ if (close_database) {
+ result = target_initpackagesdb(archive_target, READWRITE);
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+ }
+ if (files == NULL) {
+ result = archive_target->getfilekeys(package->control, &filekeys);
+ if (RET_WAS_ERROR(result))
+ return result;
+ files = &filekeys;
+ }
+ if (archive_target->distribution->tracking != dt_NONE) {
+ close_trackingdb = archive_target->distribution->trackingdb == NULL;
+ if (close_trackingdb) {
+ r = tracking_initialize(&tracks, archive_target->distribution, false);
+ if (RET_WAS_ERROR(r))
+ return r;
+ } else {
+ tracks = archive_target->distribution->trackingdb;
+ }
+ r = trackingdata_summon(tracks, package->source, package->version, &trackingdata);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+ // TODO: Check whether this is the best place to set 'selected'
+ target->distribution->archive->selected = true;
+ result = distribution_prepareforwriting(archive_target->distribution);
+ if (!RET_WAS_ERROR(result)) {
+ result = target_addpackage(archive_target, target->distribution->archive->logger,
+ package->name, package->version, package->control,
+ files, false, (tracks != NULL) ? &trackingdata : NULL,
+ target->architecture, causingrule, suitefrom);
+ RET_UPDATE(target->distribution->archive->status, result);
+ }
+ if (close_database) {
+ r = target_closepackagesdb(archive_target);
+ RET_UPDATE(result, r);
+ }
+ if (tracks != NULL) {
+ r = trackingdata_finish(tracks, &trackingdata);
+ RET_UPDATE(result, r);
+ if (close_trackingdb) {
+ r = tracking_done(tracks, archive_target->distribution);
+ RET_UPDATE(result, r);
+ }
+ }
+ if (RET_WAS_ERROR(result)) {
+ return result;
+ }
+ }
+ }
+ return RET_OK;
+}
+
/* Insert a new package record into the target's database: reference the
 * new files, optionally archive and delete a replaced old version,
 * notify the logger, update tracking, and finally drop the references
 * held by the old version. old/oldfiles may be NULL when nothing is
 * being replaced. */
static retvalue addpackages(struct target *target, const char *packagename, const char *controlchunk, const char *version, const struct strlist *files, /*@null@*/const struct package *old, /*@null@*/const struct strlist *oldfiles, /*@null@*/struct logger *logger, /*@null@*/struct trackingdata *trackingdata, architecture_t architecture, /*@null@*/const char *causingrule, /*@null@*/const char *suitefrom) {

	retvalue result = RET_OK, r;
	char *key;
	struct table *table = target->packages;
	enum filetype filetype;

	if (verbose >= 15)
		fprintf(stderr, "trace: addpackages(target.identifier=%s, packagename=%s, version=%s, old->version=%s) called.\n",
				target->identifier, packagename, version, old != NULL ? old->version : NULL);
	assert (atom_defined(architecture));

	/* the filetype is what gets recorded in the tracking data */
	if (architecture == architecture_source)
		filetype = ft_SOURCE;
	else if (architecture == architecture_all)
		filetype = ft_ALL_BINARY;
	else
		filetype = ft_ARCH_BINARY;

	/* mark it as needed by this distribution */

	r = references_insert(target->identifier, files, oldfiles);

	if (RET_WAS_ERROR(r))
		return r;

	/* Add package to the distribution's database */

	if (old != NULL && old->control != NULL) {
		key = package_primarykey(old->name, old->version);
		/* move the old version into the archive distribution
		 * (if one is configured) before deleting its record */
		r = archive_package(target, old, oldfiles, causingrule, suitefrom);
		RET_UPDATE(result, r);
		if (RET_IS_OK(r)) {
			r = table_deleterecord(table, key, false);
			RET_UPDATE(result, r);
		}
		free(key);
	}

	key = package_primarykey(packagename, version);
	r = table_adduniqrecord(table, key, controlchunk);
	free(key);

	if (RET_WAS_ERROR(r))
		return r;

	if (logger != NULL) {
		logger_log(logger, target, packagename,
				version,
				/* the old version, NULL if there is
				 * no old package,
				 * "#unparseable" if there is old but
				 * no version available */
				(old==NULL)
				? NULL
				: (old->version == NULL)
				? "#unparseable"
				: old->version,
				files, oldfiles, causingrule, suitefrom);
	}

	if (trackingdata != NULL) {
		r = trackingdata_insert(trackingdata,
				filetype, files, old, oldfiles);
		RET_UPDATE(result, r);
	}

	/* remove old references to files */

	if (oldfiles != NULL) {
		r = references_delete(target->identifier, oldfiles, files);
		RET_UPDATE(result, r);
	}

	return result;
}
+
+retvalue target_addpackage(struct target *target, struct logger *logger, const char *name, const char *version, const char *control, const struct strlist *filekeys, bool downgrade, struct trackingdata *trackingdata, architecture_t architecture, const char *causingrule, const char *suitefrom) {
+ struct strlist oldfilekeys, *ofk = NULL;
+ char *newcontrol;
+ struct package_cursor iterator = {NULL};
+ struct package old;
+ retvalue r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: target_addpackage(target.identifier=%s, name=%s, version=%s) called.\n",
+ target->identifier, name, version);
+ assert(target->packages!=NULL);
+
+ r = package_get(target, name, version, &old);
+ if (RET_WAS_ERROR(r)) {
+ package_done(&old);
+ return r;
+ } else if (RET_IS_OK(r)) {
+ if (!downgrade) {
+ fprintf(stderr, "Skipping inclusion of '%s' '%s' in '%s', as this version already exists.\n",
+ name, version, target->identifier);
+ package_done(&old);
+ return RET_NOTHING;
+ } else {
+ r = package_getversion(&old);
+ if (RET_WAS_ERROR(r) && !IGNORING(brokenold, "Error parsing old version!\n")) {
+ package_done(&old);
+ return r;
+ }
+ fprintf(stderr, "Warning: replacing '%s' version '%s' with equal version '%s' in '%s'!\n",
+ name, old.version, version, target->identifier);
+ }
+ } else if (target->distribution->limit > 0) {
+ package_done(&old);
+ r = package_openduplicateiterator(target, name, target->distribution->limit - 1, &iterator);
+ if (RET_WAS_ERROR(r)) {
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ r = package_getversion(&iterator.current);
+ if (RET_WAS_ERROR(r) && !IGNORING(brokenold, "Error parsing old version!\n")) {
+ retvalue r2 = package_closeiterator(&iterator);
+ RET_ENDUPDATE(r, r2);
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ int versioncmp;
+
+ r = dpkgversions_cmp(version, iterator.current.version, &versioncmp);
+ if (RET_WAS_ERROR(r)) {
+ if (!IGNORING(brokenversioncmp, "Parse errors processing versions of %s.\n", name)) {
+ retvalue r2 = package_closeiterator(&iterator);
+ RET_ENDUPDATE(r, r2);
+ return r;
+ }
+ } else if (versioncmp < 0) {
+ // new Version is older than the old version that will be replaced
+ if (!downgrade) {
+ fprintf(stderr,
+"Skipping inclusion of '%s' '%s' in '%s', as it has already '%s'.\n",
+ name, version,
+ target->identifier,
+ iterator.current.version);
+ package_done(&old);
+ return RET_NOTHING;
+ } else {
+ fprintf(stderr,
+"Warning: downgrading '%s' from '%s' to '%s' in '%s'!\n", name,
+ iterator.current.version,
+ version,
+ target->identifier);
+ }
+ }
+ old.target = target;
+ old.name = iterator.current.name;
+ old.control = iterator.current.control;
+ old.controllen = iterator.current.controllen;
+ old.version = iterator.current.version;
+ }
+ }
+ } else {
+ // Keep all package versions in the archive.
+ package_done(&old);
+ }
+
+ if (old.name != NULL) {
+ r = target->getfilekeys(old.control, &oldfilekeys);
+ ofk = &oldfilekeys;
+ if (RET_WAS_ERROR(r)) {
+ if (IGNORING(brokenold,
+"Error parsing files belonging to installed version of %s!\n", name)) {
+ ofk = NULL;
+ } else {
+ package_done(&old);
+ if (iterator.cursor != NULL) {
+ retvalue r2 = package_closeiterator(&iterator);
+ RET_ENDUPDATE(r, r2);
+ }
+ return r;
+ }
+ } else if (trackingdata != NULL) {
+ r = package_getsource(&old);
+ if (RET_WAS_ERROR(r)) {
+ strlist_done(ofk);
+ if (IGNORING(brokenold,
+"Error searching for source name of installed version of %s!\n", name)) {
+ // TODO: free something of oldfilekeys?
+ ofk = NULL;
+ } else {
+ package_done(&old);
+ if (iterator.cursor != NULL) {
+ retvalue r2 = package_closeiterator(&iterator);
+ RET_ENDUPDATE(r, r2);
+ }
+ return r;
+ }
+ }
+ }
+ }
+
+ newcontrol = NULL;
+ r = description_addpackage(target, name, control, &newcontrol);
+ if (RET_IS_OK(r))
+ control = newcontrol;
+ if (!RET_WAS_ERROR(r))
+ r = addpackages(target, name, control,
+ version,
+ filekeys,
+ &old, ofk,
+ logger,
+ trackingdata, architecture,
+ causingrule, suitefrom);
+ if (ofk != NULL)
+ strlist_done(ofk);
+ if (RET_IS_OK(r)) {
+ target->wasmodified = true;
+ if (trackingdata == NULL)
+ target->staletracking = true;
+ }
+ free(newcontrol);
+ package_done(&old);
+
+ if (iterator.cursor != NULL) {
+ // Remove all older versions (that exceed the current limit)
+ retvalue r2;
+ while(package_next(&iterator)) {
+ r2 = package_getversion(&iterator.current);
+ RET_UPDATE(r, r2);
+ if (RET_WAS_ERROR(r2))
+ continue;
+ if (strcmp(version, iterator.current.version) == 0) {
+ // Do not archive/remove the newly added package!
+ continue;
+ }
+ r2 = package_getsource(&iterator.current);
+ if (RET_WAS_ERROR(r2))
+ continue;
+ r2 = archive_package(target, &iterator.current, NULL, causingrule, suitefrom);
+ RET_UPDATE(r, r2);
+ if (RET_WAS_ERROR(r2))
+ continue;
+ r2 = package_remove_by_cursor(&iterator, logger, trackingdata);
+ RET_UPDATE(r, r2);
+ }
+ r2 = package_closeiterator(&iterator);
+ RET_ENDUPDATE(r, r2);
+ }
+ return r;
+}
+
+retvalue target_checkaddpackage(struct target *target, const char *name, const char *version, bool tracking, bool permitnewerold) {
+ struct strlist oldfilekeys, *ofk;
+ struct package old;
+ retvalue r;
+
+ assert(target->packages!=NULL);
+
+ r = package_get(target, name, NULL, &old);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING) {
+ ofk = NULL;
+ } else {
+ int versioncmp;
+
+ r = package_getversion(&old);
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error extracting version from old '%s' in '%s'. Database corrupted?\n", name, target->identifier);
+ package_done(&old);
+ return r;
+ }
+ assert (RET_IS_OK(r));
+
+ r = dpkgversions_cmp(version, old.version, &versioncmp);
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Parse error comparing version '%s' of '%s' with old version '%s' in '%s'\n.",
+ version, name, old.version,
+ target->identifier);
+ package_done(&old);
+ return r;
+ }
+ if (versioncmp < 0) {
+ if (!permitnewerold) {
+ fprintf(stderr,
+"Error: trying to put version '%s' of '%s' in '%s',\n"
+"while there already is the stricly newer '%s' in there.\n"
+"(To ignore this error add Permit: older_version.)\n",
+ version, name,
+ target->identifier,
+ old.version);
+ package_done(&old);
+ return RET_ERROR;
+ } else if (verbose > 2) {
+ printf("Puting version '%s' of '%s' in '%s', while there already is '%s' in there.\n",
+ version, name, target->identifier, old.version);
+ }
+ } else if (versioncmp == 0) {
+ if (verbose > 2) {
+ printf(
+"Will not put '%s' in '%s', as already there with same version '%s'.\n",
+ name, target->identifier,
+ old.version);
+ }
+ package_done(&old);
+ return RET_NOTHING;
+ }
+ r = target->getfilekeys(old.control, &oldfilekeys);
+ ofk = &oldfilekeys;
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error extracting installed files from old '%s' in '%s'.\nDatabase corrupted?\n",
+ name, target->identifier);
+ package_done(&old);
+ return r;
+ }
+ if (tracking) {
+ r = package_getsource(&old);
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error extracting source name and version from '%s' in '%s'. Database corrupted?\n",
+ name, target->identifier);
+ strlist_done(ofk);
+ package_done(&old);
+ return r;
+ }
+ /* TODO: check if tracking would succeed */
+ }
+ strlist_done(ofk);
+ package_done(&old);
+ }
+ return RET_OK;
+}
+
/* Rebuild the file references held by a target from scratch: drop all
 * existing references for its identifier, then walk every package and
 * re-add references for each of its files. */
retvalue target_rereference(struct target *target) {
	retvalue result, r;
	struct package_cursor iterator;

	if (verbose > 1) {
		if (verbose > 2)
			printf("Unlocking dependencies of %s...\n",
					target->identifier);
		else
			printf("Rereferencing %s...\n",
					target->identifier);
	}

	/* forget everything, the loop below recreates it */
	result = references_remove(target->identifier);
	if (verbose > 2)
		printf("Referencing %s...\n", target->identifier);

	r = package_openiterator(target, READONLY, true, &iterator);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	while (package_next(&iterator)) {
		struct strlist filekeys;

		r = target->getfilekeys(iterator.current.control, &filekeys);
		RET_UPDATE(result, r);
		if (!RET_IS_OK(r))
			continue;
		if (verbose > 10) {
			fprintf(stderr, "adding references to '%s' for '%s': ",
					target->identifier,
					iterator.current.name);
			(void)strlist_fprint(stderr, &filekeys);
			(void)putc('\n', stderr);
		}
		r = references_insert(target->identifier, &filekeys, NULL);
		strlist_done(&filekeys);
		RET_UPDATE(result, r);
	}
	r = package_closeiterator(&iterator);
	RET_ENDUPDATE(result, r);
	return result;
}
+
+retvalue package_referenceforsnapshot(struct package *package, void *data) {
+ const char *identifier = data;
+ struct strlist filekeys;
+ retvalue r;
+
+ r = package->target->getfilekeys(package->control, &filekeys);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (verbose > 15) {
+ fprintf(stderr, "adding references to '%s' for '%s': ",
+ identifier, package->name);
+ (void)strlist_fprint(stderr, &filekeys);
+ (void)putc('\n', stderr);
+ }
+ r = references_add(identifier, &filekeys);
+ strlist_done(&filekeys);
+ return r;
+}
+
+retvalue package_check(struct package *package, UNUSED(void *pd)) {
+ struct target *target = package->target;
+ struct checksumsarray files;
+ retvalue result = RET_OK, r;
+
+ r = package_getversion(package);
+ if (!RET_IS_OK(r)) {
+ fprintf(stderr,
+"Error extraction version number from package control info of '%s'!\n",
+ package->name);
+ if (r == RET_NOTHING)
+ r = RET_ERROR_MISSING;
+ return r;
+ }
+ r = package_getarchitecture(package);
+ if (!RET_IS_OK(r)) {
+ fprintf(stderr,
+"Error extraction architecture from package control info of '%s'!\n",
+ package->name);
+ if (r == RET_NOTHING)
+ r = RET_ERROR_MISSING;
+ return r;
+ }
+ /* check if the architecture matches the architecture where this
+ * package belongs to. */
+ if (target->architecture != package->architecture &&
+ package->architecture != architecture_all) {
+ fprintf(stderr,
+"Wrong architecture '%s' of package '%s' in '%s'!\n",
+ atoms_architectures[package->architecture],
+ package->name, target->identifier);
+ result = RET_ERROR;
+ }
+ r = target->getchecksums(package->control, &files);
+ if (r == RET_NOTHING)
+ r = RET_ERROR;
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error extracting information of package '%s'!\n",
+ package->name);
+ return r;
+ }
+
+ if (verbose > 10) {
+ fprintf(stderr, "checking files of '%s'\n", package->name);
+ }
+ r = files_expectfiles(&files.names, files.checksums);
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr, "Files are missing for '%s'!\n", package->name);
+ }
+ RET_UPDATE(result, r);
+ if (verbose > 10) {
+ (void)fprintf(stderr, "checking references to '%s' for '%s': ",
+ target->identifier, package->name);
+ (void)strlist_fprint(stderr, &files.names);
+ (void)putc('\n', stderr);
+ }
+ r = references_check(target->identifier, &files.names);
+ RET_UPDATE(result, r);
+ checksumsarray_done(&files);
+ return result;
+}
+
+/* Reapply override information */
+
+retvalue target_reoverride(struct target *target, struct distribution *distribution) {
+ struct package_cursor iterator;
+ retvalue result, r;
+
+ assert(target->packages == NULL);
+ assert(distribution != NULL);
+
+ if (verbose > 1) {
+ fprintf(stderr,
+"Reapplying overrides packages in '%s'...\n",
+ target->identifier);
+ }
+
+ r = package_openiterator(target, READWRITE, true, &iterator);
+ if (!RET_IS_OK(r))
+ return r;
+ result = RET_NOTHING;
+ while (package_next(&iterator)) {
+ char *newcontrolchunk = NULL;
+
+ r = target->doreoverride(target, iterator.current.name,
+ iterator.current.control,
+ &newcontrolchunk);
+ RET_UPDATE(result, r);
+ if (RET_WAS_ERROR(r)) {
+ if (verbose > 0)
+ (void)fputs(
+"target_reoverride: Stopping procession of further packages due to previous errors\n",
+ stderr);
+ break;
+ }
+ if (RET_IS_OK(r)) {
+ r = package_newcontrol_by_cursor(&iterator,
+ newcontrolchunk, strlen(newcontrolchunk));
+ free(newcontrolchunk);
+ if (RET_WAS_ERROR(r)) {
+ result = r;
+ break;
+ }
+ target->wasmodified = true;
+ }
+ }
+ r = package_closeiterator(&iterator);
+ RET_ENDUPDATE(result, r);
+ return result;
+}
+
+/* Readd checksum information */
+
+static retvalue complete_package_checksums(struct target *target, const char *control, char **n) {
+ struct checksumsarray files;
+ retvalue r;
+
+ r = target->getchecksums(control, &files);
+ if (!RET_IS_OK(r))
+ return r;
+
+ r = files_checkorimprove(&files.names, files.checksums);
+ if (!RET_IS_OK(r)) {
+ checksumsarray_done(&files);
+ return r;
+ }
+ r = target->completechecksums(control,
+ &files.names, files.checksums, n);
+ checksumsarray_done(&files);
+ return r;
+}
+
/* Walk all packages of a target and rewrite each control chunk whose
 * checksum information could be completed from the files database. */
retvalue target_redochecksums(struct target *target, struct distribution *distribution) {
	struct package_cursor iterator;
	retvalue result, r;

	assert(target->packages == NULL);
	assert(distribution != NULL);

	if (verbose > 1) {
		fprintf(stderr,
"Redoing checksum information for packages in '%s'...\n",
				target->identifier);
	}

	r = package_openiterator(target, READWRITE, true, &iterator);
	if (!RET_IS_OK(r))
		return r;
	result = RET_NOTHING;
	while (package_next(&iterator)) {
		char *newcontrolchunk = NULL;

		r = complete_package_checksums(target, iterator.current.control,
				&newcontrolchunk);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		if (RET_IS_OK(r)) {
			/* RET_OK means the chunk changed; store it back */
			r = package_newcontrol_by_cursor(&iterator,
					newcontrolchunk, strlen(newcontrolchunk));
			free(newcontrolchunk);
			if (RET_WAS_ERROR(r)) {
				result = r;
				break;
			}
			target->wasmodified = true;
		}
	}
	r = package_closeiterator(&iterator);
	RET_ENDUPDATE(result, r);
	return result;
}
+
+/* export a database */
+
+retvalue target_export(struct target *target, bool onlyneeded, bool snapshot, struct release *release) {
+ retvalue result;
+ bool onlymissing;
+
+ assert (!target->noexport);
+
+ if (verbose > 5) {
+ if (onlyneeded)
+ printf(" looking for changes in '%s'...\n",
+ target->identifier);
+ else
+ printf(" exporting '%s'...\n", target->identifier);
+ }
+
+ /* not exporting if file is already there? */
+ onlymissing = onlyneeded && !target->wasmodified;
+
+ result = export_target(target->relativedirectory, target,
+ target->exportmode, release, onlymissing, snapshot);
+
+ if (!RET_WAS_ERROR(result) && !snapshot) {
+ target->saved_wasmodified =
+ target->saved_wasmodified || target->wasmodified;
+ target->wasmodified = false;
+ }
+ return result;
+}
+
+/* Re-feed one package's "added" information to the configured log
+ * notifier scripts (rerunnotifiers command).  Needs the package's
+ * version and the filekeys it references.
+ * Fix: error message said "Error extraction version number". */
+retvalue package_rerunnotifiers(struct package *package, UNUSED(void *data)) {
+ struct target *target = package->target;
+ struct logger *logger = target->distribution->logger;
+ struct strlist filekeys;
+ retvalue r;
+
+ r = package_getversion(package);
+ if (!RET_IS_OK(r)) {
+ fprintf(stderr,
+"Error extracting version number from package control info of '%s'!\n",
+ package->name);
+ /* a package without parsable version is a hard error here */
+ if (r == RET_NOTHING)
+ r = RET_ERROR_MISSING;
+ return r;
+ }
+ r = target->getfilekeys(package->control, &filekeys);
+ if (RET_WAS_ERROR(r)) {
+ fprintf(stderr,
+"Error extracting information about used files from package '%s'!\n",
+ package->name);
+ return r;
+ }
+ r = logger_reruninfo(logger, target, package->name, package->version,
+ &filekeys);
+ strlist_done(&filekeys);
+ return r;
+}
+
+/* Look up a single package in a target's database.
+ * version == NULL: fetch the record found first for the name
+ * (table_getrecord with 'first' flag); otherwise look up the exact
+ * "name|version" primary key.  Opens and closes the packages database
+ * transparently if it was not already open.
+ * On success pkg->name points at the CALLER's string (dependent),
+ * pkg->control at the freshly fetched chunk. */
+retvalue package_get(struct target *target, const char *name, const char *version, struct package *pkg) {
+ retvalue result, r;
+ bool database_closed;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: package_get(target.identifier=%s, packagename=%s, version=%s) called.\n",
+ target->identifier, name, version);
+
+ memset(pkg, 0, sizeof(*pkg));
+
+ database_closed = target->packages == NULL;
+
+ if (database_closed) {
+ r = target_initpackagesdb(target, READONLY);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ if (version == NULL) {
+ result = table_getrecord(target->packages, true, name,
+ &pkg->pkgchunk, &pkg->controllen);
+ } else {
+ char *key = package_primarykey(name, version);
+ result = table_getrecord(target->packages, false, key,
+ &pkg->pkgchunk, &pkg->controllen);
+ free(key);
+ }
+ if (RET_IS_OK(result)) {
+ pkg->target = target;
+ pkg->name = name;
+ pkg->control = pkg->pkgchunk;
+ }
+ /* only close what we opened ourselves */
+ if (database_closed) {
+ r = target_closepackagesdb(target);
+ if (RET_WAS_ERROR(r)) {
+ package_done(pkg);
+ return r;
+ }
+ }
+ return result;
+}
+
+/* Open a cursor over all packages of a target.  Records in
+ * tc->close_database whether the database was closed before, so
+ * package_closeiterator() knows whether to close it again.
+ * NOTE(review): unlike package_openduplicateiterator() below,
+ * target_initpackagesdb() is called even when the database is already
+ * open — presumably it tolerates/refcounts that case; confirm. */
+retvalue package_openiterator(struct target *t, bool readonly, bool duplicate, /*@out@*/struct package_cursor *tc) {
+ retvalue r, r2;
+ struct cursor *c;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: package_openiterator(target={identifier: %s}, readonly=%s, duplicate=%s) called.\n",
+ t->identifier, readonly ? "true" : "false", duplicate ? "true" : "false");
+
+ tc->close_database = t->packages == NULL;
+ r = target_initpackagesdb(t, readonly);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+ r = table_newglobalcursor(t->packages, duplicate, &c);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ /* roll back the database open on cursor failure */
+ r2 = target_closepackagesdb(t);
+ RET_UPDATE(r, r2);
+ return r;
+ }
+ tc->target = t;
+ tc->cursor = c;
+ memset(&tc->current, 0, sizeof(tc->current));
+ return RET_OK;
+}
+
+/* Open a cursor over all versions of one package name, skipping the
+ * first 'skip' entries.  Unlike package_openiterator() this already
+ * positions the cursor and fills tc->current with the first match. */
+retvalue package_openduplicateiterator(struct target *t, const char *name, long long skip, /*@out@*/struct package_cursor *tc) {
+ retvalue r, r2;
+ struct cursor *c;
+
+ tc->close_database = t->packages == NULL;
+ if (tc->close_database) {
+ r = target_initpackagesdb(t, READONLY);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r))
+ return r;
+ }
+
+ memset(&tc->current, 0, sizeof(tc->current));
+ r = table_newduplicatecursor(t->packages, name, skip, &c, &tc->current.name,
+ &tc->current.control, &tc->current.controllen);
+ if (!RET_IS_OK(r)) {
+ /* covers both errors and "no such package" (RET_NOTHING) */
+ if (tc->close_database) {
+ r2 = target_closepackagesdb(t);
+ RET_ENDUPDATE(r, r2);
+ }
+ return r;
+ }
+ tc->current.target = t;
+ tc->target = t;
+ tc->cursor = c;
+ return RET_OK;
+}
+
+/* Advance the cursor to the next package; frees the previous
+ * tc->current and refills it.  Returns false when exhausted (current
+ * is then zeroed).
+ * NOTE(review): the trace fprintf passes tc->current.name/version to
+ * %s, which are NULL before the first call — glibc prints "(null)"
+ * but this is technically undefined; confirm acceptable. */
+bool package_next(struct package_cursor *tc) {
+ bool success;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: package_next(tc={current: {name: %s, version: %s}}) called.\n", tc->current.name, tc->current.version);
+
+ package_done(&tc->current);
+ success = cursor_nexttempdata(tc->target->packages, tc->cursor,
+ &tc->current.name, &tc->current.control,
+ &tc->current.controllen);
+ if (!success)
+ memset(&tc->current, 0, sizeof(tc->current));
+ else
+ tc->current.target = tc->target;
+ return success;
+}
+
+/* Release the cursor and, if package_openiterator opened the packages
+ * database itself, close it again (tracked via tc->close_database). */
+retvalue package_closeiterator(struct package_cursor *tc) {
+ retvalue result, r;
+
+ package_done(&tc->current);
+ result = cursor_close(tc->target->packages, tc->cursor);
+ if (tc->close_database) {
+ r = target_closepackagesdb(tc->target);
+ RET_UPDATE(result, r);
+ } else {
+ /* database stays open; just detach the iterator */
+ tc->target = NULL;
+ }
+ return result;
+}
+
+/* Lazily extract and cache the package's version from its control
+ * chunk; subsequent calls are free.  Ownership of the allocated
+ * string stays with the package (pkgversion). */
+retvalue package_getversion(struct package *package) {
+ retvalue r;
+
+ if (package->version != NULL)
+ return RET_OK;
+
+ r = package->target->getversion(package->control, &package->pkgversion);
+ if (RET_IS_OK(r)) {
+ assert (package->pkgversion != NULL);
+ package->version = package->pkgversion;
+ }
+ return r;
+}
+
+/* Lazily determine and cache the package's architecture atom. */
+retvalue package_getarchitecture(struct package *package) {
+ if (atom_defined(package->architecture))
+ return RET_OK;
+
+ return package->target->getarchitecture(package->control,
+ &package->architecture);
+}
+
+/* Lazily extract and cache the source package name and source version
+ * belonging to this package; allocated strings are owned by the
+ * package (pkgsource/pkgsrcversion). */
+retvalue package_getsource(struct package *package) {
+ retvalue r;
+
+ if (package->source != NULL)
+ return RET_OK;
+
+ r = package->target->getsourceandversion(package->control, package->name,
+ &package->pkgsource, &package->pkgsrcversion);
+ if (RET_IS_OK(r)) {
+ assert (package->pkgsource != NULL);
+ package->source = package->pkgsource;
+ package->sourceversion = package->pkgsrcversion;
+ }
+ return r;
+}
diff --git a/target.h b/target.h
new file mode 100644
index 0000000..32607ee
--- /dev/null
+++ b/target.h
@@ -0,0 +1,126 @@
+#ifndef REPREPRO_TARGET_H
+#define REPREPRO_TARGET_H
+
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_NAMES_H
+#include "names.h"
+#endif
+#ifndef REPREPRO_ATOMS_H
+#include "atoms.h"
+#endif
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_TRACKINGT_H
+#include "trackingt.h"
+#endif
+#ifndef REPREPRO_CHECKSUMS_H
+#include "checksums.h"
+#endif
+#ifndef REPREPRO_EXPORTS_H
+#include "exports.h"
+#endif
+
+struct target;
+struct alloverrides;
+
+typedef retvalue get_version(const char *, /*@out@*/char **);
+typedef retvalue get_architecture(const char *, /*@out@*/architecture_t *);
+struct package;
+typedef retvalue get_installdata(const struct target *, struct package *, /*@out@*/char **, /*@out@*/struct strlist *, /*@out@*/struct checksumsarray *);
+/* md5sums may be NULL */
+typedef retvalue get_filekeys(const char *, /*@out@*/struct strlist *);
+typedef retvalue get_checksums(const char *, /*@out@*/struct checksumsarray *);
+typedef retvalue do_reoverride(const struct target *, const char * /*packagename*/, const char *, /*@out@*/char **);
+typedef retvalue do_retrack(const char * /*packagename*/, const char * /*controlchunk*/, trackingdb);
+typedef retvalue get_sourceandversion(const char *, const char * /*packagename*/, /*@out@*/char ** /*source_p*/, /*@out@*/char ** /*version_p*/);
+typedef retvalue complete_checksums(const char *, const struct strlist *, struct checksums **, /*@out@*/char **);
+
+struct distribution;
+/* One target = one (component, architecture, packagetype) index of a
+ * distribution, backed by its own packages database table. */
+struct target {
+ struct distribution *distribution;
+ /* which slice of the distribution this target covers */
+ component_t component;
+ architecture_t architecture;
+ packagetype_t packagetype;
+ /* unique database identifier, e.g. "codename|component|arch" */
+ char *identifier;
+ /* links into the correct description in distribution */
+ /*@dependent@*/const struct exportmode *exportmode;
+ /* the directory relative to <distdir>/<codename>/ to use */
+ char *relativedirectory;
+ /* functions to use on the packages included */
+ get_version *getversion;
+ /* binary packages might be "all" or the architecture of the target */
+ get_architecture *getarchitecture;
+ get_installdata *getinstalldata;
+ get_filekeys *getfilekeys;
+ get_checksums *getchecksums;
+ get_sourceandversion *getsourceandversion;
+ do_reoverride *doreoverride;
+ do_retrack *doretrack;
+ complete_checksums *completechecksums;
+ /* wasmodified: index export pending; saved_wasmodified: remembers
+ * a modification even after a successful export reset wasmodified */
+ bool wasmodified, saved_wasmodified;
+ /* set when existed at startup time, only valid in --nofast mode */
+ bool existed;
+ /* the next one in the list of targets of a distribution */
+ struct target *next;
+ /* is initialized as soon as needed: */
+ struct table *packages;
+ /* do not allow write operations */
+ bool readonly;
+ /* has noexport option */
+ bool noexport;
+ /* was updated without tracking data (no problem when distribution
+ * has no tracking, otherwise cause warning later) */
+ bool staletracking;
+};
+
+retvalue target_initialize_ubinary(/*@dependant@*/struct distribution *, component_t, architecture_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, bool /*noexport*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **);
+retvalue target_initialize_dbinary(/*@dependant@*/struct distribution *, component_t, architecture_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, bool /*noexport*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **);
+retvalue target_initialize_binary(/*@dependant@*/struct distribution *, component_t, architecture_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, bool /*noexport*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **);
+retvalue target_initialize_source(/*@dependant@*/struct distribution *, component_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, bool /*noexport*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **);
+retvalue target_free(struct target *);
+
+retvalue target_export(struct target *, bool /*onlyneeded*/, bool /*snapshot*/, struct release *);
+
+/* This opens up the database, if db != NULL, *db will be set to it.. */
+retvalue target_initpackagesdb(struct target *, bool /*readonly*/);
+/* this closes databases... */
+retvalue target_closepackagesdb(struct target *);
+
+/* The following calls can only be called if target_initpackagesdb was called before: */
+struct logger;
+struct description;
+retvalue target_addpackage(struct target *, /*@null@*/struct logger *, const char *name, const char *version, const char *control, const struct strlist *filekeys, bool downgrade, /*@null@*/struct trackingdata *, architecture_t, /*@null@*/const char *causingrule, /*@null@*/const char *suitefrom);
+retvalue target_checkaddpackage(struct target *, const char *name, const char *version, bool tracking, bool permitnewerold);
+retvalue target_removepackage(struct target *, /*@null@*/struct logger *, const char *name, const char *version, struct trackingdata *);
+/* like target_removepackage, but do not read control data yourself but use available */
+retvalue target_rereference(struct target *);
+retvalue target_reoverride(struct target *, struct distribution *);
+retvalue target_redochecksums(struct target *, struct distribution *);
+
+/* Does this target pass the given component/architecture/packagetype
+ * filters?  A NULL/empty atomlist means "no restriction". */
+static inline bool target_matches(const struct target *t, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) {
+ if (limitations_missed(components, t->component))
+ return false;
+ if (limitations_missed(architectures, t->architecture))
+ return false;
+ if (limitations_missed(packagetypes, t->packagetype))
+ return false;
+ return true;
+}
+
+/* Build the "name|version" database key used by the multi-version
+ * packages table.  Returns a freshly malloc'd string, or NULL on OOM
+ * (callers must check). */
+static inline char *package_primarykey(const char *packagename, const char *version) {
+ char *key;
+
+ assert (packagename != NULL);
+ assert (version != NULL);
+ /* name + '|' + version + NUL */
+ key = malloc(strlen(packagename) + 1 + strlen(version) + 1);
+ if (key != NULL) {
+ strcpy(key, packagename);
+ strcat(key, "|");
+ strcat(key, version);
+ }
+ return key;
+}
+#endif
diff --git a/termdecide.c b/termdecide.c
new file mode 100644
index 0000000..6ff9590
--- /dev/null
+++ b/termdecide.c
@@ -0,0 +1,302 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "chunks.h"
+#include "globmatch.h"
+#include "dpkgversions.h"
+#include "terms.h"
+#include "termdecide.h"
+
+/* Evaluate one comparison of a field value against the formula's
+ * literal, using plain strcmp ordering (NOT dpkg version ordering;
+ * see compare_dpkgversions for that) or glob matching. */
+static inline bool check_field(enum term_comparison c, const char *value, const char *with) {
+ if (c == tc_none) {
+ /* bare field name: mere existence suffices */
+ return true;
+ } else if (c == tc_globmatch) {
+ return globmatch(value, with);
+ } else if (c == tc_notglobmatch) {
+ return !globmatch(value, with);
+ } else {
+ int i;
+ i = strcmp(value, with);
+ if (i < 0)
+ return c == tc_strictless
+ || c == tc_lessorequal
+ || c == tc_notequal;
+ else if (i > 0)
+ return c == tc_strictmore
+ || c == tc_moreorequal
+ || c == tc_notequal;
+ else
+ return c == tc_lessorequal
+ || c == tc_moreorequal
+ || c == tc_equal;
+ }
+}
+
+/* this has a target argument instead of using package->target
+ * as the package might come from one distribution/architecture/...
+ * and the decision being about adding it somewhere else */
+/* Walk the compiled term (a BDD-like chain: nextiftrue/nextiffalse)
+ * and decide whether the package matches.  RET_OK = include,
+ * RET_NOTHING = exclude, errors are passed through. */
+retvalue term_decidepackage(const term *condition, struct package *package, struct target *target) {
+ const struct term_atom *atom = condition;
+
+ while (atom != NULL) {
+ bool correct; char *value;
+ enum term_comparison c = atom->comparison;
+ retvalue r;
+
+ if (atom->isspecial) {
+ /* '$'-predicates evaluate via their own callback */
+ correct = atom->special.type->compare(c,
+ &atom->special.comparewith,
+ package, target);
+ } else {
+ r = chunk_getvalue(package->control,
+ atom->generic.key, &value);
+ if (RET_WAS_ERROR(r))
+ return r;
+ if (r == RET_NOTHING) {
+ /* missing field only satisfies negative tests */
+ correct = (c == tc_notequal
+ || c == tc_notglobmatch);
+ } else {
+ correct = check_field(c, value,
+ atom->generic.comparewith);
+ free(value);
+ }
+ }
+ if (atom->negated)
+ correct = !correct;
+ if (correct) {
+ /* NULL nextiftrue means overall truth */
+ atom = atom->nextiftrue;
+ } else {
+ atom = atom->nextiffalse;
+ if (atom == NULL) {
+ /* do not include */
+ return RET_NOTHING;
+ }
+ }
+
+ }
+ /* do include */
+ return RET_OK;
+}
+
+/* Parse the argument of a special ('$') predicate as a plain string
+ * to compare against.  A bare special without a comparison is
+ * rejected: specials are always defined, so testing mere existence
+ * is pointless.  (Fix: error text said "makes not sense".) */
+static retvalue parsestring(enum term_comparison c, const char *value, size_t len, struct compare_with *v) {
+ if (c == tc_none) {
+ fprintf(stderr,
+"Error: Special formula predicates (those starting with '$') are always\n"
+"defined, thus specifying them without parameter to compare against\n"
+"makes no sense!\n");
+ return RET_ERROR;
+ }
+ v->pointer = strndup(value, len);
+ if (FAILEDTOALLOC(v->pointer))
+ return RET_ERROR_OOM;
+ return RET_OK;
+}
+// TODO: check for well-formed versions
+#define parseversion parsestring
+
+/* $Source predicate: compare the package's source name (string
+ * comparison/glob).  Packages whose source cannot be determined
+ * never match. */
+static bool comparesource(enum term_comparison c, const struct compare_with *v, void *d1, UNUSED(void *d2)) {
+ struct package *package = d1;
+ retvalue r;
+
+ r = package_getsource(package);
+ if (!RET_IS_OK(r))
+ return false;
+ return check_field(c, package->source, v->pointer);
+}
+
+/* Like check_field, but order relations use dpkg version comparison
+ * semantics; glob matches still operate on the raw string.  An
+ * unparsable version makes any ordered comparison false. */
+static inline bool compare_dpkgversions(enum term_comparison c, const char *version, const char *param) {
+ if (c != tc_globmatch && c != tc_notglobmatch) {
+ int cmp;
+ retvalue r;
+
+ r = dpkgversions_cmp(version, param, &cmp);
+ if (RET_IS_OK(r)) {
+ if (cmp < 0)
+ return c == tc_strictless
+ || c == tc_lessorequal
+ || c == tc_notequal;
+ else if (cmp > 0)
+ return c == tc_strictmore
+ || c == tc_moreorequal
+ || c == tc_notequal;
+ else
+ return c == tc_lessorequal
+ || c == tc_moreorequal
+ || c == tc_equal;
+ } else
+ return false;
+ } else
+ return check_field(c, version, param);
+}
+
+/* $Version predicate: dpkg-version comparison of the package's own
+ * version; non-matching if the version cannot be extracted. */
+static bool compareversion(enum term_comparison c, const struct compare_with *v, void *d1, UNUSED(void *d2)) {
+ struct package *package = d1;
+ retvalue r;
+
+ r = package_getversion(package);
+ if (!RET_IS_OK(r))
+ return false;
+ return compare_dpkgversions(c, package->version, v->pointer);
+}
+/* $SourceVersion predicate: dpkg-version comparison of the source
+ * package's version. */
+static bool comparesourceversion(enum term_comparison c, const struct compare_with *v, void *d1, UNUSED(void *d2)) {
+ struct package *package = d1;
+ retvalue r;
+
+ r = package_getsource(package);
+ if (!RET_IS_OK(r))
+ return false;
+ return compare_dpkgversions(c, package->sourceversion, v->pointer);
+}
+
+/* Destructor for specials that always store an allocated string. */
+static void freestring(UNUSED(enum term_comparison c), struct compare_with *d) {
+ free(d->pointer);
+}
+/* Destructor for specials that store an atom number for ==/!= (no
+ * allocation) but an allocated string for all other comparisons. */
+static void freeatom(enum term_comparison c, struct compare_with *d) {
+ if (c != tc_equal && c != tc_notequal)
+ free(d->pointer);
+}
+
+/* $Type argument parser: for ==/!= resolve the name to a packagetype
+ * atom (must be known); other comparisons keep the raw string for
+ * glob/strcmp matching. */
+static retvalue parsetype(enum term_comparison c, const char *value, size_t len, struct compare_with *v) {
+ if (c == tc_none) {
+ fprintf(stderr,
+"Error: $Type is always defined, it does not make sense without parameter\n"
+"to compare against!\n");
+ return RET_ERROR;
+ }
+ if (c != tc_equal && c != tc_notequal) {
+ v->pointer = strndup(value, len);
+ if (FAILEDTOALLOC(v->pointer))
+ return RET_ERROR_OOM;
+ return RET_OK;
+ }
+ v->number = packagetype_find_l(value, len);
+ if (atom_defined(v->number))
+ return RET_OK;
+ fprintf(stderr, "Unknown package type '%.*s' in formula!\n",
+ (int)len, value);
+ return RET_ERROR;
+}
+
+/* $Architecture argument parser: analogous to parsetype, atoms only
+ * exist for architectures listed in conf/distributions. */
+static retvalue parsearchitecture(enum term_comparison c, const char *value, size_t len, struct compare_with *v) {
+ if (c == tc_none) {
+ fprintf(stderr,
+"Error: $Architecture is always defined, it does not make sense without parameter\n"
+"to compare against!\n");
+ return RET_ERROR;
+ }
+ if (c != tc_equal && c != tc_notequal) {
+ v->pointer = strndup(value, len);
+ if (FAILEDTOALLOC(v->pointer))
+ return RET_ERROR_OOM;
+ return RET_OK;
+ }
+ v->number = architecture_find_l(value, len);
+ if (atom_defined(v->number))
+ return RET_OK;
+ fprintf(stderr,
+"Unknown architecture '%.*s' in formula (must be listed in conf/distributions to be known)!\n",
+ (int)len, value);
+ return RET_ERROR;
+}
+
+/* $Component argument parser: analogous to parsearchitecture. */
+static retvalue parsecomponent(enum term_comparison c, const char *value, size_t len, struct compare_with *v) {
+ if (c == tc_none) {
+ fprintf(stderr,
+"Error: $Component is always defined, it does not make sense without parameter\n"
+"to compare against!\n");
+ return RET_ERROR;
+ }
+ if (c != tc_equal && c != tc_notequal) {
+ v->pointer = strndup(value, len);
+ if (FAILEDTOALLOC(v->pointer))
+ return RET_ERROR_OOM;
+ return RET_OK;
+ }
+ v->number = component_find_l(value, len);
+ if (atom_defined(v->number))
+ return RET_OK;
+ fprintf(stderr,
+"Unknown component '%.*s' in formula (must be listed in conf/distributions to be known)!\n",
+ (int)len, value);
+ return RET_ERROR;
+}
+
+/* $Type evaluator: ==/!= compare atoms directly (cheap), other
+ * comparisons fall back to string matching on the atom's name. */
+static bool comparetype(enum term_comparison c, const struct compare_with *v, UNUSED( void *d1), void *d2) {
+ const struct target *target = d2;
+
+ if (c == tc_equal)
+ return v->number == target->packagetype;
+ else if (c == tc_notequal)
+ return v->number != target->packagetype;
+ else
+ return check_field(c,
+ atoms_packagetypes[target->packagetype],
+ v->pointer);
+
+}
+/* $Architecture evaluator: same scheme as comparetype. */
+static bool comparearchitecture(enum term_comparison c, const struct compare_with *v, UNUSED(void *d1), void *d2) {
+ const struct target *target = d2;
+
+ if (c == tc_equal)
+ return v->number == target->architecture;
+ else if (c == tc_notequal)
+ return v->number != target->architecture;
+ else
+ return check_field(c,
+ atoms_architectures[target->architecture],
+ v->pointer);
+}
+/* $Component evaluator: same scheme as comparetype. */
+static bool comparecomponent(enum term_comparison c, const struct compare_with *v, UNUSED(void *d1), void *d2) {
+ const struct target *target = d2;
+
+ if (c == tc_equal)
+ return v->number == target->component;
+ else if (c == tc_notequal)
+ return v->number != target->component;
+ else
+ return check_field(c,
+ atoms_components[target->component],
+ v->pointer);
+}
+
+/* Table of '$'-predicates available in target-decision formulas;
+ * matched case-insensitively by parseatom, NULL-name terminated.
+ * $Type and $PackageType are aliases. */
+static struct term_special targetdecisionspecial[] = {
+ {"$Source", parsestring, comparesource, freestring},
+ {"$SourceVersion", parseversion, comparesourceversion, freestring},
+ {"$Version", parseversion, compareversion, freestring},
+ {"$Architecture", parsearchitecture, comparearchitecture, freeatom},
+ {"$Component", parsecomponent, comparecomponent, freeatom},
+ {"$Type", parsetype, comparetype, freeatom},
+ {"$PackageType", parsetype, comparetype, freeatom},
+ {NULL, NULL, NULL, NULL}
+};
+
+/* Compile a formula for deciding package inclusion in a target, with
+ * the full feature set (|, (), !, version relations, !=, globs) and
+ * the '$'-predicates defined above. */
+retvalue term_compilefortargetdecision(term **term_p, const char *formula) {
+ return term_compile(term_p, formula,
+ T_GLOBMATCH|T_OR|T_BRACKETS|T_NEGATION|T_VERSION|T_NOTEQUAL,
+ targetdecisionspecial);
+}
diff --git a/termdecide.h b/termdecide.h
new file mode 100644
index 0000000..ee77ac9
--- /dev/null
+++ b/termdecide.h
@@ -0,0 +1,19 @@
+#ifndef REPREPRO_TERMDECIDE_H
+#define REPREPRO_TERMDECIDE_H
+
+#ifndef REPREPRO_TERMS_H
+#include "terms.h"
+#endif
+#ifndef REPREPRO_TARGET_H
+#include "target.h"
+#endif
+#ifndef REPREPRO_PACKAGE_H
+#include "package.h"
+#endif
+
+retvalue term_compilefortargetdecision(/*@out@*/term **, const char *);
+retvalue term_decidepackage(const term *, struct package *, struct target *);
+
+
+
+#endif
diff --git a/terms.c b/terms.c
new file mode 100644
index 0000000..34438d1
--- /dev/null
+++ b/terms.c
@@ -0,0 +1,387 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2007,2009 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "chunks.h"
+#include "globmatch.h"
+#include "terms.h"
+
+/* Free a whole compiled term by walking the global 'next' chain
+ * (which links every atom, unlike nextiftrue/nextiffalse).  Special
+ * atoms delegate to their type's 'done' callback. */
+void term_free(term *t) {
+ while (t != NULL) {
+ struct term_atom *next = t->next;
+ if (t->isspecial) {
+ if (t->special.type != NULL &&
+ t->special.type->done != NULL)
+ t->special.type->done(t->comparison,
+ &t->special.comparewith);
+
+ } else {
+ free(t->generic.key);
+ free(t->generic.comparewith);
+ }
+ strlist_done(&t->architectures);
+ free(t);
+ t = next;
+ }
+}
+
+/* Parse one atom of a formula: [!]key[(<rel> value)], advancing
+ * *formula.  Returns RET_NOTHING (with *formula at the offending
+ * character) on syntax mismatch so term_compile can report it.
+ * Fixes: duplicated "*f != '('" test in the key scan removed;
+ * "Ridicilous" typo in the overlong-globmatch error corrected. */
+static retvalue parseatom(const char **formula, /*@out@*/struct term_atom **atom, int options, const struct term_special *specials) {
+ struct term_atom *a;
+ const char *f = *formula;
+#define overspace() while (*f != '\0' && xisspace(*f)) f++
+ const char *keystart, *keyend;
+ const char *valuestart, *valueend;
+ enum term_comparison comparison = tc_none;
+ bool negated = false;
+ const struct term_special *s;
+
+
+ overspace();
+ if (*f == '!' && ISSET(options, T_NEGATION)) {
+ negated = true;
+ f++;
+ }
+ keystart = f;
+ // TODO: allow more strict checking again with some option?
+ /* key runs until a delimiter; '(' starts a comparison */
+ while (*f != '\0' && *f != '(' && !xisspace(*f) && *f != ','
+ && *f != '|' && *f != ')'
+ && *f != '[' && *f != '!')
+ f++;
+ keyend = f;
+ if (keystart == keyend) {
+ *formula = f;
+ return RET_NOTHING;
+ }
+ overspace();
+ if (ISSET(options, T_VERSION) && *f == '(') {
+ f++;
+ overspace();
+ /* parse the relation operator */
+ switch (*f) {
+ case '>':
+ f++;
+ if (*f == '=') {
+ comparison = tc_moreorequal;
+ f++;
+ } else if (*f == '>') {
+ comparison = tc_strictmore;
+ f++;
+ } else {
+ comparison = tc_moreorequal;
+ fprintf(stderr,
+"Warning: Found a '(>' without '=' or '>' in '%s'(beginning cut), will be treated as '>='.\n",
+ *formula);
+ }
+ break;
+ case '<':
+ f++;
+ if (*f == '=') {
+ comparison = tc_lessorequal;
+ f++;
+ } else if (*f == '<') {
+ comparison = tc_strictless;
+ f++;
+ } else {
+ comparison = tc_lessorequal;
+ fprintf(stderr,
+"Warning: Found a '(<' without '=' or '<' in '%s'(begin cut), will be treated as '<='.\n",
+ *formula);
+ }
+ break;
+ case '=':
+ f++;
+ if (*f == '=')
+ f++;
+ else if (*f != ' ') {
+ *formula = f;
+ return RET_NOTHING;
+ }
+ comparison = tc_equal;
+ break;
+ case '%':
+ if (ISSET(options, T_GLOBMATCH)) {
+ f++;
+ comparison = tc_globmatch;
+ break;
+ }
+ *formula = f;
+ return RET_NOTHING;
+ case '!':
+ if (f[1] == '%' &&
+ ISSET(options, T_GLOBMATCH)) {
+ f += 2;
+ comparison = tc_notglobmatch;
+ break;
+ }
+ if (ISSET(options, T_NOTEQUAL)) {
+ f++;
+ if (*f != '=') {
+ *formula = f;
+ return RET_NOTHING;
+ }
+ f++;
+ comparison = tc_notequal;
+ break;
+ }
+ *formula = f;
+ return RET_NOTHING;
+ default:
+ *formula = f;
+ return RET_NOTHING;
+ }
+ overspace();
+ /* scan the value up to ')', trimming trailing whitespace */
+ valueend = valuestart = f;
+ while (*f != '\0' && *f != ')') {
+ valueend = f+1;
+ f++;
+ while (*f != '\0' && xisspace(*f))
+ f++;
+ }
+ if (*f != ')' || valueend == valuestart) {
+ *formula = f;
+ return RET_NOTHING;
+ }
+ f++;
+
+ } else {
+ comparison = tc_none;
+ valuestart = valueend = NULL;
+ }
+ overspace();
+ if (ISSET(options, T_ARCHITECTURES) && *f == '[') {
+ //TODO: implement this one...
+ assert ("Not yet implemented!" == NULL);
+ }
+ /* is the key one of the registered '$'-specials? (case-insensitive) */
+ for (s = specials ; s->name != NULL ; s++) {
+ if (strncasecmp(s->name, keystart, keyend-keystart) == 0 &&
+ s->name[keyend-keystart] == '\0')
+ break;
+ }
+ a = zNEW(struct term_atom);
+ if (FAILEDTOALLOC(a))
+ return RET_ERROR_OOM;
+ a->negated = negated;
+ a->comparison = comparison;
+ if (s->name != NULL) {
+ retvalue r;
+
+ a->isspecial = true;
+ a->special.type = s;
+ r = s->parse(comparison, valuestart, valueend-valuestart,
+ &a->special.comparewith);
+ if (RET_WAS_ERROR(r)) {
+ term_free(a);
+ return r;
+ }
+ } else {
+ a->isspecial = false;
+ a->generic.key = strndup(keystart, keyend - keystart);
+ if (FAILEDTOALLOC(a->generic.key)) {
+ term_free(a);
+ return RET_ERROR_OOM;
+ }
+ if (comparison != tc_none) {
+ if (valueend - valuestart > 2048 &&
+ (comparison == tc_globmatch ||
+ comparison == tc_notglobmatch)) {
+ fprintf(stderr,
+"Ridiculously long globmatch '%.10s...'!\n",
+ valuestart);
+ term_free(a);
+ return RET_ERROR;
+ }
+ a->generic.comparewith = strndup(valuestart,
+ valueend - valuestart);
+ if (FAILEDTOALLOC(a->generic.comparewith)) {
+ term_free(a);
+ return RET_ERROR_OOM;
+ }
+ }
+ }
+ //TODO: here architectures, too
+
+ *atom = a;
+ *formula = f;
+ return RET_OK;
+#undef overspace
+}
+
+/* as this are quite special BDDs (a atom being false cannot make it true),
+ * the places where True and False can be found are
+ * quite easy and fast to find: */
+
+/* OR termtoor into termtochange: every atom whose failure currently
+ * ends the term (nextiffalse == NULL) instead continues there. */
+static void orterm(term *termtochange, /*@dependent@*/term *termtoor) {
+ struct term_atom *p = termtochange;
+
+ while (p != NULL) {
+ while (p->nextiffalse != NULL)
+ p = p->nextiffalse;
+ p->nextiffalse= termtoor;
+ p = p->nextiftrue;
+ }
+}
+/* AND termtoand into termtochange: every atom whose success currently
+ * ends the term (nextiftrue == NULL) instead continues there. */
+static void andterm(term *termtochange, /*@dependent@*/term *termtoand) {
+ struct term_atom *p = termtochange;
+
+ while (p != NULL) {
+ while (p->nextiftrue != NULL)
+ p = p->nextiftrue;
+ p->nextiftrue = termtoand;
+ p = p->nextiffalse;
+ }
+}
+
+/* Compile a textual formula into the BDD-like atom chain, honoring
+ * the 'options' feature flags and the '$'-specials table.  Bracket
+ * nesting is tracked per depth (max 50) so ',' (AND) and '|' (OR)
+ * can be wired to the right sub-term via andterm()/orterm(). */
+retvalue term_compile(term **term_p, const char *origformula, int options, const struct term_special *specials) {
+ const char *formula = origformula;
+ /* for the global list */
+ struct term_atom *first, *last;
+ /* the atom just read */
+ struct term_atom *atom;
+ struct {
+ /*@dependent@*/struct term_atom *firstinand, *firstinor;
+ } levels[50];
+ int lastinitializeddepth=-1;
+ int depth=0;
+ retvalue r;
+ int i;
+ //TODO: ???
+ /* junction: the ','/'|' seen before the current atom, '\0' at start */
+ char junction = '\0';
+
+ if (ISSET(options, T_ARCHITECTURES)) {
+ //TODO: implement this one...
+ assert ("Not yet implemented!" == NULL);
+ }
+
+#define overspace() while (*formula!='\0' && xisspace(*formula)) formula++
+
+ lastinitializeddepth=-1;
+ depth=0;
+ first = last = NULL;
+
+ while (true) {
+ overspace();
+ while (*formula == '(' && ISSET(options, T_BRACKETS)) {
+ depth++; formula++;
+ overspace();
+ }
+ if (depth >= 50) {
+ term_free(first);
+ fprintf(stderr,
+"Nested too deep: '%s'!\n",
+ origformula);
+ return RET_ERROR;
+ }
+ r = parseatom(&formula, &atom, options, specials);
+ if (r == RET_NOTHING) {
+ if (*formula == '\0')
+ fprintf(stderr,
+"Unexpected end of string parsing formula '%s'!\n",
+ origformula);
+ else
+ fprintf(stderr,
+"Unexpected character '%c' parsing formula '%s'!\n",
+ *formula, origformula);
+
+ r = RET_ERROR;
+ }
+ if (RET_WAS_ERROR(r)) {
+ term_free(first);
+ return r;
+ }
+ /* freshly entered bracket levels start at this atom */
+ for (i=lastinitializeddepth+1 ; i <= depth ; i ++) {
+ levels[i].firstinand = atom;
+ levels[i].firstinor = atom;
+ }
+ if (junction != '\0') {
+ assert(lastinitializeddepth >= 0);
+ assert (first != NULL);
+ last->next = atom;
+ last = atom;
+ if (junction == ',') {
+ andterm(levels[lastinitializeddepth].firstinand,
+ atom);
+ levels[lastinitializeddepth].firstinand = atom;
+ levels[lastinitializeddepth].firstinor = atom;
+ } else {
+ assert (junction == '|');
+ orterm(levels[lastinitializeddepth].firstinor,
+ atom);
+ levels[lastinitializeddepth].firstinor = atom;
+ }
+ } else {
+ /* very first atom of the formula */
+ assert(lastinitializeddepth == -1);
+ assert (first == NULL);
+ first = last = atom;
+ }
+ lastinitializeddepth = depth;
+ overspace();
+ if (*formula == ')' && ISSET(options, T_BRACKETS)) {
+ formula++;
+ if (depth > 0) {
+ depth--;
+ lastinitializeddepth = depth;
+ } else {
+ fprintf(stderr,
+"Too many ')'s in '%s'!\n",
+ origformula);
+ term_free(first);
+ return RET_ERROR;
+ }
+ overspace();
+ }
+ overspace();
+ if (*formula == '\0')
+ break;
+ if (*formula != ',' &&
+ (*formula != '|' || NOTSET(options, T_OR))) {
+ fprintf(stderr,
+"Unexpected character '%c' within '%s'!\n",
+ *formula, origformula);
+ term_free(first);
+ return RET_ERROR;
+ }
+ junction = *formula;
+ formula++;
+ }
+ if (depth > 0) {
+ fprintf(stderr,
+"Missing ')' at end of formula '%s'!\n",
+ origformula);
+ term_free(first);
+ return RET_ERROR;
+
+ }
+ if (*formula != '\0') {
+ fprintf(stderr,
+"Trailing garbage at end of term: '%s'\n",
+ formula);
+ term_free(first);
+ return RET_ERROR;
+ }
+ *term_p = first;
+ return RET_OK;
+}
+
diff --git a/terms.h b/terms.h
new file mode 100644
index 0000000..8c307d5
--- /dev/null
+++ b/terms.h
@@ -0,0 +1,65 @@
+#ifndef REPREPRO_TERMS_H
+#define REPREPRO_TERMS_H
+
+enum term_comparison { tc_none=0, tc_equal, tc_strictless, tc_strictmore,
+ tc_lessorequal, tc_moreorequal,
+ tc_notequal, tc_globmatch, tc_notglobmatch};
+
+struct term_special;
+
+typedef struct term_atom {
+ /* global list to allow freeing them all */
+ struct term_atom *next;
+ /* the next atom to look at if this is true, resp. false,
+ * nextiftrue == NULL means total result is true,
+ * nextiffalse == NULL means total result is false. */
+ /*@dependent@*/struct term_atom *nextiftrue, *nextiffalse;
+ bool negated, isspecial;
+ /* architecture requirements */
+ bool architectures_negated;
+ struct strlist architectures;
+ /* version/value requirement */
+ enum term_comparison comparison;
+ union {
+ struct {
+ /* package-name or key */
+ char *key;
+ /* version/value requirement */
+ char *comparewith;
+ } generic;
+ struct {
+ const struct term_special *type;
+ struct compare_with {
+ void *pointer;
+ long number;
+ } comparewith;
+ } special;
+ };
+} term;
+
+struct term_special {
+ const char *name;
+ retvalue (*parse)(enum term_comparison, const char *, size_t len, struct compare_with *);
+ bool (*compare)(enum term_comparison, const struct compare_with *, void*, void*);
+ void (*done)(enum term_comparison, struct compare_with *);
+};
+
+/* | is allowed in terms */
+#define T_OR 0x01
+/* () are allowed to build sub-expressions */
+#define T_BRACKETS 0x02
+/* expressions may be negated */
+#define T_NEGATION 0x04
+/* (<rel> <version>) is allowed */
+#define T_VERSION 0x10
+/* [archlist] is allowed */
+#define T_ARCHITECTURES 0x20
+/* (!= value) is allowed */
+#define T_NOTEQUAL 0x40
+/* (% <globpattern>) and (!% globpattern) are allowed */
+#define T_GLOBMATCH 0x80
+
+retvalue term_compile(/*@out@*/term **, const char * /*formula*/, int /*options*/, /*@null@*/const struct term_special *specials);
+void term_free(/*@null@*//*@only@*/term *);
+
+#endif
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..709e79f
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,62 @@
+EXTRA_DIST = \
+brokenuncompressor.sh \
+genpackage.sh \
+test.inc \
+test.sh \
+atoms.test \
+buildinfo.test \
+buildneeding.test \
+check.test \
+copy.test \
+descriptions.test \
+diffgeneration.test \
+easyupdate.test \
+export.test \
+exporthooks.test \
+flat.test \
+flood.test \
+includeasc.test \
+includeextra.test \
+layeredupdate.test \
+layeredupdate2.test \
+listcodenames.test \
+morgue.test \
+onlysmalldeletes.test \
+override.test \
+packagediff.test \
+signatures.test \
+signed.test \
+snapshotcopyrestore.test \
+srcfilterlist.test \
+subcomponents.test \
+template.test \
+trackingcorruption.test \
+uncompress.test \
+updatecorners.test \
+updatepullreject.test \
+uploaders.test \
+various1.test \
+various2.test \
+various3.test \
+verify.test \
+wrongarch.test \
+evil.key \
+expired.key \
+expiredwithsubkey.key \
+expiredwithsubkey-working.key \
+good.key \
+revoked.key \
+revoked.pkey \
+withsubkeys.key \
+withsubkeys-works.key \
+basic.sh \
+multiversion.sh \
+shunit2-helper-functions.sh
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+check:
+ ./basic.sh
+ ./multiversion.sh
+
+clean-local:
+ rm -rf testrepo testpkgs
diff --git a/tests/atoms.test b/tests/atoms.test
new file mode 100644
index 0000000..c57a53d
--- /dev/null
+++ b/tests/atoms.test
@@ -0,0 +1,180 @@
+. "$TESTSDIR"/test.inc
+
+# different tests to check the error messages when accessing
+# architectures, components or packagetypes...
+
+mkdir conf
+cat > conf/options <<EOF
+export silent-never
+EOF
+
+cat > conf/distributions <<EOF
+Codename: codename
+Architectures: te/st all source
+Components: component
+EOF
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 2, column 16: Malformed Architectures element 'te/st': '/' is not allowed
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#te/st#test#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error: Distribution codename contains an architecture called 'all'.
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#\<all\>#a|l#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 2, column 21: Malformed Architectures element 'a|l': '|' is not allowed
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#\<a|l\>##' -e 's#component#compo|nent#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element 'compo|nent': '|' is not allowed
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#compo|nent#.#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element '.': '.' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's# .$# ./test#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element './test': '.' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's# ./test$# bla/./test#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element 'bla/./test': '.' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's# bla/./test$# bla/../test#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element 'bla/../test': '..' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#/test$##' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element 'bla/..': '..' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#bla/##' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 13: Malformed Components element '..': '..' is not allowed as directory part
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#\.\.#component#' -e 's#Components#UdebComponents#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 3, column 16:
+*= A 'UDebComponents'-field is only allowed after a 'Components'-field.
+-v0*=There have been errors!
+return 255
+EOF
+
+ed -s conf/distributions <<EOF
+/Codename/a
+Components: test
+.
+w
+q
+EOF
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 4, column 17: 'component' not allowed in UDebComponents as it was not in Components.
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#test$#test component#' conf/distributions
+cat >> conf/distributions <<EOF
+ContentsArchitectures: bla
+EOF
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 5, column 24: 'bla' not allowed in ContentsArchitectures as it was not in Architectures.
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#ContentsArchitectures#ContentsComponents#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 5, column 21: 'bla' not allowed in ContentsComponents as it was not in Components.
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#ContentsComponents: bla#ContentsUComponents: test#' conf/distributions
+
+testrun - -b . update 3<<EOF
+*=Error parsing ./conf/distributions, line 5, column 22: 'test' not allowed in ContentsUComponents as it was not in UDebComponents.
+-v0*=There have been errors!
+return 255
+EOF
+
+sed -i -e 's#ContentsUComponents: test#ContentsUComponents: component#' conf/distributions
+
+testrun - -b . -A test export 3<<EOF
+*=Action 'export' cannot be restricted to an architecture!
+*=neither --archiecture nor -A make sense here.
+*=To ignore use --ignore=unusedoption.
+-v0*=There have been errors!
+return 255
+EOF
+testrun - -b . -C test export 3<<EOF
+*=Action 'export' cannot be restricted to a component!
+*=neither --component nor -C make sense here.
+*=To ignore use --ignore=unusedoption.
+-v0*=There have been errors!
+return 255
+EOF
+testrun - -b . -T dsc export 3<<EOF
+*=Action 'export' cannot be restricted to a packagetype!
+*=neither --packagetype nor -T make sense here.
+*=To ignore use --ignore=unusedoption.
+-v0*=There have been errors!
+return 255
+EOF
+mkdir db
+testrun - -b . -A test remove codename nothing 3<<EOF
+-v0*=Not removed as not found: nothing
+EOF
+testrun - -b . -A bla remove codename nothing 3<<EOF
+*=Error: Architecture 'bla' as given to --architecture is not know.
+*=(it does not appear as architecture in ./conf/distributions (did you mistype?))
+-v0*=There have been errors!
+returns 255
+EOF
+
+rm -r conf db
+testsuccess
diff --git a/tests/basic.sh b/tests/basic.sh
new file mode 100755
index 0000000..867c489
--- /dev/null
+++ b/tests/basic.sh
@@ -0,0 +1,425 @@
+#!/bin/sh
+set -u
+
+# Copyright (C) 2017, Benjamin Drung <benjamin.drung@profitbricks.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+. "${0%/*}/shunit2-helper-functions.sh"
+
+setUp() {
+ create_repo
+}
+
+tearDown() {
+ check_db
+}
+
+test_empty() {
+ $REPREPRO -b $REPO export
+ call $REPREPRO -b $REPO list buster
+ assertEquals "" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_list() {
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=1.0 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_1.0-1_${ARCH}.deb
+ assertEquals "buster|main|$ARCH: hello 1.0-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_ls() {
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster EPOCH="1:" VERSION=2.5 REVISION=-3 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.5-3_${ARCH}.deb
+ assertEquals "hello | 1:2.5-3 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_copy() {
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster EPOCH="1:" VERSION=2.5 REVISION=-3 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.5-3_${ARCH}.deb
+ assertEquals "hello | 1:2.5-3 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster hello
+ assertEquals "bullseye|main|$ARCH: hello 1:2.5-3" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_copy_existing() {
+ add_distro bullseye
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/sl_3.03-1_${ARCH}.deb
+ assertEquals "sl | 3.03-1 | buster | $ARCH" "$($REPREPRO -b $REPO ls sl)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster sl
+ assertEquals "\
+sl | 3.03-1 | buster | $ARCH
+sl | 3.03-1 | bullseye | $ARCH" "$($REPREPRO -b $REPO ls sl)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster sl
+ assertEquals "\
+sl | 3.03-1 | buster | $ARCH
+sl | 3.03-1 | bullseye | $ARCH" "$($REPREPRO -b $REPO ls sl)"
+}
+
+test_include_changes() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ assertEquals "\
+buster|main|$ARCH: sl 3.03-1
+buster|main|$ARCH: sl-addons 3.03-1
+buster|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_include_old() {
+ # Test including an old package version. Expected output:
+ # Skipping inclusion of 'hello' '2.9-1' in 'buster|main|$ARCH', as it has already '2.9-2'.
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-2_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+ assertEquals "buster|main|$ARCH: hello 2.9-2" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_limit() {
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-2_${ARCH}.deb
+ assertEquals "buster|main|$ARCH: hello 2.9-2" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_older_version() {
+ cat >> $REPO/conf/incoming <<EOF
+Name: buster-upload
+IncomingDir: incoming
+TempDir: tmp
+Allow: buster
+Permit: older_version
+EOF
+ echo "Limit: 3" >> $REPO/conf/distributions
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ mkdir -p "$REPO/incoming"
+ cp "$PKGS/hello_2.9-2_${ARCH}.changes" "$PKGS/hello-addons_2.9-2_all.deb" "$PKGS/hello_2.9-2_${ARCH}.deb" "$PKGS/hello_2.9-2.dsc" "$PKGS/hello_2.9.orig.tar.gz" "$PKGS/hello_2.9-2.debian.tar.xz" "$REPO/incoming"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO processincoming buster-upload hello_2.9-2_${ARCH}.changes
+ assertEquals "hello | 2.9-2 | buster | $ARCH, source" "$($REPREPRO -b $REPO ls hello)"
+ cp "$PKGS/hello_2.9-1_${ARCH}.changes" "$PKGS/hello-addons_2.9-1_all.deb" "$PKGS/hello_2.9-1_${ARCH}.deb" "$PKGS/hello_2.9-1.dsc" "$PKGS/hello_2.9.orig.tar.gz" "$PKGS/hello_2.9-1.debian.tar.xz" "$REPO/incoming"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO processincoming buster-upload hello_2.9-1_${ARCH}.changes
+ assertEquals "\
+hello | 2.9-2 | buster | $ARCH, source
+hello | 2.9-1 | buster | $ARCH, source" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_too_old_version() {
+ # Allow only one version per package in the archive
+ # Test if uploading an older version will not replace the newer version
+ # in the archive.
+ cat >> $REPO/conf/incoming <<EOF
+Name: buster-upload
+IncomingDir: incoming
+TempDir: tmp
+Allow: buster
+Permit: older_version
+EOF
+ echo "Limit: 1" >> $REPO/conf/distributions
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ mkdir -p "$REPO/incoming"
+ cp "$PKGS/hello_2.9-2_${ARCH}.changes" "$PKGS/hello-addons_2.9-2_all.deb" "$PKGS/hello_2.9-2_${ARCH}.deb" "$PKGS/hello_2.9-2.dsc" "$PKGS/hello_2.9.orig.tar.gz" "$PKGS/hello_2.9-2.debian.tar.xz" "$REPO/incoming"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO processincoming buster-upload hello_2.9-2_${ARCH}.changes
+ assertEquals "hello | 2.9-2 | buster | $ARCH, source" "$($REPREPRO -b $REPO ls hello)"
+ cp "$PKGS/hello_2.9-1_${ARCH}.changes" "$PKGS/hello-addons_2.9-1_all.deb" "$PKGS/hello_2.9-1_${ARCH}.deb" "$PKGS/hello_2.9-1.dsc" "$PKGS/hello_2.9.orig.tar.gz" "$PKGS/hello_2.9-1.debian.tar.xz" "$REPO/incoming"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO processincoming buster-upload hello_2.9-1_${ARCH}.changes
+ assertEquals "hello | 2.9-2 | buster | $ARCH, source" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_remove() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ assertEquals "\
+buster|main|$ARCH: sl 3.03-1
+buster|main|$ARCH: sl-addons 3.03-1
+buster|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list buster)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO remove buster sl
+ assertEquals "buster|main|$ARCH: sl-addons 3.03-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_listcodenames() {
+ assertEquals "buster" "$($REPREPRO -b $REPO _listcodenames)"
+ add_distro bullseye
+ assertEquals "\
+buster
+bullseye" "$($REPREPRO -b $REPO _listcodenames)"
+}
+
+test_copysrc() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copysrc bullseye buster sl
+ assertEquals "\
+bullseye|main|$ARCH: sl 3.03-1
+bullseye|main|$ARCH: sl-addons 3.03-1
+bullseye|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list bullseye)"
+ assertEquals "\
+buster|main|$ARCH: sl 3.03-1
+buster|main|$ARCH: sl-addons 3.03-1
+buster|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_copymatched() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copymatched bullseye buster "sl-a*on?"
+ assertEquals "bullseye|main|$ARCH: sl-addons 3.03-1" "$($REPREPRO -b $REPO list bullseye)"
+ assertEquals "\
+buster|main|$ARCH: sl 3.03-1
+buster|main|$ARCH: sl-addons 3.03-1
+buster|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_move() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedsc buster $PKGS/sl_3.03-1.dsc
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/sl_3.03-1_$ARCH.deb $PKGS/sl-addons_3.03-1_all.deb
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO move bullseye buster sl
+ assertEquals "\
+bullseye|main|$ARCH: sl 3.03-1
+bullseye|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list bullseye)"
+ assertEquals "buster|main|$ARCH: sl-addons 3.03-1" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "\
+Distribution: buster
+Source: sl
+Version: 3.03-1
+Files:
+ pool/main/s/sl/sl_3.03-1.dsc s 0
+ pool/main/s/sl/sl_3.03.orig.tar.gz s 0
+ pool/main/s/sl/sl_3.03-1.debian.tar.xz s 0
+ pool/main/s/sl/sl_3.03-1_$ARCH.deb b 0
+ pool/main/s/sl/sl-addons_3.03-1_all.deb a 1
+
+Distribution: bullseye
+Source: sl
+Version: 3.03-1
+Files:
+ pool/main/s/sl/sl_3.03-1_$ARCH.deb b 1
+ pool/main/s/sl/sl_3.03-1.dsc s 1
+ pool/main/s/sl/sl_3.03.orig.tar.gz s 1
+ pool/main/s/sl/sl_3.03-1.debian.tar.xz s 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_movesrc() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO movesrc bullseye buster sl
+ assertEquals "\
+bullseye|main|$ARCH: sl 3.03-1
+bullseye|main|$ARCH: sl-addons 3.03-1
+bullseye|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list bullseye)"
+ assertEquals "" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_movematched() {
+ (cd $PKGS && PACKAGE=sl SECTION=main DISTRI=buster EPOCH="" VERSION=3.03 REVISION=-1 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/sl_3.03-1_${ARCH}.changes
+ add_distro bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO movematched bullseye buster "sl-a*on?"
+ assertEquals "bullseye|main|$ARCH: sl-addons 3.03-1" "$($REPREPRO -b $REPO list bullseye)"
+ assertEquals "\
+buster|main|$ARCH: sl 3.03-1
+buster|main|source: sl 3.03-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_archive() {
+ clear_distro
+ add_distro buster-archive
+ add_distro buster "Limit: 1\nArchive: buster-archive"
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-2_${ARCH}.deb
+ assertEquals "\
+hello | 2.9-1 | buster-archive | $ARCH
+hello | 2.9-2 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-archive
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_archive_downgrade() {
+ clear_distro
+ add_distro buster-archive
+ add_distro buster "Limit: 1\nArchive: buster-archive"
+ add_distro buster-proposed
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-2_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster-proposed $PKGS/hello_2.9-1_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO move buster buster-proposed hello=2.9-1
+ assertEquals "\
+hello | 2.9-2 | buster-archive | $ARCH
+hello | 2.9-1 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-archive
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_archive_move() {
+ clear_distro
+ add_distro buster-archive "Limit: -1"
+ add_distro buster "Limit: 1\nArchive: buster-archive"
+ add_distro buster-proposed "Limit: -1"
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-3 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster-proposed $PKGS/hello_2.9-1_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster-proposed $PKGS/hello_2.9-2_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster-proposed $PKGS/hello_2.9-3_${ARCH}.deb
+ assertEquals "\
+hello | 2.9-3 | buster-proposed | $ARCH
+hello | 2.9-2 | buster-proposed | $ARCH
+hello | 2.9-1 | buster-proposed | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-3
+Files:
+ pool/main/h/hello/hello_2.9-3_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main move buster buster-proposed hello=2.9-1
+ assertEquals "\
+hello | 2.9-1 | buster | $ARCH
+hello | 2.9-3 | buster-proposed | $ARCH
+hello | 2.9-2 | buster-proposed | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-3
+Files:
+ pool/main/h/hello/hello_2.9-3_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main move buster buster-proposed hello=2.9-2
+ assertEquals "\
+hello | 2.9-1 | buster-archive | $ARCH
+hello | 2.9-2 | buster | $ARCH
+hello | 2.9-3 | buster-proposed | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-archive
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster-proposed
+Source: hello
+Version: 2.9-3
+Files:
+ pool/main/h/hello/hello_2.9-3_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main move buster buster-proposed hello
+ assertEquals "\
+hello | 2.9-2 | buster-archive | $ARCH
+hello | 2.9-1 | buster-archive | $ARCH
+hello | 2.9-3 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-archive
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster-archive
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster
+Source: hello
+Version: 2.9-3
+Files:
+ pool/main/h/hello/hello_2.9-3_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_archive_move_back() {
+ clear_distro
+ add_distro buster-archive "Limit: -1"
+ add_distro buster "Limit: 1\nArchive: buster-archive"
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-2 ../genpackage.sh)
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster-archive $PKGS/hello_2.9-2_${ARCH}.deb
+ assertEquals "\
+hello | 2.9-2 | buster-archive | $ARCH
+hello | 2.9-1 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main move buster buster-archive hello=2.9-2
+ assertEquals "\
+hello | 2.9-1 | buster-archive | $ARCH
+hello | 2.9-2 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_ddeb() {
+ clear_distro
+ add_distro buster "DDebComponents: main non-free"
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster VERSION=2.9 REVISION=-1 DDEB=1 ../genpackage.sh)
+ #mv $PKGS/hello_2.9-1_${ARCH}.deb $PKGS/hello_2.9-1_${ARCH}.ddeb
+ #sed -i "s/hello_2.9-1_${ARCH}.deb/hello_2.9-1_${ARCH}.ddeb/g" $PKGS/hello_2.9-1_${ARCH}.changes
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main include buster $PKGS/hello_2.9-1_${ARCH}.changes
+ assertEquals "hello | 2.9-1 | buster | $ARCH, source" "$($REPREPRO -b $REPO ls hello)"
+}
+
+. shunit2
diff --git a/tests/brokenuncompressor.sh b/tests/brokenuncompressor.sh
new file mode 100755
index 0000000..42ea32a
--- /dev/null
+++ b/tests/brokenuncompressor.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+if [ $# -ne 0 ] ; then
+ echo "brokenuncompressor.sh: Wrong number of arguments: $#" >&2
+ exit 17
+fi
+$uncompressor
+if test -f breakon2nd ; then
+ rm breakon2nd
+ exit 0;
+fi
+# Breaking an .lzma stream is hard, faking it is more reproducible...
+echo "brokenuncompressor.sh: claiming broken archive" >&2
+exit 1
diff --git a/tests/buildinfo.test b/tests/buildinfo.test
new file mode 100644
index 0000000..e3d4c7a
--- /dev/null
+++ b/tests/buildinfo.test
@@ -0,0 +1,656 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+
+gensource() {
+ srcname="$1"
+ version="$2"
+ build="$3"
+ mkdir "${srcname}-${version}"
+ cd "${srcname}-${version}"
+ mkdir debian
+ cat > debian/rules <<'EOF'
+#!/usr/bin/make -f
+tmp = $(CURDIR)/debian/tmp
+build-indep build-arch:
+binary-indep binary-arch:
+ rm -f debian/files # prep
+ # wrong directory, but not relevant for the test
+ install -m 755 -d $(tmp)/DEBIAN $(tmp)/usr/share/doc/documentation
+ echo "I have told you so" > $(tmp)/usr/share/doc/documentation/NEWS
+ gzip -c9 debian/changelog > $(tmp)/usr/share/doc/documentation/changelog.gz
+ chown -R root.root $(tmp) && chmod -R go=rX $(tmp)
+ dpkg-gencontrol -isp
+ dpkg --build $(tmp) ..
+clean:
+ rm -f debian/files
+.PHONY: clean binary-arch binary-indep binary build build-indep buil-arch
+EOF
+ chmod a+x debian/rules
+ cat > debian/changelog <<EOF
+${srcname} (${version}) test; urgency=low
+
+ * everything fixed
+
+ -- Sky.NET <nowhere@example.com> Sat, 15 Jan 2011 17:12:05 +2700
+EOF
+
+ cat > debian/control <<EOF
+Source: ${srcname}
+Section: doc
+Priority: standard
+Maintainer: Sky.NET <nowhere@example.com>
+Standards-Version: Aleph_17
+
+EOF
+ cat >> debian/control
+ # sadly dpkg-buildinfo does not allow faking the architecture, so this gets more ugly:
+ echo simulating: dpkg-buildpackage -rfakeroot -us -uc -S
+ fakeroot debian/rules clean
+ (cd .. && dpkg-source -b "${srcname}-${version}")
+ dpkg-genchanges --build=source > "../${srcname}_${version}_source.changes"
+ dpkg-genbuildinfo --build=source
+ dpkg-genchanges --build=source > "../${srcname}_${version}_sourceandbuild.changes"
+ if ! grep buildinfo "../${srcname}_${version}_sourceandbuild.changes" ; then
+ ed -s "../${srcname}_${version}_sourceandbuild.changes" <<EOF
+/^Checksums-Sha1:/a
+ $(sha1andsize "../${srcname}_${version}_source.buildinfo") ${srcname}_${version}_source.buildinfo
+.
+/^Checksums-Sha256:/a
+ $(sha2andsize "../${srcname}_${version}_source.buildinfo") ${srcname}_${version}_source.buildinfo
+.
+/^Files:/a
+ $(mdandsize "../${srcname}_${version}_source.buildinfo") doc standard ${srcname}_${version}_source.buildinfo
+.
+w
+q
+EOF
+ fi
+ echo simulating dpkg-buildpackage -rfakeroot -us -uc --build="$build"
+ fakeroot debian/rules clean
+ case $build in
+ any) debian/rules build-arch ; fakeroot debian/rules binary-arch ;;
+ all) debian/rules build-indep ; fakeroot debian/rules binary-indep ;;
+ *) echo "unknown build type" ; exit 1 ;;
+ esac
+ dpkg-genbuildinfo --build="$build"
+ dpkg-genchanges --build="$build" > "../${srcname}_${version}_binary.changes"
+ cd ..
+}
+
+gensource 'source-a' '1' all << EOF
+Package: onlyall
+Architecture: all
+Description: documentation
+ documentation
+EOF
+gensource 'source-b' '2' any << EOF
+Package: onlyany
+Architecture: any
+Description: binaries
+ binaries
+EOF
+
+mkdir "source-c-3"
+cd "source-c-3"
+mkdir debian
+cat > debian/rules <<'EOF'
+#!/usr/bin/make -f
+build-indep build-arch:
+binary-indep binary-arch: binary-%:
+ rm -rf debian/tmp #prep
+ install -m 755 -d debian/tmp/DEBIAN debian/tmp/usr/share/doc/some$*
+ echo "I have told you so" > debian/tmp/usr/share/doc/some$*/NEWS
+ gzip -c9 debian/changelog > debian/tmp/usr/share/doc/some$*/changelog.gz
+ chown -R root.root debian/tmp && chmod -R go=rX debian/tmp
+ dpkg-gencontrol -isp -psome$* -Pdebian/tmp
+ dpkg --build debian/tmp ..
+clean:
+ rm -f debian/files -r debian/tmp
+.PHONY: clean binary-arch binary-indep binary build build-indep buil-arch
+EOF
+chmod a+x debian/rules
+cat > debian/changelog <<EOF
+source-c (3) test; urgency=low
+
+ * everything fixed
+
+ -- Sky.NET <nowhere@example.com> Sat, 15 Jan 2011 17:12:05 +2700
+EOF
+
+cat > debian/control <<EOF
+Source: source-c
+Section: shells
+Priority: required
+Maintainer: Sky.NET <nowhere@example.com>
+Standards-Version: Aleph_17
+
+Package: somearch
+Architecture: any
+Description: binaries
+ binaries
+
+Package: someindep
+Architecture: all
+Description: scripts
+ scripts
+EOF
+# sadly dpkg-genbuildinfo does not allow faking the architecture, so this gets more ugly:
+echo simulating dpkg-buildpackage -rfakeroot -us -uc --build="full"
+fakeroot debian/rules clean
+(cd .. && dpkg-source -b source-c-3)
+debian/rules build-arch ; debian/rules build-indep
+fakeroot debian/rules binary-arch
+fakeroot debian/rules binary-indep
+dpkg-genbuildinfo --build=full
+dpkg-genchanges --build=full > "../source-c_3_full.changes"
+cd ..
+
+rm -r source-a-1 source-b-2 source-c-3
+
+# first check include:
+mkdir conf
+cat > conf/options <<EOF
+export silent-never
+EOF
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+EOF
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b
+
+echo Check that include properly ignores a .buildinfo file
+testrun - include test source-a_1_binary.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-a_1_all.buildinfo'!
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'coal' 'deb')
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_binary.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-b_2_abacus.buildinfo'!
+-v1*=Skipping 'onlyany_2_abacus.deb' as architecture is not in the requested set.
+*=source-b_2_binary.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_source.changes 3<<EOF
+stderr
+-v1*=Skipping 'source-b_2.dsc' as architecture 'source' is not in the requested set.
+-v1*=Skipping 'source-b_2.tar.gz' as architecture 'source' is not in the requested set.
+*=source-b_2_source.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-b_2_source.buildinfo'!
+-v1*=Skipping 'source-b_2.dsc' as architecture 'source' is not in the requested set.
+-v1*=Skipping 'source-b_2.tar.gz' as architecture 'source' is not in the requested set.
+*=source-b_2_sourceandbuild.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b
+testrun - include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-b_2_source.buildinfo'!
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+EOF
+rm -r pool db
+
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: abacus source
+Components: main
+EOF
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+
+echo Check that include properly ignores a .buildinfo file
+testrun - include test source-a_1_binary.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-a_1_all.buildinfo'!
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+EOF
+
+testrun - include test source-b_2_binary.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-b_2_abacus.buildinfo'!
+stdout
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+EOF
+
+testrun - include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-b_2_source.buildinfo'!
+stdout
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+EOF
+
+testrun - include test source-c_3_full.changes 3<<EOF
+stderr
+-v3*=Ignoring buildinfo file: 'source-c_3_abacus.buildinfo'!
+stdout
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+EOF
+rm -r pool db
+
+echo now the same with tracking information:
+
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+Tracking: minimal includebuildinfos
+EOF
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b
+
+echo Check that include properly ignores a .buildinfo file
+testrun - include test source-a_1_binary.changes 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(ofa 'pool/main/s/source-a/source-a_1_all.buildinfo')
+$(opa 'onlyall' '1' 'test' 'main' 'coal' 'deb')
+$(ota 'test' 'source-a')
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_binary.changes 3<<EOF
+stderr
+-v1*=Skipping 'source-b_2_abacus.buildinfo' as architecture is not in the requested set.
+-v1*=Skipping 'onlyany_2_abacus.deb' as architecture is not in the requested set.
+*=source-b_2_binary.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_source.changes 3<<EOF
+stderr
+-v1*=Skipping 'source-b_2.dsc' as architecture 'source' is not in the requested set.
+-v1*=Skipping 'source-b_2.tar.gz' as architecture 'source' is not in the requested set.
+*=source-b_2_source.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir pool
+testrun - -A coal include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+-v1*=Skipping 'source-b_2_source.buildinfo' as architecture 'source' is not in the requested set.
+-v1*=Skipping 'source-b_2.dsc' as architecture 'source' is not in the requested set.
+-v1*=Skipping 'source-b_2.tar.gz' as architecture 'source' is not in the requested set.
+*=source-b_2_sourceandbuild.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+stdout
+$(odb)
+EOF
+rm -r pool db
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b
+testrun - include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-b/source-b_2_source.buildinfo')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-b')
+EOF
+rm -r pool db
+
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: abacus source
+Components: main
+Tracking: minimal includebuildinfos
+EOF
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+
+echo Check that include properly ignores a .buildinfo file
+testrun - include test source-a_1_binary.changes 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/source-a_1_all.buildinfo')
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-a')
+EOF
+
+testrun - include test source-b_2_binary.changes 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/s/source-b/source-b_2_abacus.buildinfo')
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-b')
+EOF
+
+testrun - include test source-b_2_sourceandbuild.changes 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/s/source-b/source-b_2_source.buildinfo')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+EOF
+
+testrun - include test source-c_3_full.changes 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/s/source-c/source-c_3_abacus.buildinfo')
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-c')
+EOF
+rm -r pool db
+
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+mkdir i j tmp
+mv source-?_* *.deb j/
+cp j/* i/
+cat > conf/incoming <<EOF
+Name: foo
+IncomingDir: i
+TempDir: tmp
+Default: test
+EOF
+
+# avoid problems with the order of files:
+rm './i/source-b_2_source.changes'
+rm './i/source-a_1_source.changes'
+
+echo check to process all .changes at the same time with tracking:
+testrun - processincoming foo 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/source-a_1_all.buildinfo')
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-a/source-a_1_source.buildinfo')
+$(ofa 'pool/main/s/source-a/source-a_1.dsc')
+$(ofa 'pool/main/s/source-a/source-a_1.tar.gz')
+$(opa 'source-a' '1' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-a')
+$(ofa 'pool/main/s/source-b/source-b_2_abacus.buildinfo')
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-b/source-b_2_source.buildinfo')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-b')
+$(ofa 'pool/main/s/source-c/source-c_3_abacus.buildinfo')
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-c')
+-v3*=deleting './i/onlyall_1_all.deb'...
+-v3*=deleting './i/onlyany_2_abacus.deb'...
+-v3*=deleting './i/somearch_3_abacus.deb'...
+-v3*=deleting './i/someindep_3_all.deb'...
+-v3*=deleting './i/source-a_1.dsc'...
+-v3*=deleting './i/source-a_1.tar.gz'...
+-v3*=deleting './i/source-a_1_all.buildinfo'...
+-v3*=deleting './i/source-a_1_binary.changes'...
+-v3*=deleting './i/source-a_1_source.buildinfo'...
+-v3*=deleting './i/source-a_1_sourceandbuild.changes'...
+-v3*=deleting './i/source-b_2.dsc'...
+-v3*=deleting './i/source-b_2.tar.gz'...
+-v3*=deleting './i/source-b_2_abacus.buildinfo'...
+-v3*=deleting './i/source-b_2_binary.changes'...
+-v3*=deleting './i/source-b_2_source.buildinfo'...
+-v3*=deleting './i/source-b_2_sourceandbuild.changes'...
+-v3*=deleting './i/source-c_3.dsc'...
+-v3*=deleting './i/source-c_3.tar.gz'...
+-v3*=deleting './i/source-c_3_abacus.buildinfo'...
+-v3*=deleting './i/source-c_3_full.changes'...
+EOF
+
+rm -r db pool
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+
+cp j/* i/
+rm './i/source-b_2_source.changes'
+rm './i/source-a_1_source.changes'
+ed -s conf/distributions <<EOF
+g/^Tracking: /s/include[^ ]*//g
+w
+q
+EOF
+
+echo check to process all .changes at the same time without storing buildinfo:
+testrun - processincoming foo 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-a/source-a_1.dsc')
+$(ofa 'pool/main/s/source-a/source-a_1.tar.gz')
+$(opa 'source-a' '1' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-a')
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-b')
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-c')
+-v3*=deleting './i/onlyall_1_all.deb'...
+-v3*=deleting './i/onlyany_2_abacus.deb'...
+-v3*=deleting './i/somearch_3_abacus.deb'...
+-v3*=deleting './i/someindep_3_all.deb'...
+-v3*=deleting './i/source-a_1.dsc'...
+-v3*=deleting './i/source-a_1.tar.gz'...
+-v3*=deleting './i/source-a_1_binary.changes'...
+-v3*=deleting './i/source-a_1_sourceandbuild.changes'...
+-v3*=deleting './i/source-b_2.dsc'...
+-v3*=deleting './i/source-b_2.tar.gz'...
+-v3*=deleting './i/source-b_2_binary.changes'...
+-v3*=deleting './i/source-b_2_sourceandbuild.changes'...
+-v3*=deleting './i/source-c_3.dsc'...
+-v3*=deleting './i/source-c_3.tar.gz'...
+-v3*=deleting './i/source-c_3_full.changes'...
+EOF
+
+echo check to process all .changes at the same time without storing buildinfo but deleting them:
+cat >> conf/incoming <<EOF
+Cleanup: unused_buildinfo_files
+EOF
+
+rm -r db pool
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+
+rm -r i
+mkdir i
+cp j/* i/
+rm './i/source-b_2_source.changes'
+rm './i/source-a_1_source.changes'
+
+testrun - processincoming foo 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-a/source-a_1.dsc')
+$(ofa 'pool/main/s/source-a/source-a_1.tar.gz')
+$(opa 'source-a' '1' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-a')
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-b')
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-c')
+-v3*=deleting './i/onlyall_1_all.deb'...
+-v3*=deleting './i/onlyany_2_abacus.deb'...
+-v3*=deleting './i/somearch_3_abacus.deb'...
+-v3*=deleting './i/someindep_3_all.deb'...
+-v3*=deleting './i/source-a_1.dsc'...
+-v3*=deleting './i/source-a_1.tar.gz'...
+-v3*=deleting './i/source-a_1_all.buildinfo'...
+-v3*=deleting './i/source-a_1_binary.changes'...
+-v3*=deleting './i/source-a_1_source.buildinfo'...
+-v3*=deleting './i/source-a_1_sourceandbuild.changes'...
+-v3*=deleting './i/source-b_2.dsc'...
+-v3*=deleting './i/source-b_2.tar.gz'...
+-v3*=deleting './i/source-b_2_abacus.buildinfo'...
+-v3*=deleting './i/source-b_2_binary.changes'...
+-v3*=deleting './i/source-b_2_source.buildinfo'...
+-v3*=deleting './i/source-b_2_sourceandbuild.changes'...
+-v3*=deleting './i/source-c_3.dsc'...
+-v3*=deleting './i/source-c_3.tar.gz'...
+-v3*=deleting './i/source-c_3_abacus.buildinfo'...
+-v3*=deleting './i/source-c_3_full.changes'...
+EOF
+
+echo Now check storing the .buildinfo files in a logdir:
+sed -e 's/^Cleanup:.*/LogDir: log/' -i conf/incoming
+mkdir log
+
+rm -r db pool
+mkdir -p pool/main/s/source-a pool/main/s/source-b pool/main/s/source-c
+
+rm -r i
+mkdir i
+cp j/* i/
+rm './i/source-b_2_source.changes'
+rm './i/source-a_1_source.changes'
+
+testrun - processincoming foo 3<<EOF
+stderr
+stdout
+$(odb)
+$(ofa 'pool/main/s/source-a/onlyall_1_all.deb')
+$(opa 'onlyall' '1' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-a/source-a_1.dsc')
+$(ofa 'pool/main/s/source-a/source-a_1.tar.gz')
+$(opa 'source-a' '1' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-a')
+$(ofa 'pool/main/s/source-b/onlyany_2_abacus.deb')
+$(opa 'onlyany' '2' 'test' 'main' 'abacus' 'deb')
+$(ofa 'pool/main/s/source-b/source-b_2.dsc')
+$(ofa 'pool/main/s/source-b/source-b_2.tar.gz')
+$(opa 'source-b' '2' 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'source-b')
+$(ofa 'pool/main/s/source-c/source-c_3.dsc')
+$(ofa 'pool/main/s/source-c/source-c_3.tar.gz')
+$(ofa 'pool/main/s/source-c/somearch_3_abacus.deb')
+$(ofa 'pool/main/s/source-c/someindep_3_all.deb')
+$(opa 'source-c' '3' 'test' 'main' 'source' 'dsc')
+$(opa 'somearch' '3' 'test' 'main' 'abacus' 'deb')
+$(opa 'someindep' '3' 'test' 'main' 'abacus' 'deb')
+$(ota 'test' 'source-c')
+-v2*=Created directory "./log/source-c_3_source+abacus+all.0000000"
+-v2*=Created directory "./log/source-b_2_source.0000000"
+-v2*=Created directory "./log/source-b_2_abacus.0000000"
+-v2*=Created directory "./log/source-a_1_all.0000000"
+-v2*=Created directory "./log/source-a_1_source.0000000"
+-v3*=deleting './i/onlyall_1_all.deb'...
+-v3*=deleting './i/onlyany_2_abacus.deb'...
+-v3*=deleting './i/somearch_3_abacus.deb'...
+-v3*=deleting './i/someindep_3_all.deb'...
+-v3*=deleting './i/source-a_1.dsc'...
+-v3*=deleting './i/source-a_1.tar.gz'...
+-v3*=deleting './i/source-a_1_all.buildinfo'...
+-v3*=deleting './i/source-a_1_binary.changes'...
+-v3*=deleting './i/source-a_1_source.buildinfo'...
+-v3*=deleting './i/source-a_1_sourceandbuild.changes'...
+-v3*=deleting './i/source-b_2.dsc'...
+-v3*=deleting './i/source-b_2.tar.gz'...
+-v3*=deleting './i/source-b_2_abacus.buildinfo'...
+-v3*=deleting './i/source-b_2_binary.changes'...
+-v3*=deleting './i/source-b_2_source.buildinfo'...
+-v3*=deleting './i/source-b_2_sourceandbuild.changes'...
+-v3*=deleting './i/source-c_3.dsc'...
+-v3*=deleting './i/source-c_3.tar.gz'...
+-v3*=deleting './i/source-c_3_abacus.buildinfo'...
+-v3*=deleting './i/source-c_3_full.changes'...
+EOF
+
+dodiff log/source-a_1_all.0000000/source-a_1_all.buildinfo j/source-a_1_all.buildinfo
+dodiff log/source-a_1_source.0000000/source-a_1_source.buildinfo j/source-a_1_source.buildinfo
+dodiff log/source-b_2_abacus.0000000/source-b_2_abacus.buildinfo j/source-b_2_abacus.buildinfo
+dodiff log/source-b_2_source.0000000/source-b_2_source.buildinfo j/source-b_2_source.buildinfo
+dodiff log/source-c_3_source+abacus+all.0000000/source-c_3_abacus.buildinfo j/source-c_3_abacus.buildinfo
+
+testsuccess
diff --git a/tests/buildneeding.test b/tests/buildneeding.test
new file mode 100644
index 0000000..4d0b587
--- /dev/null
+++ b/tests/buildneeding.test
@@ -0,0 +1,631 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+mkdir package-1.0
+mkdir package-1.0/debian
+cat >package-1.0/debian/control <<END
+Source: package
+Section: sound
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: rumsrumsrums
+Architecture: all
+Description: a package
+ .
+
+Package: dumdidum
+Architecture: another
+Description: a package not build
+ .
+
+Package: troettroet
+Architecture: abacus
+Description: some test-package
+ .
+END
+cat >package-1.0/debian/changelog <<END
+package (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.orgguess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+
+dpkg-source -b package-1.0
+
+cat > conf/distributions <<EOF
+Codename: bla
+Suite: test
+Components: main
+Architectures: source abacus another
+Tracking: all
+
+Codename: blub
+Components: main
+Architectures: notinbla
+
+Codename: oses
+Components: main
+Architectures: source abacus kfreebsd-abacus hurd-abacus
+Tracking: all
+EOF
+cat >> conf/options <<EOF
+export silent-never
+EOF
+
+# to get databases:
+testrun - rereference 3<<EOF
+stdout
+$(odb)
+-v1*=Referencing oses...
+-v3*=Unlocking dependencies of oses|main|abacus...
+-v3*=Referencing oses|main|abacus...
+-v2=Rereferencing oses|main|abacus...
+-v3*=Unlocking dependencies of oses|main|kfreebsd-abacus...
+-v3*=Referencing oses|main|kfreebsd-abacus...
+-v2=Rereferencing oses|main|kfreebsd-abacus...
+-v3*=Unlocking dependencies of oses|main|hurd-abacus...
+-v3*=Referencing oses|main|hurd-abacus...
+-v2=Rereferencing oses|main|hurd-abacus...
+-v3*=Unlocking dependencies of oses|main|source...
+-v3*=Referencing oses|main|source...
+-v2=Rereferencing oses|main|source...
+-v1*=Referencing bla...
+-v3*=Unlocking dependencies of bla|main|abacus...
+-v3*=Referencing bla|main|abacus...
+-v2=Rereferencing bla|main|abacus...
+-v3*=Unlocking dependencies of bla|main|another...
+-v3*=Referencing bla|main|another...
+-v2=Rereferencing bla|main|another...
+-v3*=Unlocking dependencies of bla|main|source...
+-v3*=Referencing bla|main|source...
+-v2=Rereferencing bla|main|source...
+-v1*=Referencing blub...
+-v3*=Unlocking dependencies of blub|main|notinbla...
+-v3*=Referencing blub|main|notinbla...
+-v2=Rereferencing blub|main|notinbla...
+EOF
+
+testrun empty build-needing test abacus
+testrun empty build-needing test all
+testrun empty build-needing test any
+
+testrun - includedsc test package_1.0-1.dsc 3<<EOF
+stderr
+-v1*=package_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/p"
+-v2*=Created directory "./pool/main/p/package"
+$(ofa 'pool/main/p/package/package_1.0-1.dsc')
+$(ofa 'pool/main/p/package/package_1.0-1.tar.gz')
+$(opa 'package' '1.0-1' 'bla' 'main' 'source' 'dsc')
+$(ota 'bla' 'package')
+EOF
+rm package_1.0*
+
+testrun - build-needing test another 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test abacus 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test all 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test any 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc all
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc another
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc abacus
+EOF
+testrun - build-needing test source 3<<EOF
+stderr
+*=Error: Architecture 'source' makes no sense for build-needing!
+-v0*=There have been errors!
+returns 255
+EOF
+testrun - build-needing test mistake 3<<EOF
+stderr
+*=Error: Architecture 'mistake' is not known!
+-v0*=There have been errors!
+returns 255
+EOF
+testrun - build-needing test notinbla 3<<EOF
+stderr
+*=Error: Architecture 'notinbla' not found in distribution 'bla'!
+-v0*=There have been errors!
+returns 255
+EOF
+
+mkdir package-1.0/debian/tmp
+mkdir package-1.0/debian/tmp/DEBIAN
+mkdir -p package-1.0/debian/tmp/usr/share/sounds
+touch package-1.0/debian/tmp/usr/share/sounds/krach.wav
+cd package-1.0
+dpkg-gencontrol -prumsrumsrums
+dpkg --build debian/tmp ..
+cd ..
+
+testrun - -C main includedeb test rumsrumsrums_1.0-1_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/p/package/rumsrumsrums_1.0-1_all.deb')
+$(opa 'rumsrumsrums' '1.0-1' 'bla' 'main' 'abacus' 'deb')
+$(opa 'rumsrumsrums' x 'bla' 'main' 'another' 'deb')
+EOF
+
+testrun - build-needing test another 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test abacus 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test any 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc another
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc abacus
+EOF
+testrun empty build-needing test all
+
+cd package-1.0
+dpkg-gencontrol -ptroettroet
+dpkg --build debian/tmp ..
+cd ..
+
+testrun - -C main includedeb test troettroet_1.0-1_abacus.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/p/package/troettroet_1.0-1_abacus.deb')
+$(opa 'troettroet' x 'bla' 'main' 'abacus' 'deb')
+EOF
+
+testrun - build-needing test another 3<<EOF
+stdout
+*=package 1.0-1 pool/main/p/package/package_1.0-1.dsc
+EOF
+testrun - build-needing test abacus 3<<EOF
+stdout
+EOF
+
+# Include a fake .log file to tell reprepro that architecture is done:
+
+echo "There was nothing to do on this architecture!" > package_1.0-1_another.log
+echo "package_1.0-1_another.log - -" > package-1.0/debian/files
+cd package-1.0
+dpkg-genchanges -B > ../package_1.0-1_another.changes
+cd ..
+
+# work around dpkg-dev 1.18.15+ no longer adding Architectures of log files:
+if ! grep -q -s '^Architecture:' package_1.0-1_another.changes ; then
+ sed -e 's/^Version:/Architecture: another\n&/' -i package_1.0-1_another.changes
+fi
+
+testrun - -C main include test package_1.0-1_another.changes 3<<EOF
+stderr
+*=Ignoring log file: 'package_1.0-1_another.log'!
+*=package_1.0-1_another.changes: Not enough files in .changes!
+-v0*=There have been errors!
+returns 255
+EOF
+
+sed -i -e 's/Tracking: all/Tracking: all includelogs/' conf/distributions
+
+testrun - -C main include test package_1.0-1_another.changes 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/p/package/package_1.0-1_another.log')
+EOF
+
+testrun empty build-needing test another
+testrun empty build-needing test abacus
+
+# TODO: add a new version of that package...
+rm -r package-1.0
+
+mkdir onlyonearch-1.0
+mkdir onlyonearch-1.0/debian
+cat >onlyonearch-1.0/debian/control <<END
+Source: onlyonearch
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: onearch
+Architecture: abacus
+Description: some test-onlyonearch
+ .
+END
+cat >onlyonearch-1.0/debian/changelog <<END
+onlyonearch (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.orgguess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b onlyonearch-1.0
+mkdir onlyonearch-1.0/debian/tmp
+mkdir onlyonearch-1.0/debian/tmp/DEBIAN
+mkdir -p onlyonearch-1.0/debian/tmp/usr/bin
+touch onlyonearch-1.0/debian/tmp/usr/bin/program
+cd onlyonearch-1.0
+dpkg-gencontrol -ponearch
+dpkg --build debian/tmp ..
+cd ..
+rm -r onlyonearch-1.0
+
+testrun - --delete includedsc test onlyonearch_1.0-1.dsc 3<<EOF
+stderr
+-v1*=onlyonearch_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/o"
+-v2*=Created directory "./pool/main/o/onlyonearch"
+$(ofa 'pool/main/o/onlyonearch/onlyonearch_1.0-1.dsc')
+$(ofa 'pool/main/o/onlyonearch/onlyonearch_1.0-1.tar.gz')
+$(opa 'onlyonearch' x 'bla' 'main' 'source' 'dsc')
+$(ota 'bla' 'onlyonearch')
+EOF
+
+testrun empty build-needing test another
+testrun empty build-needing test all
+testrun - build-needing test abacus 3<<EOF
+stdout
+*=onlyonearch 1.0-1 pool/main/o/onlyonearch/onlyonearch_1.0-1.dsc
+EOF
+
+testrun - build-needing test any 3<<EOF
+stdout
+*=onlyonearch 1.0-1 pool/main/o/onlyonearch/onlyonearch_1.0-1.dsc abacus
+EOF
+
+testrun - --delete -C main includedeb test onearch_1.0-1_abacus.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/o/onlyonearch/onearch_1.0-1_abacus.deb')
+$(opa 'onearch' x 'bla' 'main' 'abacus' 'deb')
+EOF
+
+testrun empty build-needing test another
+testrun empty build-needing test abacus
+testrun empty build-needing test all
+testrun empty build-needing test any
+
+mkdir onlyarchall-1.0
+mkdir onlyarchall-1.0/debian
+cat >onlyarchall-1.0/debian/control <<END
+Source: onlyarchall
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: archall
+Architecture: all
+Description: some test-arch all package
+ .
+END
+cat >onlyarchall-1.0/debian/changelog <<END
+onlyarchall (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.orgguess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b onlyarchall-1.0
+mkdir onlyarchall-1.0/debian/tmp
+mkdir onlyarchall-1.0/debian/tmp/DEBIAN
+mkdir -p onlyarchall-1.0/debian/tmp/usr/bin
+touch onlyarchall-1.0/debian/tmp/usr/bin/program
+cd onlyarchall-1.0
+dpkg-gencontrol -parchall
+dpkg --build debian/tmp ..
+cd ..
+rm -r onlyarchall-1.0
+
+testrun - --delete includedsc test onlyarchall_1.0-1.dsc 3<<EOF
+stderr
+-v1*=onlyarchall_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/o/onlyarchall"
+$(ofa 'pool/main/o/onlyarchall/onlyarchall_1.0-1.dsc')
+$(ofa 'pool/main/o/onlyarchall/onlyarchall_1.0-1.tar.gz')
+$(opa 'onlyarchall' x 'bla' 'main' 'source' 'dsc')
+$(ota 'bla' 'onlyarchall')
+EOF
+
+testrun empty build-needing test another
+testrun empty build-needing test abacus
+testrun - build-needing test all 3<<EOF
+stdout
+*=onlyarchall 1.0-1 pool/main/o/onlyarchall/onlyarchall_1.0-1.dsc
+EOF
+testrun - build-needing test any 3<<EOF
+stdout
+*=onlyarchall 1.0-1 pool/main/o/onlyarchall/onlyarchall_1.0-1.dsc all
+EOF
+
+testrun - --delete -C main includedeb test archall_1.0-1_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/o/onlyarchall/archall_1.0-1_all.deb')
+$(opa 'archall' x 'bla' 'main' 'abacus' 'deb')
+$(opa 'archall' x 'bla' 'main' 'another' 'deb')
+EOF
+
+testrun empty build-needing test another
+testrun empty build-needing test abacus
+testrun empty build-needing test any
+
+mkdir allandany-1.0
+mkdir allandany-1.0/debian
+cat >allandany-1.0/debian/control <<END
+Source: allandany
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: allpart
+Architecture: all
+Description: some test-arch all package
+ .
+
+Package: anypart
+Architecture: any
+Description: some test-arch any package
+ .
+END
+cat >allandany-1.0/debian/changelog <<END
+allandany (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.org> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b allandany-1.0
+mkdir allandany-1.0/debian/tmp
+mkdir allandany-1.0/debian/tmp/DEBIAN
+mkdir -p allandany-1.0/debian/tmp/usr/bin
+touch allandany-1.0/debian/tmp/usr/bin/program
+cd allandany-1.0
+dpkg-gencontrol -panypart
+dpkg --build debian/tmp ..
+cd ..
+rm -r allandany-1.0/debian/tmp
+mkdir allandany-1.0/debian/tmp
+mkdir allandany-1.0/debian/tmp/DEBIAN
+mkdir -p allandany-1.0/debian/tmp/usr/share
+touch allandany-1.0/debian/tmp/usr/share/data
+cd allandany-1.0
+dpkg-gencontrol -pallpart
+dpkg --build debian/tmp ..
+cd ..
+echo "There was nothing to do on this architecture!" > allandany_1.0-1_another.log
+echo "allandany_1.0-1_another.log - -" > allandany-1.0/debian/files
+cd allandany-1.0
+dpkg-genchanges -B > ../allandany_1.0-1_another.changes
+cd ..
+rm -r allandany-1.0
+
+# work around dpkg-dev 1.18.15+ no longer adding Architectures of log files:
+if ! grep -q -s '^Architecture:' allandany_1.0-1_another.changes ; then
+ sed -e 's/^Version:/Architecture: another\n&/' -i allandany_1.0-1_another.changes
+fi
+
+testrun - --delete includedsc test allandany_1.0-1.dsc 3<<EOF
+stderr
+-v1*=allandany_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/a"
+-v2*=Created directory "./pool/main/a/allandany"
+$(ofa 'pool/main/a/allandany/allandany_1.0-1.dsc')
+$(ofa 'pool/main/a/allandany/allandany_1.0-1.tar.gz')
+$(opa 'allandany' x 'bla' 'main' 'source' 'dsc')
+$(ota 'bla' 'allandany')
+EOF
+
+testrun - build-needing test another 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc
+EOF
+testrun - build-needing test abacus 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc
+EOF
+testrun - build-needing test all 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc
+EOF
+testrun - build-needing test any 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc abacus
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc another
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc all
+EOF
+
+testrun - --delete -C main includedeb test anypart_1.0-1_abacus.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/a/allandany/anypart_1.0-1_abacus.deb')
+$(opa 'anypart' x 'bla' 'main' 'abacus' 'deb')
+EOF
+
+testrun empty build-needing test abacus
+testrun - build-needing test all 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc
+EOF
+testrun - build-needing test any 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc another
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc all
+EOF
+
+testrun - --delete -C main includedeb test allpart_1.0-1_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/a/allandany/allpart_1.0-1_all.deb')
+$(opa 'allpart' x 'bla' 'main' 'abacus' 'deb')
+$(opa 'allpart' x 'bla' 'main' 'another' 'deb')
+EOF
+
+testrun empty build-needing test abacus
+testrun empty build-needing test all
+testrun - build-needing test any 3<<EOF
+stdout
+*=allandany 1.0-1 pool/main/a/allandany/allandany_1.0-1.dsc another
+EOF
+
+testrun - -C main include test allandany_1.0-1_another.changes 3<<EOF
+stderr
+stdout
+$(ofa 'pool/main/a/allandany/allandany_1.0-1_another.log')
+EOF
+
+testrun empty build-needing test any
+
+mkdir anyonly-1.0
+mkdir anyonly-1.0/debian
+cat >anyonly-1.0/debian/control <<END
+Source: anyonly
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: anyonly
+Architecture: any
+Description: some test-arch any package
+ .
+END
+cat >anyonly-1.0/debian/changelog <<END
+anyonly (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.org> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b anyonly-1.0
+mkdir anyonly-1.0/debian/tmp
+mkdir anyonly-1.0/debian/tmp/DEBIAN
+mkdir -p anyonly-1.0/debian/tmp/usr/bin
+touch anyonly-1.0/debian/tmp/usr/bin/program
+cd anyonly-1.0
+dpkg-gencontrol -panyonly
+dpkg --build debian/tmp ..
+cd ..
+rm -r anyonly-1.0
+
+testrun - --delete includedsc test anyonly_1.0-1.dsc 3<<EOF
+stderr
+-v1*=anyonly_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/a/anyonly"
+$(ofa 'pool/main/a/anyonly/anyonly_1.0-1.dsc')
+$(ofa 'pool/main/a/anyonly/anyonly_1.0-1.tar.gz')
+$(opa 'anyonly' x 'bla' 'main' 'source' 'dsc')
+$(ota 'bla' 'anyonly')
+EOF
+
+testrun empty build-needing test all
+testrun - build-needing test any 3<<EOF
+stdout
+*=anyonly 1.0-1 pool/main/a/anyonly/anyonly_1.0-1.dsc another
+*=anyonly 1.0-1 pool/main/a/anyonly/anyonly_1.0-1.dsc abacus
+EOF
+
+mkdir linuxwildcard-1.0
+mkdir linuxwildcard-1.0/debian
+cat >linuxwildcard-1.0/debian/control <<END
+Source: linuxwildcard
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: linuxwildcard
+Architecture: linux-any
+Description: some test-arch any package
+ .
+END
+cat >linuxwildcard-1.0/debian/changelog <<END
+linuxwildcard (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.org> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b linuxwildcard-1.0
+rm -r linuxwildcard-1.0
+
+mkdir kfreebsdwildcard-1.0
+mkdir kfreebsdwildcard-1.0/debian
+cat >kfreebsdwildcard-1.0/debian/control <<END
+Source: kfreebsdwildcard
+Section: something
+Priority: extra
+Maintainer: me <me@example.org>
+Standards-Version: 0.0
+
+Package: kfreebsdwildcard
+Architecture: kfreebsd-any
+Description: some test-arch any package
+ .
+END
+cat >kfreebsdwildcard-1.0/debian/changelog <<END
+kfreebsdwildcard (1.0-1) test; urgency=critical
+
+ * first version
+
+ -- me <me@example.org> Mon, 01 Jan 1980 01:02:02 +0000
+END
+dpkg-source -b kfreebsdwildcard-1.0
+rm -r kfreebsdwildcard-1.0
+
+testrun - includedsc oses linuxwildcard_1.0-1.dsc 3<<EOF
+stderr
+-v1*=linuxwildcard_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/l"
+-v2*=Created directory "./pool/main/l/linuxwildcard"
+$(ofa 'pool/main/l/linuxwildcard/linuxwildcard_1.0-1.dsc')
+$(ofa 'pool/main/l/linuxwildcard/linuxwildcard_1.0-1.tar.gz')
+$(opa 'linuxwildcard' x 'oses' 'main' 'source' 'dsc')
+$(ota 'oses' 'linuxwildcard')
+EOF
+
+testrun - includedsc oses kfreebsdwildcard_1.0-1.dsc 3<<EOF
+stderr
+-v1*=kfreebsdwildcard_1.0-1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/k"
+-v2*=Created directory "./pool/main/k/kfreebsdwildcard"
+$(ofa 'pool/main/k/kfreebsdwildcard/kfreebsdwildcard_1.0-1.dsc')
+$(ofa 'pool/main/k/kfreebsdwildcard/kfreebsdwildcard_1.0-1.tar.gz')
+$(opa 'kfreebsdwildcard' x 'oses' 'main' 'source' 'dsc')
+$(ota 'oses' 'kfreebsdwildcard')
+EOF
+
+testrun empty build-needing oses all
+testrun empty build-needing oses hurd-abacus
+testrun - build-needing oses abacus 3<<EOF
+stdout
+*=linuxwildcard 1.0-1 pool/main/l/linuxwildcard/linuxwildcard_1.0-1.dsc
+EOF
+testrun - build-needing oses kfreebsd-abacus 3<<EOF
+stdout
+*=kfreebsdwildcard 1.0-1 pool/main/k/kfreebsdwildcard/kfreebsdwildcard_1.0-1.dsc
+EOF
+
+rm -r pool conf db *.deb *.log *.changes
+testsuccess
diff --git a/tests/check.test b/tests/check.test
new file mode 100644
index 0000000..b9d160a
--- /dev/null
+++ b/tests/check.test
@@ -0,0 +1,225 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf db pool
+cat > conf/distributions <<EOF
+Codename: n
+Components: c
+Architectures: a
+EOF
+cat > conf/options <<EOF
+export silent-never
+EOF
+
+echo "fake-deb1" > fake1.deb
+echo "fake-deb2" > fake2.deb
+echo "fake-deb3" > fake3.deb
+
+fakedeb1md="$(md5 fake1.deb)"
+fakedeb2md="$(md5 fake2.deb)"
+fakedeb3md="$(md5 fake3.deb)"
+fakedeb1sha1="$(sha1 fake1.deb)"
+fakedeb2sha1="$(sha1 fake2.deb)"
+fakedeb3sha1="$(sha1 fake3.deb)"
+fakedeb1sha2="$(sha256 fake1.deb)"
+fakedeb2sha2="$(sha256 fake2.deb)"
+fakedeb3sha2="$(sha256 fake3.deb)"
+fakesize=10
+
+cat > fakeindex <<EOF
+Package: fake
+Version: 0
+Source: pseudo (9999)
+Architecture: all
+Filename: pool/c/p/pseudo/fake_0_all.deb
+Section: base
+Priority: extra
+Description: test
+ test
+Size: $fakesize
+MD5Sum: $fakedeb1md
+EOF
+
+testrun - -b . -C c -A a -T deb _addpackage n fakeindex fake 3<<EOF
+returns 249
+stderr
+*=Error: package fake version 0 lists file pool/c/p/pseudo/fake_0_all.deb not yet in the pool!
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir -p pool/c/p/pseudo
+cp fake2.deb pool/c/p/pseudo/fake_0_all.deb
+
+testrun - -b . _detect pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/c/p/pseudo/fake_0_all.deb')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+
+testrun - -b . -C c -A a -T deb _addpackage n fakeindex fake 3<<EOF
+returns 254
+stderr
+*=File "pool/c/p/pseudo/fake_0_all.deb" is already registered with different checksums!
+*=md5 expected: $fakedeb2md, got: $fakedeb1md
+*=Error: package fake version 0 lists different checksums than in the pool!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . _forget pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofd 'pool/c/p/pseudo/fake_0_all.deb' false)
+EOF
+
+cp fake1.deb pool/c/p/pseudo/fake_0_all.deb
+
+testrun - -b . _detect pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/c/p/pseudo/fake_0_all.deb')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+
+testrun - -b . -C c -A a -T deb _addpackage n fakeindex fake 3<<EOF
+stdout
+$(opa 'fake' '0' 'n' 'c' 'a' 'deb')
+-v1*=Adding 'fake' '0' to 'n|c|a'.
+EOF
+
+testrun - -b . checkpool 3<<EOF
+stderr
+stdout
+EOF
+
+testrun - -b . check 3<<EOF
+stderr
+stdout
+-v1*=Checking n...
+EOF
+
+cp fake3.deb pool/c/p/pseudo/fake_0_all.deb
+
+testrun - -b . check 3<<EOF
+stderr
+stdout
+-v1*=Checking n...
+EOF
+
+testrun - -b . checkpool 3<<EOF
+returns 254
+stderr
+*=WRONG CHECKSUMS of './pool/c/p/pseudo/fake_0_all.deb':
+*=md5 expected: $fakedeb1md, got: $fakedeb3md
+*=sha1 expected: $fakedeb1sha1, got: $fakedeb3sha1
+*=sha256 expected: $fakedeb1sha2, got: $fakedeb3sha2
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . _forget pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofd 'pool/c/p/pseudo/fake_0_all.deb' false)
+EOF
+
+testrun - -b . _detect pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/c/p/pseudo/fake_0_all.deb')
+EOF
+
+testrun - -b . checkpool 3<<EOF
+stderr
+stdout
+EOF
+
+testrun - -b . check 3<<EOF
+stdout
+-v1*=Checking n...
+stderr
+*=File "pool/c/p/pseudo/fake_0_all.deb" is already registered with different checksums!
+*=md5 expected: $fakedeb3md, got: $fakedeb1md
+*=Files are missing for 'fake'!
+-v0*=There have been errors!
+returns 254
+EOF
+
+testrun - -b . _forget pool/c/p/pseudo/fake_0_all.deb 3<<EOF
+stderr
+stdout
+$(ofd 'pool/c/p/pseudo/fake_0_all.deb' false)
+EOF
+
+# Correct size but wrong checksum:
+testrun - -b . check 3<<EOF
+stdout
+-v1*=Checking n...
+stderr
+*=Deleting unexpected file './pool/c/p/pseudo/fake_0_all.deb'!
+*=(not in database and wrong in pool)
+*= Missing file pool/c/p/pseudo/fake_0_all.deb
+*=Files are missing for 'fake'!
+-v0*=There have been errors!
+returns 249
+EOF
+# Wrong size:
+echo "Tooo long......" > pool/c/p/pseudo/fake_0_all.deb
+testrun - -b . check 3<<EOF
+stdout
+-v1*=Checking n...
+stderr
+*=Deleting unexpected file './pool/c/p/pseudo/fake_0_all.deb'!
+*=(not in database and wrong in pool)
+*= Missing file pool/c/p/pseudo/fake_0_all.deb
+*=Files are missing for 'fake'!
+-v0*=There have been errors!
+returns 249
+EOF
+
+cp fake1.deb pool/c/p/pseudo/fake_0_all.deb
+
+testrun - -b . check 3<<EOF
+stderr
+-v0*=Warning: readded existing file 'pool/c/p/pseudo/fake_0_all.deb' mysteriously missing from the checksum database.
+stdout
+-v1*=Checking n...
+$(ofa 'pool/c/p/pseudo/fake_0_all.deb')
+stderr
+EOF
+
+testout - -b . _dumpcontents 'n|c|a' 3<<EOF
+EOF
+
+cat >results.expected << EOF
+'fake' -> 'Package: fake
+Version: 0
+Source: pseudo (9999)
+Architecture: all
+Filename: pool/c/p/pseudo/fake_0_all.deb
+Section: base
+Priority: extra
+Description: test
+ test
+Size: $fakesize
+MD5Sum: $fakedeb1md
+'
+EOF
+dodiff results.expected results
+cat results
+
+testrun - -b . _listchecksums 3<<EOF
+stdout
+*=pool/c/p/pseudo/fake_0_all.deb :1:$fakedeb1sha1 :2:$fakedeb1sha2 $fakedeb1md $fakesize
+stderr
+EOF
+
+dodo test ! -e dists
+
+rm -r -f db conf pool fake*.deb fakeindex
+testsuccess
diff --git a/tests/copy.test b/tests/copy.test
new file mode 100644
index 0000000..73908b6
--- /dev/null
+++ b/tests/copy.test
@@ -0,0 +1,210 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -e dists
+mkdir conf db logs lists
+
+cat >> conf/distributions <<EOF
+Codename: a
+Architectures: abacus source
+Components: one two three
+
+Codename: b
+Architectures: abacus
+Components: one two four
+EOF
+
+DISTRI=a PACKAGE=aa EPOCH="" VERSION=1 REVISION="-1" FAKEVER="4-2" SECTION="one" genpackage.sh
+
+testrun - -b . --export=never --delete --delete include a test.changes 3<<EOF
+*=Warning: database 'a|one|abacus' was modified but no index file was exported.
+*=Warning: database 'a|one|source' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/one"
+-v2*=Created directory "./pool/one/a"
+-v2*=Created directory "./pool/one/a/aa"
+$(ofa 'pool/one/a/aa/aa-addons_4-2_all.deb')
+$(ofa 'pool/one/a/aa/aa_1-1_abacus.deb')
+$(ofa 'pool/one/a/aa/aa_1-1.tar.gz')
+$(ofa 'pool/one/a/aa/aa_1-1.dsc')
+$(opa 'aa-addons' '4-2' 'a' 'one' 'abacus' 'deb')
+$(opa 'aa' '1-1' 'a' 'one' 'abacus' 'deb')
+$(opa 'aa' '1-1' 'a' 'one' 'source' 'dsc')
+$(otta 'a' 'aa')
+-v5*=Deleting 'test.changes'.
+EOF
+
+DISTRI=a PACKAGE=aa EPOCH="" VERSION=1 REVISION="-2" FAKEVER="3-2" SECTION="two" genpackage.sh
+testrun - -b . --export=never --delete --delete include a test.changes 3<<EOF
+*=Warning: database 'a|two|abacus' was modified but no index file was exported.
+*=Warning: database 'a|two|source' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+stdout
+-v2*=Created directory "./pool/two"
+-v2*=Created directory "./pool/two/a"
+-v2*=Created directory "./pool/two/a/aa"
+$(ofa 'pool/two/a/aa/aa-addons_3-2_all.deb')
+$(ofa 'pool/two/a/aa/aa_1-2_abacus.deb')
+$(ofa 'pool/two/a/aa/aa_1-2.tar.gz')
+$(ofa 'pool/two/a/aa/aa_1-2.dsc')
+$(opa 'aa-addons' '3-2' 'a' 'two' 'abacus' 'deb')
+$(opa 'aa' 1-2 'a' 'two' 'abacus' 'deb')
+$(opa 'aa' 1-2 'a' 'two' 'source' 'dsc')
+$(otta 'a' 'aa')
+-v5*=Deleting 'test.changes'.
+EOF
+
+testrun - -b . ls aa 3<<EOF
+stdout
+*=aa | 1-1 | a | abacus, source
+*=aa | 1-2 | a | abacus, source
+returns 0
+EOF
+testrun - -b . ls aa-addons 3<<EOF
+stdout
+*=aa-addons | 4-2 | a | abacus
+*=aa-addons | 3-2 | a | abacus
+returns 0
+EOF
+
+testrun - -b . list a 3<<EOF
+stdout
+*=a|one|abacus: aa 1-1
+*=a|one|abacus: aa-addons 4-2
+*=a|one|source: aa 1-1
+*=a|two|abacus: aa 1-2
+*=a|two|abacus: aa-addons 3-2
+*=a|two|source: aa 1-2
+returns 0
+EOF
+
+testrun - -b . --export=never copy b a bb cc 3<<EOF
+stderr
+-v0*=Will not copy as not found: bb, cc.
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+EOF
+
+
+testrun - -b . --export=never copy b a aa-addons 3<<EOF
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+-v1*=Adding 'aa-addons' '4-2' to 'b|one|abacus'.
+$(opa 'aa-addons' '4-2' 'b' 'one' 'abacus' 'deb')
+-v1*=Adding 'aa-addons' '3-2' to 'b|two|abacus'.
+$(opa 'aa-addons' '3-2' 'b' 'two' 'abacus' 'deb')
+stderr
+*=Warning: database 'b|one|abacus' was modified but no index file was exported.
+*=Warning: database 'b|two|abacus' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+EOF
+
+testrun - -b . list b 3<<EOF
+stdout
+*=b|one|abacus: aa-addons 4-2
+*=b|two|abacus: aa-addons 3-2
+returns 0
+EOF
+
+testrun - -b . ls aa 3<<EOF
+stdout
+*=aa | 1-1 | a | abacus, source
+*=aa | 1-2 | a | abacus, source
+returns 0
+EOF
+testrun - -b . ls aa-addons 3<<EOF
+stdout
+*=aa-addons | 4-2 | a | abacus
+*=aa-addons | 3-2 | a | abacus
+*=aa-addons | 4-2 | b | abacus
+*=aa-addons | 3-2 | b | abacus
+returns 0
+EOF
+
+testrun - -b . --export=never remove b aa-addons 3<<EOF
+stdout
+$(opd 'aa-addons' unset b one abacus deb)
+$(opd 'aa-addons' unset b two abacus deb)
+stderr
+*=Warning: database 'b|one|abacus' was modified but no index file was exported.
+*=Warning: database 'b|two|abacus' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+EOF
+
+testrun - -b . ls aa-addons 3<<EOF
+stdout
+*=aa-addons | 4-2 | a | abacus
+*=aa-addons | 3-2 | a | abacus
+returns 0
+EOF
+
+testrun - -b . --export=never copysrc b a aa-addons 3<<EOF
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+stderr
+-v0*=Nothing to do as no package with source 'aa-addons' found!
+EOF
+
+testrun - -b . --export=never copysrc b a aa 4-2 3-2 3<<EOF
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+stderr
+-v0*=Nothing to do as no packages with source 'aa' and a requested source version found!
+EOF
+
+testrun - -b . --export=never copysrc b a aa 1-1 2-2 3<<EOF
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+-v1*=Adding 'aa-addons' '4-2' to 'b|one|abacus'.
+$(opa 'aa-addons' 4-2 'b' 'one' 'abacus' 'deb')
+-v1*=Adding 'aa' '1-1' to 'b|one|abacus'.
+$(opa 'aa' 1-1 'b' 'one' 'abacus' 'deb')
+stderr
+-v0*=Will not copy as not found: 2-2.
+-v6*=Found versions are: 1-1.
+*=Warning: database 'b|one|abacus' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+EOF
+
+testrun - -b . --export=never copysrc b a aa 1-1 1-2 3<<EOF
+stdout
+-v3*=Not looking into 'a|one|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|two|source' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|abacus' as no matching target in 'b'!
+-v3*=Not looking into 'a|three|source' as no matching target in 'b'!
+-v1*=Adding 'aa-addons' '4-2' to 'b|one|abacus'.
+$(opu 'aa-addons' 4-2 4-2 'b' 'one' 'abacus' 'deb')
+-v1*=Adding 'aa' '1-1' to 'b|one|abacus'.
+$(opu 'aa' 1-1 1-1 'b' 'one' 'abacus' 'deb')
+-v1*=Adding 'aa-addons' '3-2' to 'b|two|abacus'.
+$(opa 'aa-addons' 3-2 'b' 'two' 'abacus' 'deb')
+-v1*=Adding 'aa' '1-2' to 'b|two|abacus'.
+$(opa 'aa' 1-2 'b' 'two' 'abacus' 'deb')
+stderr
+-v6*=Found versions are: 1-1, 1-2.
+*=Warning: replacing 'aa-addons' version '4-2' with equal version '4-2' in 'b|one|abacus'!
+*=Warning: replacing 'aa' version '1-1' with equal version '1-1' in 'b|one|abacus'!
+*=Warning: database 'b|one|abacus' was modified but no index file was exported.
+*=Warning: database 'b|two|abacus' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+EOF
+
+rm -r db conf pool logs lists
+testsuccess
diff --git a/tests/descriptions.test b/tests/descriptions.test
new file mode 100644
index 0000000..d751375
--- /dev/null
+++ b/tests/descriptions.test
@@ -0,0 +1,143 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf
+cat > conf/distributions <<EOF
+Codename: 1234
+Components: a
+UDebComponents: a
+Architectures: abacus source
+UDebIndices: Packages .
+DebIndices: Packages .
+DscIndices: Sources .
+
+Codename: 4321
+Components: a
+UDebComponents: a
+Architectures: abacus source
+UDebIndices: Packages .
+DebIndices: Packages .
+DscIndices: Sources .
+EOF
+
+testrun - -b . export 3<<EOF
+stderr
+stdout
+$(odb)
+-v1*=Exporting 1234...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/1234"
+-v2*=Created directory "./dists/1234/a"
+-v2*=Created directory "./dists/1234/a/binary-abacus"
+-v6*= exporting '1234|a|abacus'...
+-v6*= creating './dists/1234/a/binary-abacus/Packages' (uncompressed)
+-v2*=Created directory "./dists/1234/a/debian-installer"
+-v2*=Created directory "./dists/1234/a/debian-installer/binary-abacus"
+-v6*= exporting 'u|1234|a|abacus'...
+-v6*= creating './dists/1234/a/debian-installer/binary-abacus/Packages' (uncompressed)
+-v2*=Created directory "./dists/1234/a/source"
+-v6*= exporting '1234|a|source'...
+-v6*= creating './dists/1234/a/source/Sources' (uncompressed)
+-v1*=Exporting 4321...
+-v2*=Created directory "./dists/4321"
+-v2*=Created directory "./dists/4321/a"
+-v2*=Created directory "./dists/4321/a/binary-abacus"
+-v6*= exporting '4321|a|abacus'...
+-v6*= creating './dists/4321/a/binary-abacus/Packages' (uncompressed)
+-v2*=Created directory "./dists/4321/a/debian-installer"
+-v2*=Created directory "./dists/4321/a/debian-installer/binary-abacus"
+-v6*= exporting 'u|4321|a|abacus'...
+-v6*= creating './dists/4321/a/debian-installer/binary-abacus/Packages' (uncompressed)
+-v2*=Created directory "./dists/4321/a/source"
+-v6*= exporting '4321|a|source'...
+-v6*= creating './dists/4321/a/source/Sources' (uncompressed)
+EOF
+
+mkdir i
+(cd i ; PACKAGE=hello EPOCH="" VERSION=1 REVISION="" SECTION="base" genpackage.sh)
+
+testrun - -b . -C a includedeb 1234 i/hello_1_abacus.deb 3<<EOF
+stdout
+-v9*=Adding reference to 'pool/a/h/hello/hello_1_abacus.deb' by '1234|a|abacus'
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/a"
+-v2*=Created directory "./pool/a/h"
+-v2*=Created directory "./pool/a/h/hello"
+$(ofa 'pool/a/h/hello/hello_1_abacus.deb')
+$(opa 'hello' 1 '1234' 'a' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|a|abacus'...
+-v6*= replacing './dists/1234/a/binary-abacus/Packages' (uncompressed)
+-v6*= looking for changes in '1234|a|source'...
+-v6*= looking for changes in 'u|1234|a|abacus'...
+EOF
+
+cp dists/1234/a/binary-abacus/Packages i/mangled
+sed -i -e "s/^ blub/Description-md5: 29e34048cfd56bbec39e6997af9c7057/" i/mangled
+
+testrun - -b . -T deb -A abacus -C a _addpackage 4321 i/mangled hello 3<<EOF
+stdout
+-v1*=Adding 'hello' '1' to '4321|a|abacus'.
+-v9*=Adding reference to 'pool/a/h/hello/hello_1_abacus.deb' by '4321|a|abacus'
+$(opa 'hello' 1 '4321' 'a' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '4321|a|abacus'...
+-v6*= replacing './dists/4321/a/binary-abacus/Packages' (uncompressed)
+-v6*= looking for changes in '4321|a|source'...
+-v6*= looking for changes in 'u|4321|a|abacus'...
+EOF
+
+dbversion="$(sed -n -e 's/^b\(db[0-9]\+\.[0-9]\+\)\.0$/\1/p;T;q' db/version)"
+
+dodo ${dbversion}_dump -p -f db/packages.dump db/packages.db
+dodo sed -e 's/\\0a blub\\0a/\\0aDescription-md5: 29e34048cfd56bbec39e6997af9c7057\\0a/' -i db/packages.dump
+dodo ${dbversion}_load -f db/packages.dump db/packages.db
+
+testrun - -b . export 4321 3<<EOF
+stdout
+-v0*=Exporting 4321...
+-v6*= exporting '4321|a|abacus'...
+-v6*= replacing './dists/4321/a/binary-abacus/Packages' (uncompressed)
+-v6*= exporting '4321|a|source'...
+-v6*= replacing './dists/4321/a/source/Sources' (uncompressed)
+-v6*= exporting 'u|4321|a|abacus'...
+-v6*= replacing './dists/4321/a/debian-installer/binary-abacus/Packages' (uncompressed)
+EOF
+
+if diff dists/1234/a/binary-abacus/Packages dists/4321/a/binary-abacus/Packages ; then
+	echo "Failed to include Description-md5 for test" >&2
+ exit 1
+fi
+
+testrun - -b . repairdescriptions 4321 3<<EOF
+stdout
+-v1*=Looking for 'Description's to repair in 4321...
+-v3*=Redoing checksum information for packages in '4321|a|abacus'...
+-v3*=Redoing checksum information for packages in 'u|4321|a|abacus'...
+-v0*=Fixing description for 'hello'...
+-v0*=Exporting indices...
+-v6*= looking for changes in '4321|a|abacus'...
+-v6*= replacing './dists/4321/a/binary-abacus/Packages' (uncompressed)
+-v6*= looking for changes in '4321|a|source'...
+-v6*= looking for changes in 'u|4321|a|abacus'...
+EOF
+
+dodiff dists/1234/a/binary-abacus/Packages dists/4321/a/binary-abacus/Packages
+
+dodo ${dbversion}_load -f db/packages.dump db/packages.db
+
+rm pool/a/h/hello/hello_1_abacus.deb
+
+testrun - -b . repairdescriptions 4321 3<<EOF
+stdout
+-v1*=Looking for 'Description's to repair in 4321...
+-v3*=Redoing checksum information for packages in '4321|a|abacus'...
+-v3*=Redoing checksum information for packages in 'u|4321|a|abacus'...
+stderr
+*=Error 2 opening ./pool/a/h/hello/hello_1_abacus.deb: No such file or directory
+*=Cannot retrieve long description for package 'hello' out of package's files!
+EOF
+
+rm -r -f db conf dists pool i
+testsuccess
diff --git a/tests/diffgeneration.test b/tests/diffgeneration.test
new file mode 100644
index 0000000..8594659
--- /dev/null
+++ b/tests/diffgeneration.test
@@ -0,0 +1,271 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+# testing with Sources, as they are easier to generate...
+
+if ! test -e "$RREDTOOL" ; then
+ echo "SKIPPED: rredtool not found, '$RREDTOOL' tried."
+ exit 0
+fi
+
+mkdir conf
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: source
+Components: main
+DscIndices: Sources Release . .gz $RREDTOOL
+EOF
+
+# Section and Priority in .dsc are a reprepro extension...
+
+echo "Dummy file" > test_1.tar.gz
+cat > test_1.dsc <<EOF
+Format: 1.0
+Source: test
+Binary: more or less
+Architecture: who knows what
+Version: 1
+Section: test
+Priority: extra
+Maintainer: Guess Who <its@me>
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaa some lines to make it long enough aaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+Files:
+ $(mdandsize test_1.tar.gz) test_1.tar.gz
+EOF
+echo "Dummy file" > pre_1.tar.gz
+cat > pre_1.dsc <<EOF
+Format: 1.0
+Source: pre
+Binary: pre
+Architecture: all
+Version: 1
+Maintainer: Guess Who <its@me>
+Section: pre
+Priority: extra
+Files:
+ $(mdandsize pre_1.tar.gz) pre_1.tar.gz
+EOF
+echo "New file" > pre_2.tar.gz
+cat > pre_2.dsc <<EOF
+Format: 1.0
+Source: pre
+Binary: pre
+Architecture: all
+Version: 2
+Maintainer: Guess Who <its@me>
+Section: pre
+Priority: extra
+Files:
+ $(mdandsize pre_2.tar.gz) pre_2.tar.gz
+EOF
+echo "Even newer" > pre_3.tar.gz
+cat > pre_3.dsc <<EOF
+Format: 1.0
+Source: pre
+Binary: pre
+Architecture: all
+Version: 3
+Maintainer: Guess Who <its@me>
+Section: pre
+Priority: extra
+Files:
+ $(mdandsize pre_3.tar.gz) pre_3.tar.gz
+EOF
+
+mkdir old
+testrun - includedsc test test_1.dsc 3<<EOF
+-v1*=test_1.dsc: component guessed as 'main'
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/t"
+-v2*=Created directory "./pool/main/t/test"
+$(ofa 'pool/main/t/test/test_1.dsc')
+$(ofa 'pool/main/t/test/test_1.tar.gz')
+$(opa 'test' 1 'test' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (uncompressed,gzipped,script: rredtool)
+EOF
+dodo cp dists/test/main/source/Sources old/0
+dodo test "!" -e dists/test/main/source/Sources.diff
+testrun - includedsc test pre_1.dsc 3<<EOF
+-v1*=pre_1.dsc: component guessed as 'main'
+stdout
+-v2*=Created directory "./pool/main/p"
+-v2*=Created directory "./pool/main/p/pre"
+$(ofa 'pool/main/p/pre/pre_1.dsc')
+$(ofa 'pool/main/p/pre/pre_1.tar.gz')
+$(opa 'pre' 1 'test' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|source'...
+-v6*= replacing './dists/test/main/source/Sources' (uncompressed,gzipped,script: rredtool)
+EOF
+dodo cp dists/test/main/source/Sources old/1
+dodo test -f dists/test/main/source/Sources.diff/Index
+testrun - includedsc test pre_2.dsc 3<<EOF
+-v1*=pre_2.dsc: component guessed as 'main'
+stdout
+$(ofa 'pool/main/p/pre/pre_2.dsc')
+$(ofa 'pool/main/p/pre/pre_2.tar.gz')
+$(opu 'pre' 1 2 'test' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/p/pre/pre_1.dsc')
+$(ofd 'pool/main/p/pre/pre_1.tar.gz')
+-v6*= looking for changes in 'test|main|source'...
+-v6*= replacing './dists/test/main/source/Sources' (uncompressed,gzipped,script: rredtool)
+EOF
+dodo cp dists/test/main/source/Sources old/2
+dodo test -f dists/test/main/source/Sources.diff/Index
+testrun - includedsc test pre_3.dsc 3<<EOF
+-v1*=pre_3.dsc: component guessed as 'main'
+stdout
+$(ofa 'pool/main/p/pre/pre_3.dsc')
+$(ofa 'pool/main/p/pre/pre_3.tar.gz')
+$(opu 'pre' 2 3 'test' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/p/pre/pre_2.dsc')
+$(ofd 'pool/main/p/pre/pre_2.tar.gz')
+-v6*= looking for changes in 'test|main|source'...
+-v6*= replacing './dists/test/main/source/Sources' (uncompressed,gzipped,script: rredtool)
+EOF
+dodo cp dists/test/main/source/Sources old/3
+dodo test -f dists/test/main/source/Sources.diff/Index
+
+(cd dists/test/main/source/Sources.diff/ && ls *.gz) | sort |sed -e 's/\.gz$//' > patches
+
+cat > results.expected <<EOF
+SHA1-Current: $(sha1andsize old/3)
+SHA1-History:
+EOF
+i=0
+for p in $(cat patches) ; do
+cat >> results.expected <<EOF
+ $(sha1and7size old/$i) ${p}
+EOF
+i=$((i+1))
+done
+cat >> results.expected <<EOF
+SHA1-Patches:
+EOF
+for p in $(cat patches) ; do
+ dodo gunzip dists/test/main/source/Sources.diff/${p}.gz
+cat >> results.expected <<EOF
+ $(sha1and7size dists/test/main/source/Sources.diff/${p}) ${p}
+EOF
+done
+cat >> results.expected <<EOF
+X-Patch-Precedence: merged
+EOF
+
+dodiff results.expected dists/test/main/source/Sources.diff/Index
+
+i=1
+for p in $(cat patches) ; do
+ cp dists/test/main/source/Sources.diff/$p $i.diff
+ i=$((i+1))
+done
+cat > results.expected << EOF
+1c
+Package: pre
+Format: 1.0
+Binary: pre
+Architecture: all
+Version: 3
+Maintainer: Guess Who <its@me>
+Priority: extra
+Section: pre
+Directory: pool/main/p/pre
+Files:
+ $(mdandsize pre_3.dsc) pre_3.dsc
+ $(mdandsize pre_3.tar.gz) pre_3.tar.gz
+Checksums-Sha1:
+ $(sha1andsize pre_3.dsc) pre_3.dsc
+ $(sha1andsize pre_3.tar.gz) pre_3.tar.gz
+Checksums-Sha256:
+ $(sha2andsize pre_3.dsc) pre_3.dsc
+ $(sha2andsize pre_3.tar.gz) pre_3.tar.gz
+
+Package: test
+.
+EOF
+dodiff results.expected 1.diff
+rm 1.diff
+cat > results.expected << EOF
+17,18c
+ $(sha2andsize pre_3.dsc) pre_3.dsc
+ $(sha2andsize pre_3.tar.gz) pre_3.tar.gz
+.
+14,15c
+ $(sha1andsize pre_3.dsc) pre_3.dsc
+ $(sha1andsize pre_3.tar.gz) pre_3.tar.gz
+.
+11,12c
+ $(mdandsize pre_3.dsc) pre_3.dsc
+ $(mdandsize pre_3.tar.gz) pre_3.tar.gz
+.
+5c
+Version: 3
+.
+EOF
+dodiff results.expected 2.diff
+rm 2.diff
+dodiff results.expected 3.diff
+rm 3.diff
+cat > results.expected << EOF
+1c
+Package: pre
+.
+EOF
+dodiff results.expected 4.diff
+rm 4.diff
+
+rm -r old db pool conf dists pre_*.dsc pre_*.tar.gz test_1.dsc test_1.tar.gz results.expected patches
+
+testsuccess
diff --git a/tests/easyupdate.test b/tests/easyupdate.test
new file mode 100644
index 0000000..4801c9a
--- /dev/null
+++ b/tests/easyupdate.test
@@ -0,0 +1,142 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p test/a test/dists/name/comp/source
+mkdir bla
+tar -czf test/a/a.tar.gz bla
+rmdir bla
+
+cat > test/a/a.dsc <<EOF
+Format: 3.0 (native)
+Source: apackage
+Version: 0-1
+Maintainer: noone <noone@nowhere.tld>
+Checksums-Sha1:
+ $(sha1andsize test/a/a.tar.gz) a.tar.gz
+EOF
+
+cat > test/dists/name/comp/source/Sources <<EOF
+Package: apackage
+Version: 0-1
+Priority: extra
+Section: devel
+Maintainer: noone <noone@nowhere.tld>
+Directory: a
+Files:
+ $(mdandsize test/a/a.dsc) a.dsc
+ $(mdandsize test/a/a.tar.gz) a.tar.gz
+Checksums-Sha1:
+ $(sha1andsize test/a/a.dsc) a.dsc
+ $(sha1andsize test/a/a.tar.gz) a.tar.gz
+EOF
+
+mkdir conf
+
+cat > conf/distributions <<EOF
+Codename: test1
+Architectures: source
+Components: everything
+Update: u
+
+Codename: test2
+Architectures: source
+Components: everything
+Update: - u
+EOF
+cat > conf/updates <<EOF
+Name: u
+Method: file:${WORKDIR}/test
+Suite: name
+Components: comp>everything
+IgnoreRelease: Yes
+DownloadListsAs: .
+EOF
+
+testrun - update test1 3<<EOF
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v2*=Copy file '${WORKDIR}/test/dists/name/comp/source/Sources' to './lists/u_name_comp_Sources'...
+-v6=aptmethod start 'file:${WORKDIR}/test/a/a.dsc'
+-v1*=aptmethod got 'file:${WORKDIR}/test/a/a.dsc'
+-v2*=Linking file '${WORKDIR}/test/a/a.dsc' to './pool/everything/a/apackage/a.dsc'...
+-v6=aptmethod start 'file:${WORKDIR}/test/a/a.tar.gz'
+-v1*=aptmethod got 'file:${WORKDIR}/test/a/a.tar.gz'
+-v2*=Linking file '${WORKDIR}/test/a/a.tar.gz' to './pool/everything/a/apackage/a.tar.gz'...
+stdout
+$(odb)
+-v2*=Created directory "./lists"
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test1|everything|source'
+-v5*= reading './lists/u_name_comp_Sources'
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/everything"
+-v2*=Created directory "./pool/everything/a"
+-v2*=Created directory "./pool/everything/a/apackage"
+-v0*=Getting packages...
+$(ofa pool/everything/a/apackage/a.dsc)
+$(ofa pool/everything/a/apackage/a.tar.gz)
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa apackage 0-1 test1 everything source dsc)
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test1"
+-v2*=Created directory "./dists/test1/everything"
+-v2*=Created directory "./dists/test1/everything/source"
+-v6*= looking for changes in 'test1|everything|source'...
+-v6*= creating './dists/test1/everything/source/Sources' (gzipped)
+EOF
+
+testrun - update test2 3<<EOF
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v2*=Copy file '${WORKDIR}/test/dists/name/comp/source/Sources' to './lists/u_name_comp_Sources'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test2|everything|source'
+-v5*= marking everything to be deleted
+-v5*= reading './lists/u_name_comp_Sources'
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa apackage 0-1 test2 everything source dsc)
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/test2"
+-v2*=Created directory "./dists/test2/everything"
+-v2*=Created directory "./dists/test2/everything/source"
+-v6*= looking for changes in 'test2|everything|source'...
+-v6*= creating './dists/test2/everything/source/Sources' (gzipped)
+EOF
+
+testrun - update test2 3<<EOF
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v2*=Copy file '${WORKDIR}/test/dists/name/comp/source/Sources' to './lists/u_name_comp_Sources'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test2|everything|source'
+-v5*= marking everything to be deleted
+-v5*= reading './lists/u_name_comp_Sources'
+EOF
+
+true > test/dists/name/comp/source/Sources
+
+testrun - update test2 3<<EOF
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/name/comp/source/Sources'
+-v2*=Copy file '${WORKDIR}/test/dists/name/comp/source/Sources' to './lists/u_name_comp_Sources'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test2|everything|source'
+-v5*= marking everything to be deleted
+-v5*= reading './lists/u_name_comp_Sources'
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opd apackage 0-1 test2 everything source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|everything|source'...
+-v6*= replacing './dists/test2/everything/source/Sources' (gzipped)
+EOF
+
+testsuccess
diff --git a/tests/evil.key b/tests/evil.key
new file mode 100644
index 0000000..31d8f84
--- /dev/null
+++ b/tests/evil.key
@@ -0,0 +1,18 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.6 (GNU/Linux)
+
+lQG7BEgsP4sRBADBpZrNGxOgGusSQPOO7B2KV3IBPQAg+Ywu4EaGBK4TvIJIqY0K
+5kSAt4oSCwC0jd6cKsG1IqDomcB7fUplJP10XyH438Px7t/4yAUvX4v2MwNnA75W
+X63/JJq0OPQ97KIQHF+Vpz3tLL7jPQxnCR5149bB94BYtaOFxOcfpqYJawCggpza
+MuTTV+xhc+UIPNtOJY9AZu8EAJGPH+WdBxHjeqO8Ao/QECwjqItxx8rxcWjkYN9z
+D4KaiQ1kUFHpU2tql/NCH/+FtvS1l6Cm+90TJO116CCo/+qh9j6+QcQqS6Wz2eJK
+vKMa2IjdIGNcSxgE7/k46g0KQ7uYC2NdzMmRu7ot+NGrFDwN3gt92mmR7MQpc5rI
+ksdbA/9k91cy9ZwbxSQdnTujlekpqiICwC99aotJ7qRSVelgVaU2ueLZpC1BoFA6
+QafyCzSGppkPm0RomLpUl77j5F6wkwJ+8oRchGB0kthsnOA/WW/wK+M0fh/SBpfK
+uFkuXUqKfJxqtOo99H4FuXbsaCWvLjq9y8XmI6Wf6aF/Q6WKPgAAnA7amgto7UZm
+wGVgYLdfn3B4vboQCjO0S1JlcHJlcHJvIFRlc3RzdWl0ZSBLZXkgMiAoRk9SIFVT
+RSBXSVRISU4gVEVTVFNVSVRFIE9OTFkpIDxldmlsQG5vd2hlcmUudGxkPohgBBMR
+AgAgBQJILD+LAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQn6lDZwxnKkrT
+gQCfYJG25Z9MNlOj/F9WzJ2NahZC+NUAn0Qrfs9w8s1Rnw39EYqmVWTG9qSG
+=7arY
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/expired.key b/tests/expired.key
new file mode 100644
index 0000000..8d5cf67
--- /dev/null
+++ b/tests/expired.key
@@ -0,0 +1,19 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.6 (GNU/Linux)
+
+lQG6BEgsQCURBAC+SMbS3n9hnYIYf3YoKWHqEsrrjX6UXgb9io3VtHy4ljr2cJbF
+pABjIEWh0z7kLXXJVeR6bAc4lgR8cR6T2TsuRkx0lT2BtFI+iNz8oeLRjM4TQbJQ
+erxl4m67PXPxLXmbhBmO2HSQ/6NlQIE9AfAE5Bf5JTb630aJolrgWF4phwCguqfK
+EcqgotFEErPvwCF9bqv+UHsEAJCNVCJ1wyabrnSykkE+7H8cgB9wkE255ussB0pD
+pX3IKcquwShQFgLUjgCmlVnBqFE5K/K8dBSf+TAYI6a3zV5SzKTWUy2b3cZljMwO
+jUxd5CMVSK4c8IeTxPvWdcx0hzjAngeKNkGbzWaQqUes49Mr9ItxEXViVvaJLcay
+RhnEA/46soa9a7YI+XWJF0UQUSKSbuie5iwGzXC7KCosyNsPcu2G15dL7YelkGAo
+B+rV8yWMVg0+2fY68nmrkilfR32jG3rMPPS5ZPYO8vAQFv1VSJUjuIjerV0+fVsv
+W3udbXFDmURpw8LhZMI5bKmJtcKdGhXd1sZ/vhZSZAFs0LchDwAAl0NwAPemPbcK
+PBEqqXFxSe0lkSIH9bROUmVwcmVwcm8gVGVzdHN1aXRlIEtleSA0IChGT1IgVVNF
+IFdJVEhJTiBURVNUU1VJVEUgT05MWSkgPGV4cGlyZWRAbm93aGVyZS50bGQ+iGYE
+ExECACYFAkgsQCUCGwMFCQABUYAGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCJ
+T6Kd0E3T1go2AJ4lg1614/jYIy5m7NCCPXUpCOhrVQCfUMuczWp32ddKY4aDqiHX
+t/QBoI4=
+=Q+dU
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/expiredwithsubkey-working.key b/tests/expiredwithsubkey-working.key
new file mode 100644
index 0000000..1eb14bc
--- /dev/null
+++ b/tests/expiredwithsubkey-working.key
@@ -0,0 +1,52 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.9 (GNU/Linux)
+
+lQHYBEnjBlUBBAC3SCzchwyhCXrSItaqHZSbp/d+s1zaD7Ui75GY67lgZ92aHPrd
+sXr9+/cCneIFyxyDbG20mLkFNrvOmNIsfDikZGtHhCXlgXMHlv/FqMxw/mvxfFmu
+iEbQwaGIfUqQq1HvrvdaWgbnge9EUxF7qAGFv1Sw+1YYY5qNy2xVF3yxGQARAQAB
+AAP7BxCKAyOU++mTwYEMhXDfsyrDpk93VSPtG1hA5FQ0uGe0gd3bWRt/1kWlqSUJ
+Cuhf9Fxj86W93vqzvUREeKi+nZ3agp7W6xHfWMGDGkLj5uN/3Y9MRZRxzJOnJ02j
+H5v5BMRCJPNMFnNHD8v8eZT2nMholqLCE2ek4A1kqUcu68ECANW1xsnNyHJbu1i8
+LZqGfsQaAGSdEMQlYqi7zM9EoWNcPloN//2KZ978NfybSBWZTNRj/ToFjZ5TUZrz
+JN2/a0ECANuM9NeGxw3ooUUbMc2tWV1r8UDl9o6SS+MJYl5wnyyIttbgsUeeb3YA
+5OjlsemQ5gGKJzxvZYSoXPvkzs1MB9kB/0lGrW6bJk59sjjcMjRmirbjdRk0LvRD
+NAPGrWgPS2zbT2/Y6HzCdlJ+NLSSwAB+tr0AmTZDPsrN/1VIIoF6QiqndbRYUmVw
+cmVwcm8gVGVzdHN1aXRlIEtleSA1IChGT1IgVVNFIFdJVEhJTiBURVNUU1VJVEUg
+T05MWSkgPGV4cGlyZWR3aXRoc3Via2V5QG5vd2hlcmUudGxkPoi2BBMBAgAgBQJJ
+4wZVAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQLxqbMqJgRJotAAP/UAiW
+KlcESGUen7/mfY2pl2rBrq6gTYPh/ElHZKiDrixeJNBEhDub9u7PCg9G8kKrJ+Yq
+Kh9l1yWBVoWq29w/ru2MXSNOgwfZfFMZpfQ0iHxwKreroAXkhHBpF4aG1Q3xwKEB
+nF+8UrA+ajhm2lYhVtw39yDzoH1eF4JdFERkgm+dAbsESeMGwREEANTZPc2YyIzY
+HYYPORodlyu8hSPX0weNh3McidVBZWs4eQfg57lqfXQrKZxfhAOWkS5o3CmU+Qzk
+si5Axkvl6hYt4OKnt9cOSS3IpeB4SCgF/ey1MeLu9pSxqmxvPR2ddZvJuYKrMgiZ
++QF3sgzeOPfYGgrRJOKO2iRx/17R4wy3AKDEhuFBX+ViClYkyvpjzx6J7fxQtQP/
+a6uhhB8AyV9VjjWbupAGZbG1YnrgzJJBUN81BEU9iNqNXNNE464slGI0Ey3KV04w
+5m4/N1heZ25T4BtdM2dTpbU0TEaUhRuqcoCSMhPBpcAPuYNT7NNeinr4Xq681iex
+WW5+LQnNuCEmr2tXPW4Ewy/Z3pmr9+nlZjEGuz+4T6gD/RKSRPccoLKE8Hk57wJM
+6+6tA1dXsLzDYrJLM6t8oQWa++GhPppsxr4r1i16SYTc0Uv/2rblUQlTfmmCE5wB
+vlg6cWb9ly+BhlMCdRCXHgmUeKndB3gaplEGt8R1iZJBW3y17BXf2LPMPti5YoQz
+JwgGLhrgSy0UOvX7eNhX7CUfAACgnc6LCcEXRxIJeM4glyt+hwPCki0IiojnBBgB
+AgAJBQJJ4wbBAhsCAFIJEC8amzKiYESaRyAEGRECAAYFAknjBsEACgkQHSzFxlz9
+b75NRQCgnYR3NtV9F8DuMFCecnRQfR2yPvMAn080v7py3eE1i+UnYJE65wrCB53N
+77AEAIeZLJ1Q26Sdop0r5WZrW98aL9weuWzShkDjuMYiLR1S3kuOCrucVzikrqV/
+JtjLZTAOgU8tGkxY0ORPZS33z3wLZQW4L1EWJWAeG0kbpaSj49GBnCeRs6zP7yu9
+dX3pNWi6ZYHNZ7FDR0hSxLPABeNRuKOhy3/0uLkoC9+Lo+0SnQHYBEnjBtABBADb
+Xlr92Lq288cqkouYZtOOM93zMALoSMoV1ciUf5C03fDaFGJX2l8omQvCsWRjUYTH
+4RgVg+eP3F0nlZnyCkuGM0mS096BheKD6bXpG2aVFnFahwm/3C2GJi7Jd7ajhYFR
+9sDRVfEUi5wTScwKcMmdjEutqmHr3Uuo5MaFCz20oQARAQABAAP8C5FFGWzE0z4F
+dQPtXSHictWetPq1k4m3kMlRMm3r27K2lyxTH3x0Dz4rQ+biYxeWyaqvoD5QbwpE
+5gAGMjB7aREh4ufQKPf5mBwEICluJpogQrTtrHrWprYUWxqkCkuwCDTh6QnH/UwO
+X3m7jaV2vTvFO9zTl/C9YdsKNNXxzxECAObR5GTKe50A/MxtpU7LCMi6MzMmkl+g
+zUJOae6DnTeRYTSJEEiHfbGYzKzrofWCH4qvMpoO0g9x0ON9PMYTP7ECAPNMqY/3
++kA7n6diK3OBa1PcNt2Od4gDSQBRRetG++9mCux/m0IlDn0uK1lPhAYA7D9ncLFn
+8qlbg9lMSqvRb/EB/A7UDEZAXnd4aZ0mhaJJmWLsU9emwspoQlpG50m4ovc6cvHH
+MXVe3WCU+YZYHjfJRdf85py1hKrvcupwkoIIaaGhV4kBPQQYAQIACQUCSeMG0AIb
+AgCoCRAvGpsyomBEmp0gBBkBAgAGBQJJ4wbQAAoJEKVz/rFg3e1bd5oD/RtJlbC6
+yRatjRG+O8AZg33lsKonJpKXtLaXr3seTbff1ODRUVfpm8hI0OuRcm9XJ7Uv9PDe
+rEtDvCokjm/oDUyymlRLLU4MfoUKMNlIs4Wg1jHKM0GZnbxPf5PZWTUmVRFEX+Tz
+/3Sgf5i5l68h8hRzA86UT3UWEPa8T4PsXsHdsl4D/R0+chNjUl9TmyVpyp+7OCEG
+NOpqOdwHdR78VEE7LLahyg/8XehGtHzqvx2Vv58Bt+EV//gGfwSmn2u0brOcdMgF
+w7OXHtqRM0NguAz3PQS3dgoYqShxjf6OSIGg9Q0Y109OF2iEslQ5uFb2p/FjA3wz
+f81KSrTWYPMlXQJlULb0
+=OXP5
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/expiredwithsubkey.key b/tests/expiredwithsubkey.key
new file mode 100644
index 0000000..85f33e1
--- /dev/null
+++ b/tests/expiredwithsubkey.key
@@ -0,0 +1,52 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.9 (GNU/Linux)
+
+lQHYBEnjBlUBBAC3SCzchwyhCXrSItaqHZSbp/d+s1zaD7Ui75GY67lgZ92aHPrd
+sXr9+/cCneIFyxyDbG20mLkFNrvOmNIsfDikZGtHhCXlgXMHlv/FqMxw/mvxfFmu
+iEbQwaGIfUqQq1HvrvdaWgbnge9EUxF7qAGFv1Sw+1YYY5qNy2xVF3yxGQARAQAB
+AAP7BxCKAyOU++mTwYEMhXDfsyrDpk93VSPtG1hA5FQ0uGe0gd3bWRt/1kWlqSUJ
+Cuhf9Fxj86W93vqzvUREeKi+nZ3agp7W6xHfWMGDGkLj5uN/3Y9MRZRxzJOnJ02j
+H5v5BMRCJPNMFnNHD8v8eZT2nMholqLCE2ek4A1kqUcu68ECANW1xsnNyHJbu1i8
+LZqGfsQaAGSdEMQlYqi7zM9EoWNcPloN//2KZ978NfybSBWZTNRj/ToFjZ5TUZrz
+JN2/a0ECANuM9NeGxw3ooUUbMc2tWV1r8UDl9o6SS+MJYl5wnyyIttbgsUeeb3YA
+5OjlsemQ5gGKJzxvZYSoXPvkzs1MB9kB/0lGrW6bJk59sjjcMjRmirbjdRk0LvRD
+NAPGrWgPS2zbT2/Y6HzCdlJ+NLSSwAB+tr0AmTZDPsrN/1VIIoF6QiqndbRYUmVw
+cmVwcm8gVGVzdHN1aXRlIEtleSA1IChGT1IgVVNFIFdJVEhJTiBURVNUU1VJVEUg
+T05MWSkgPGV4cGlyZWR3aXRoc3Via2V5QG5vd2hlcmUudGxkPoi8BBMBAgAmAhsD
+BgsJCAcDAgQVAggDBBYCAwECHgECF4AFAknjBwcFCQABUjAACgkQLxqbMqJgRJpK
+jwP8CFRafefR2NTb6C/9JLl6pJzrfs8bz6AZp8Hmq57HWVY9L8TfANNcKqVYhwbd
+f2VWU3Ab8wIYCXUkHeicmaRPvxEw7iVrkhvwg5jVP78Zk3UDKPRJxjq7VFAUc3qy
+WAcmOEv/ombRFFFkh5McEqSw4KVKXPaLXZgVfhpzSw8fIMidAbsESeMGwREEANTZ
+Pc2YyIzYHYYPORodlyu8hSPX0weNh3McidVBZWs4eQfg57lqfXQrKZxfhAOWkS5o
+3CmU+Qzksi5Axkvl6hYt4OKnt9cOSS3IpeB4SCgF/ey1MeLu9pSxqmxvPR2ddZvJ
+uYKrMgiZ+QF3sgzeOPfYGgrRJOKO2iRx/17R4wy3AKDEhuFBX+ViClYkyvpjzx6J
+7fxQtQP/a6uhhB8AyV9VjjWbupAGZbG1YnrgzJJBUN81BEU9iNqNXNNE464slGI0
+Ey3KV04w5m4/N1heZ25T4BtdM2dTpbU0TEaUhRuqcoCSMhPBpcAPuYNT7NNeinr4
+Xq681iexWW5+LQnNuCEmr2tXPW4Ewy/Z3pmr9+nlZjEGuz+4T6gD/RKSRPccoLKE
+8Hk57wJM6+6tA1dXsLzDYrJLM6t8oQWa++GhPppsxr4r1i16SYTc0Uv/2rblUQlT
+fmmCE5wBvlg6cWb9ly+BhlMCdRCXHgmUeKndB3gaplEGt8R1iZJBW3y17BXf2LPM
+Pti5YoQzJwgGLhrgSy0UOvX7eNhX7CUfAACgnc6LCcEXRxIJeM4glyt+hwPCki0I
+iojtBBgBAgAPAhsCBQJJ4wdkBQkAAVIhAFJHIAQZEQIABgUCSeMGwQAKCRAdLMXG
+XP1vvk1FAJ0QB4nk6jWf0Z9+aHBfnkyBF9DlmgCfaBY1cg0GMrZ7sH60IM7/Ym4L
+hP8JEC8amzKiYESas0IEAKmGTzRSawoX0KBTyfOo7AhNz5rhCWTUVo+DZNxC5Am6
+BmD48NEqWrUfG1Ee4vUj+RDzLNXnkG40mZuWFB2mmF+Ss6QuA17icCWxkHYWixOF
+85+Z7sOmS8tPpMrFZuWF48QGFeXfxc1unOQjEPxtOurI+KzeezuyNKQHk9QFCRHT
+nQHYBEnjBtABBADbXlr92Lq288cqkouYZtOOM93zMALoSMoV1ciUf5C03fDaFGJX
+2l8omQvCsWRjUYTH4RgVg+eP3F0nlZnyCkuGM0mS096BheKD6bXpG2aVFnFahwm/
+3C2GJi7Jd7ajhYFR9sDRVfEUi5wTScwKcMmdjEutqmHr3Uuo5MaFCz20oQARAQAB
+AAP8C5FFGWzE0z4FdQPtXSHictWetPq1k4m3kMlRMm3r27K2lyxTH3x0Dz4rQ+bi
+YxeWyaqvoD5QbwpE5gAGMjB7aREh4ufQKPf5mBwEICluJpogQrTtrHrWprYUWxqk
+CkuwCDTh6QnH/UwOX3m7jaV2vTvFO9zTl/C9YdsKNNXxzxECAObR5GTKe50A/Mxt
+pU7LCMi6MzMmkl+gzUJOae6DnTeRYTSJEEiHfbGYzKzrofWCH4qvMpoO0g9x0ON9
+PMYTP7ECAPNMqY/3+kA7n6diK3OBa1PcNt2Od4gDSQBRRetG++9mCux/m0IlDn0u
+K1lPhAYA7D9ncLFn8qlbg9lMSqvRb/EB/A7UDEZAXnd4aZ0mhaJJmWLsU9emwspo
+QlpG50m4ovc6cvHHMXVe3WCU+YZYHjfJRdf85py1hKrvcupwkoIIaaGhV4kBPQQY
+AQIACQUCSeMG0AIbAgCoCRAvGpsyomBEmp0gBBkBAgAGBQJJ4wbQAAoJEKVz/rFg
+3e1bd5oD/RtJlbC6yRatjRG+O8AZg33lsKonJpKXtLaXr3seTbff1ODRUVfpm8hI
+0OuRcm9XJ7Uv9PDerEtDvCokjm/oDUyymlRLLU4MfoUKMNlIs4Wg1jHKM0GZnbxP
+f5PZWTUmVRFEX+Tz/3Sgf5i5l68h8hRzA86UT3UWEPa8T4PsXsHdsl4D/R0+chNj
+Ul9TmyVpyp+7OCEGNOpqOdwHdR78VEE7LLahyg/8XehGtHzqvx2Vv58Bt+EV//gG
+fwSmn2u0brOcdMgFw7OXHtqRM0NguAz3PQS3dgoYqShxjf6OSIGg9Q0Y109OF2iE
+slQ5uFb2p/FjA3wzf81KSrTWYPMlXQJlULb0
+=Hzlj
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/export.test b/tests/export.test
new file mode 100644
index 0000000..d0a7643
--- /dev/null
+++ b/tests/export.test
@@ -0,0 +1,79 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat > conf/distributions <<EOF
+Codename: o
+Architectures: a
+Components: e
+DebIndices: Packages .
+EOF
+
+testrun - -b . export o 3<<EOF
+stdout
+$(odb)
+-v1*=Exporting o...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/o"
+-v2*=Created directory "./dists/o/e"
+-v2*=Created directory "./dists/o/e/binary-a"
+-v6*= exporting 'o|e|a'...
+-v6*= creating './dists/o/e/binary-a/Packages' (uncompressed)
+EOF
+
+testrun - -b . remove o nothing 3<<EOF
+stderr
+-v0*=Not removed as not found: nothing
+stdout
+-v0*=Exporting indices...
+-v6*= looking for changes in 'o|e|a'...
+EOF
+
+cat >> conf/distributions <<EOF
+Signed-By: test
+EOF
+
+testrun - -b . export o 3<<EOF
+stdout
+-v1*=Exporting o...
+-v6*= exporting 'o|e|a'...
+-v6*= replacing './dists/o/e/binary-a/Packages' (uncompressed)
+EOF
+
+dodo grep 'Signed-By: test' dists/o/Release
+
+testrun - -b . remove o nothing 3<<EOF
+stderr
+-v0*=Not removed as not found: nothing
+stdout
+-v0*=Exporting indices...
+-v6*= looking for changes in 'o|e|a'...
+EOF
+
+dodo grep 'Signed-By: test' dists/o/Release
+
+cat >> conf/distributions <<EOF
+ValidFor: 100d
+EOF
+
+testrun - -b . export o 3<<EOF
+stdout
+-v1*=Exporting o...
+-v6*= exporting 'o|e|a'...
+-v6*= replacing './dists/o/e/binary-a/Packages' (uncompressed)
+EOF
+
+dodo grep '^Valid-Until:' dists/o/Release
+
+testrun - -b . --export=lookedat remove o nothing 3<<EOF
+stderr
+-v0*=Not removed as not found: nothing
+stdout
+-v0*=Exporting indices...
+-v6*= looking for changes in 'o|e|a'...
+EOF
+
+dodo grep '^Valid-Until:' dists/o/Release
+
+rm -r conf db dists
+testsuccess
diff --git a/tests/exporthooks.test b/tests/exporthooks.test
new file mode 100644
index 0000000..5d7c9c2
--- /dev/null
+++ b/tests/exporthooks.test
@@ -0,0 +1,79 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat > conf/distributions <<EOF
+Codename: o
+Architectures: a b
+Components: e
+DebIndices: .
+EOF
+
+testrun - -b . export o 3<<EOF
+*=Error parsing ./conf/distributions, line 4, column 13: filename for index files expected!
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat > conf/distributions <<EOF
+Codename: o
+Architectures: a b
+Components: e
+DebIndices: X .gz .bz2 strange.sh
+EOF
+cat > conf/strange.sh <<'EOF'
+#!/bin/sh
+echo hook "$@"
+touch "$1/$3.something.new"
+echo "$3.something.new" >&3
+touch "$1/$3.something.hidden.new"
+echo "$3.something.hidden.new." >&3
+exit 0
+EOF
+chmod a+x conf/strange.sh
+
+testrun - -b . export o 3<<EOF
+stdout
+$(odb)
+-v1*=Exporting o...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/o"
+-v2*=Created directory "./dists/o/e"
+-v2*=Created directory "./dists/o/e/binary-a"
+-v6*= exporting 'o|e|a'...
+-v6*= creating './dists/o/e/binary-a/X' (gzipped,bzip2ed,script: strange.sh)
+*=hook ./dists/o e/binary-a/X.new e/binary-a/X new
+-v2*=Created directory "./dists/o/e/binary-b"
+*=hook ./dists/o e/binary-b/X.new e/binary-b/X new
+-v6*= exporting 'o|e|b'...
+-v6*= creating './dists/o/e/binary-b/X' (gzipped,bzip2ed,script: strange.sh)
+EOF
+
+find dists -type f | sort > results
+cat > results.expected <<EOF
+dists/o/Release
+dists/o/e/binary-a/X.bz2
+dists/o/e/binary-a/X.gz
+dists/o/e/binary-a/X.something
+dists/o/e/binary-a/X.something.hidden
+dists/o/e/binary-b/X.bz2
+dists/o/e/binary-b/X.gz
+dists/o/e/binary-b/X.something
+dists/o/e/binary-b/X.something.hidden
+EOF
+dodiff results.expected results
+
+grep something dists/o/Release > results || true
+cat > results.expected <<EOF
+ $(md5releaseline o e/binary-a/X.something)
+ $(md5releaseline o e/binary-b/X.something)
+ $(sha1releaseline o e/binary-a/X.something)
+ $(sha1releaseline o e/binary-b/X.something)
+ $(sha2releaseline o e/binary-a/X.something)
+ $(sha2releaseline o e/binary-b/X.something)
+EOF
+dodiff results.expected results
+
+rm -r conf db dists
+rm results results.expected
+testsuccess
diff --git a/tests/flat.test b/tests/flat.test
new file mode 100644
index 0000000..48d2eba
--- /dev/null
+++ b/tests/flat.test
@@ -0,0 +1,518 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf
+cat > conf/distributions <<EOF
+Codename: 1234
+Components: a bb
+UDebComponents: a
+Architectures: x yyyyyyyyyy source
+Update: flattest
+EOF
+cat > conf/updates.base <<EOF
+Name: flattest
+GetInRelease: no
+Flat: a
+VerifyRelease: blindtrust
+Method: file:$WORKDIR
+Suite: flatsource
+EOF
+
+testrun - -b . export 1234 3<<EOF
+stderr
+stdout
+$(odb)
+-v1*=Exporting 1234...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/1234"
+-v2*=Created directory "./dists/1234/a"
+-v2*=Created directory "./dists/1234/a/binary-x"
+-v6*= exporting '1234|a|x'...
+-v6*= creating './dists/1234/a/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/a/binary-yyyyyyyyyy"
+-v6*= exporting '1234|a|yyyyyyyyyy'...
+-v6*= creating './dists/1234/a/binary-yyyyyyyyyy/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/a/debian-installer"
+-v2*=Created directory "./dists/1234/a/debian-installer/binary-x"
+-v6*= exporting 'u|1234|a|x'...
+-v6*= creating './dists/1234/a/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/a/debian-installer/binary-yyyyyyyyyy"
+-v6*= exporting 'u|1234|a|yyyyyyyyyy'...
+-v6*= creating './dists/1234/a/debian-installer/binary-yyyyyyyyyy/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/a/source"
+-v6*= exporting '1234|a|source'...
+-v6*= creating './dists/1234/a/source/Sources' (gzipped)
+-v2*=Created directory "./dists/1234/bb"
+-v2*=Created directory "./dists/1234/bb/binary-x"
+-v6*= exporting '1234|bb|x'...
+-v6*= creating './dists/1234/bb/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/bb/binary-yyyyyyyyyy"
+-v6*= exporting '1234|bb|yyyyyyyyyy'...
+-v6*= creating './dists/1234/bb/binary-yyyyyyyyyy/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/1234/bb/source"
+-v6*= exporting '1234|bb|source'...
+-v6*= creating './dists/1234/bb/source/Sources' (gzipped)
+EOF
+
+mkdir lists
+
+cp conf/updates.base conf/updates
+cat >>conf/updates <<EOF
+Components: a
+EOF
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+*=./conf/updates:1 to 8: Update pattern may not contain Components and Flat fields ad the same time.
+-v0*=There have been errors!
+stdout
+EOF
+
+cp conf/updates.base conf/updates
+cat >>conf/updates <<EOF
+UDebComponents: a
+EOF
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+*=./conf/updates:1 to 8: Update pattern may not contain UDebComponents and Flat fields ad the same time.
+-v0*=There have been errors!
+stdout
+EOF
+
+mv conf/updates.base conf/updates
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+*=aptmethod error receiving 'file:$WORKDIR/flatsource/Release':
+='File not found'
+='File not found - $WORKDIR/flatsource/Release (2: No such file or directory)'
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir flatsource
+touch flatsource/Release
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+*=Missing checksums in Release file './lists/flattest_flatsource_flat_Release'!
+-v0*=There have been errors!
+stdout
+EOF
+
+echo "MD5Sum:" > flatsource/Release
+
+testrun - -b . update 1234 3<<EOF
+returns 254
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+*=Could not find 'Packages' within './lists/flattest_flatsource_flat_Release'
+-v0*=There have been errors!
+stdout
+EOF
+
+echo " trash" >> flatsource/Release
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+*=Error parsing md5 checksum line ' trash' within './lists/flattest_flatsource_flat_Release'
+-v0*=There have been errors!
+stdout
+EOF
+
+gzip -c < /dev/null > flatsource/Sources.gz
+gzip -c < /dev/null > flatsource/Packages.gz
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $EMPTYMD5 Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Sources.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Sources.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Sources.gz' into './lists/flattest_flatsource_Sources' using '/bin/gunzip'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Packages.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Packages.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Packages.gz' into './lists/flattest_flatsource_Packages' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v3*= processing updates for '1234|a|source'
+-v5*= reading './lists/flattest_flatsource_Sources'
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= processing updates for '1234|a|x'
+#-v5*= reading './lists/flattest_flatsource_Packages'
+EOF
+
+cat > flatsource/Packages <<EOF
+
+EOF
+pkgmd="$(mdandsize flatsource/Packages)"
+gzip -f flatsource/Packages
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $EMPTYMD5 Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Packages.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Packages.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Packages.gz' into './lists/flattest_flatsource_Packages' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v0*= nothing new for '1234|a|source' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= processing updates for '1234|a|x'
+#-v5*= reading './lists/flattest_flatsource_Packages'
+EOF
+
+cat > flatsource/Packages <<EOF
+Package: test
+Architecture: all
+Version: 0
+Filename: flatsource/test.deb
+EOF
+pkgmd="$(mdandsize flatsource/Packages)"
+gzip -f flatsource/Packages
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $EMPTYMD5 Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Packages.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Packages.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Packages.gz' into './lists/flattest_flatsource_Packages' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v0*= nothing new for '1234|a|source' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+stderr
+*=Missing 'Size' line in binary control chunk:
+*=No checksums found in binary control chunk:
+*= 'Package: test
+*=Architecture: all
+*=Version: 0
+*=Filename: flatsource/test.deb'
+-v1*=Stop reading further chunks from './lists/flattest_flatsource_Packages' due to previous errors.
+-v0*=There have been errors!
+return 249
+EOF
+
+cat > flatsource/Packages <<EOF
+Package: test
+Architecture: all
+Version: 0
+Filename: flatsource/test.deb
+Size: 0
+Description: test
+ test
+MD5Sum: $EMPTYMD5ONLY
+EOF
+pkgmd="$(mdandsize flatsource/Packages)"
+gzip -f flatsource/Packages
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $EMPTYMD5 Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Packages.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Packages.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Packages.gz' into './lists/flattest_flatsource_Packages' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v0*= nothing new for '1234|a|source' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= processing updates for '1234|a|x'
+#-v5*= reading './lists/flattest_flatsource_Packages'
+-v2=Created directory "./pool"
+-v2=Created directory "./pool/a"
+-v2=Created directory "./pool/a/t"
+-v2=Created directory "./pool/a/t/test"
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/test.deb'
+*=aptmethod error receiving 'file:$WORKDIR/flatsource/test.deb':
+='File not found'
+='File not found - $WORKDIR/flatsource/test.deb (2: No such file or directory)'
+-v0*=There have been errors!
+return 255
+EOF
+
+touch flatsource/test.deb
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/test.deb'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/test.deb'
+-v2*=Linking file '$WORKDIR/flatsource/test.deb' to './pool/a/t/test/test_0_all.deb'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v0*= nothing new for '1234|a|source' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= processing updates for '1234|a|x'
+#-v5*= reading './lists/flattest_flatsource_Packages'
+-v2=Created directory "./pool"
+-v2=Created directory "./pool/a"
+-v2=Created directory "./pool/a/t"
+-v2=Created directory "./pool/a/t/test"
+-v0*=Getting packages...
+$(ofa 'pool/a/t/test/test_0_all.deb')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'test' 0 '1234' 'a' 'yyyyyyyyyy' 'deb')
+$(opa 'test' 0 '1234' 'a' 'x' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|a|x'...
+-v6*= replacing './dists/1234/a/binary-x/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'u|1234|a|x'...
+-v6*= looking for changes in '1234|a|yyyyyyyyyy'...
+-v6*= replacing './dists/1234/a/binary-yyyyyyyyyy/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'u|1234|a|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|a|source'...
+-v6*= looking for changes in '1234|bb|x'...
+-v6*= looking for changes in '1234|bb|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|bb|source'...
+EOF
+
+cat > flatsource/Packages <<EOF
+Package: test
+Architecture: yyyyyyyyyy
+Version: 1
+Description: test
+ test
+Filename: flatsource/test.deb
+Size: 0
+MD5Sum: $EMPTYMD5ONLY
+EOF
+pkgmd="$(mdandsize flatsource/Packages)"
+gzip -f flatsource/Packages
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $EMPTYMD5 Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Packages.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Packages.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Packages.gz' into './lists/flattest_flatsource_Packages' using '/bin/gunzip'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/test.deb'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/test.deb'
+-v2*=Linking file '$WORKDIR/flatsource/test.deb' to './pool/a/t/test/test_1_yyyyyyyyyy.deb'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v0*= nothing new for '1234|a|source' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v3*= processing updates for '1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Packages'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= processing updates for '1234|a|x'
+-v2=Created directory "./pool"
+-v2=Created directory "./pool/a"
+-v2=Created directory "./pool/a/t"
+-v2=Created directory "./pool/a/t/test"
+-v0*=Getting packages...
+$(ofa 'pool/a/t/test/test_1_yyyyyyyyyy.deb')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'test' unset 1 '1234' 'a' 'yyyyyyyyyy' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|a|x'...
+-v6*= looking for changes in 'u|1234|a|x'...
+-v6*= looking for changes in '1234|a|yyyyyyyyyy'...
+-v6*= replacing './dists/1234/a/binary-yyyyyyyyyy/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'u|1234|a|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|a|source'...
+-v6*= looking for changes in '1234|bb|x'...
+-v6*= looking for changes in '1234|bb|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|bb|source'...
+EOF
+
+touch fake.dsc
+
+cat > flatsource/Sources <<EOF
+Package: test
+Version: 0
+Description: test
+ test
+Files:
+ $EMPTYMD5 fake.dsc
+EOF
+srcmd="$(mdandsize flatsource/Sources)"
+gzip -f flatsource/Sources
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $srcmd Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Sources.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Sources.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Sources.gz' into './lists/flattest_flatsource_Sources' using '/bin/gunzip'...
+-v6=aptmethod start 'file:$WORKDIR/./fake.dsc'
+-v1*=aptmethod got 'file:$WORKDIR/./fake.dsc'
+-v2*=Linking file '$WORKDIR/./fake.dsc' to './pool/a/t/test/fake.dsc'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v3*= processing updates for '1234|a|source'
+-v0*= nothing new for '1234|a|yyyyyyyyyy' (use --noskipold to process anyway)
+-v4*= nothing to do for 'u|1234|a|yyyyyyyyyy'
+-v5*= reading './lists/flattest_flatsource_Sources'
+-v4*= nothing to do for 'u|1234|a|x'
+-v3*= nothing new for '1234|a|x' (use --noskipold to process anyway)
+-v0*=Getting packages...
+$(ofa 'pool/a/t/test/fake.dsc')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'test' unset '1234' 'a' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|a|x'...
+-v6*= looking for changes in 'u|1234|a|x'...
+-v6*= looking for changes in '1234|a|yyyyyyyyyy'...
+-v6*= looking for changes in 'u|1234|a|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|a|source'...
+-v6*= replacing './dists/1234/a/source/Sources' (gzipped)
+-v6*= looking for changes in '1234|bb|x'...
+-v6*= looking for changes in '1234|bb|yyyyyyyyyy'...
+-v6*= looking for changes in '1234|bb|source'...
+EOF
+
+cat > flatsource/Sources <<EOF
+Package: test
+Version: 1
+Files:
+ $EMPTYMD5 ../fake.dsc
+EOF
+srcmd="$(mdandsize flatsource/Sources)"
+gzip -f flatsource/Sources
+cat > flatsource/Release <<EOF
+MD5Sum:
+ $srcmd Sources
+ $(mdandsize flatsource/Sources.gz) Sources.gz
+ $pkgmd Packages
+ $(mdandsize flatsource/Packages.gz) Packages.gz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Release'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Release'
+-v2*=Copy file '$WORKDIR/flatsource/Release' to './lists/flattest_flatsource_flat_Release'...
+-v6=aptmethod start 'file:$WORKDIR/flatsource/Sources.gz'
+-v1*=aptmethod got 'file:$WORKDIR/flatsource/Sources.gz'
+-v2*=Uncompress '$WORKDIR/flatsource/Sources.gz' into './lists/flattest_flatsource_Sources' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for '1234|bb|source'
+-v4*= nothing to do for '1234|bb|yyyyyyyyyy'
+-v4*= nothing to do for '1234|bb|x'
+-v3*= processing updates for '1234|a|source'
+-v5*= reading './lists/flattest_flatsource_Sources'
+stderr
+*=Character '/' not allowed within filename '../fake.dsc'!
+*=Forbidden characters in source package 'test'!
+*=Stop reading further chunks from './lists/flattest_flatsource_Sources' due to previous errors.
+stdout
+stderr
+-v0*=There have been errors!
+return 255
+EOF
+
+rm -r -f db conf dists pool lists flatsource fake.dsc
+testsuccess
diff --git a/tests/flood.test b/tests/flood.test
new file mode 100644
index 0000000..1541d23
--- /dev/null
+++ b/tests/flood.test
@@ -0,0 +1,744 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir test-1
+mkdir test-1/debian
+cat >test-1/debian/control <<END
+Source: test
+Section: interpreters
+Priority: required
+Maintainer: me <guess@who>
+Standards-Version: 0.0
+
+Package: sibling
+Architecture: any
+Description: bla
+ blub
+
+Package: siblingtoo
+Architecture: any
+Description: bla
+ blub
+
+Package: mytest
+Architecture: all
+Description: bla
+ blub
+END
+cat >test-1/debian/changelog <<END
+test (1-1) test; urgency=critical
+
+ * new upstream release (Closes: #allofthem)
+
+ -- me <guess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+mkdir -p test-1/debian/tmp/DEBIAN
+touch test-1/debian/tmp/best-file-in-the-root
+cd test-1
+DEB_HOST_ARCH="another" dpkg-gencontrol -psibling -v2
+DEB_HOST_ARCH="another" dpkg --build debian/tmp ..
+DEB_HOST_ARCH="another" dpkg-gencontrol -psiblingtoo -v3
+DEB_HOST_ARCH="another" dpkg --build debian/tmp ..
+DEB_HOST_ARCH="another" dpkg-gencontrol -pmytest -v2
+DEB_HOST_ARCH="another" dpkg --build debian/tmp ..
+DEB_HOST_ARCH="another" dpkg-genchanges -b > ../test-1.changes
+DEB_HOST_ARCH="somemore" dpkg-gencontrol -psiblingtoo -v3
+DEB_HOST_ARCH="somemore" dpkg --build debian/tmp ..
+cd ..
+rm -r test-1
+mkdir test-2
+mkdir test-2/debian
+cat >test-2/debian/control <<END
+Source: test
+Section: interpreters
+Priority: required
+Maintainer: me <guess@who>
+Standards-Version: 0.0
+
+Package: sibling
+Architecture: any
+Description: bla
+ blub
+
+Package: siblingalso
+Architecture: any
+Description: bla
+ blub
+
+Package: mytest
+Architecture: all
+Description: bla
+ blub
+END
+cat >test-2/debian/changelog <<END
+test (2-1) test; urgency=critical
+
+ * bla bla bla (Closes: #allofthem)
+
+ -- me <guess@who> Mon, 01 Jan 1980 01:02:02 +0000
+test (1-1) test; urgency=critical
+
+ * new upstream release (Closes: #allofthem)
+
+ -- me <guess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+mkdir -p test-2/debian/tmp/DEBIAN
+touch test-2/debian/tmp/best-file-in-the-root
+cd test-2
+dpkg-gencontrol -psiblingalso -v3.1
+dpkg --build debian/tmp ..
+dpkg-gencontrol -pmytest -v2.4
+dpkg --build debian/tmp ..
+dpkg-gencontrol -psibling -v2.2
+dpkg --build debian/tmp ..
+dpkg-genchanges -b > ../test-2.changes
+rm debian/files
+DEB_HOST_ARCH="another" dpkg-gencontrol -psibling -v2.2
+DEB_HOST_ARCH="another" dpkg --build debian/tmp ..
+dpkg-genchanges -b > ../test-2a.changes
+cd ..
+rm -r test-2
+
+for tracking in false true ; do
+
+mkdir conf
+cat > conf/distributions <<EOF
+Codename: two
+Components: main bad
+Architectures: source abacus another somemore
+EOF
+
+echo "with tracking is $tracking"
+if $tracking ; then
+ echo "Tracking: minimal" >> conf/distributions
+ if test x"${REPREPROOPTIONS#*--verbosedb}" != x"$REPREPROOPTIONS" ; then
+ TRACKINGTESTOPTIONS="-D t=1"
+ else
+ TRACKINGTESTOPTIONS="-D t=0"
+ fi
+else
+ TRACKINGTESTOPTIONS="-D t=0"
+fi
+
+cat >> conf/distributions <<EOF
+
+Codename: test
+Components: main bad
+Architectures: source abacus another somemore
+EOF
+
+testrun - -b . -A another include test test-1.changes 3<<EOF
+stderr
+-v3*=Limiting 'mytest_2_all.deb' to architectures another as requested.
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/t"
+-v2*=Created directory "./pool/main/t/test"
+$(ofa 'pool/main/t/test/mytest_2_all.deb')
+$(ofa 'pool/main/t/test/siblingtoo_3_another.deb')
+$(ofa 'pool/main/t/test/sibling_2_another.deb')
+$(opa 'mytest' 2 'test' 'main' 'another' 'deb')
+$(opa 'siblingtoo' 3 'test' 'main' 'another' 'deb')
+$(opa 'sibling' 2 'test' 'main' 'another' 'deb')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-abacus"
+-v2*=Created directory "./dists/test/main/binary-another"
+-v2*=Created directory "./dists/test/main/binary-somemore"
+-v2*=Created directory "./dists/test/main/source"
+-v2*=Created directory "./dists/test/bad"
+-v2*=Created directory "./dists/test/bad/binary-abacus"
+-v2*=Created directory "./dists/test/bad/binary-another"
+-v2*=Created directory "./dists/test/bad/binary-somemore"
+-v2*=Created directory "./dists/test/bad/source"
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= creating './dists/test/main/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|another'...
+-v6*= creating './dists/test/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= creating './dists/test/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= creating './dists/test/bad/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= creating './dists/test/bad/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= creating './dists/test/bad/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|bad|source'...
+-v6*= creating './dists/test/bad/source/Sources' (gzipped)
+EOF
+
+testrun - -b . -A "abacus" include test test-2.changes 3<<EOF
+stderr
+-v3*=Limiting 'mytest_2.4_all.deb' to architectures abacus as requested.
+stdout
+$(ofa 'pool/main/t/test/mytest_2.4_all.deb')
+$(ofa 'pool/main/t/test/siblingalso_3.1_abacus.deb')
+$(ofa 'pool/main/t/test/sibling_2.2_abacus.deb')
+$(opa 'mytest' x 'test' 'main' 'abacus' 'deb')
+$(opa 'siblingalso' x 'test' 'main' 'abacus' 'deb')
+$(opa 'sibling' x 'test' 'main' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= replacing './dists/test/main/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|another'...
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+if $tracking ; then
+echo "Tracking: minimal" >> conf/distributions
+testrun - -b . retrack test 3<<EOF
+stdout
+*=Retracking test...
+#2 times:
+$(ota 'test' 'test')
+EOF
+fi
+
+testrun - -b . list test 3<<EOF
+stdout
+*=test|main|abacus: mytest 2.4
+*=test|main|abacus: sibling 2.2
+*=test|main|abacus: siblingalso 3.1
+*=test|main|another: mytest 2
+*=test|main|another: sibling 2
+*=test|main|another: siblingtoo 3
+EOF
+
+testrun - -b . flood test 3<<EOF
+stdout
+$(opa 'mytest' x 'test' 'main' 'somemore' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= replacing './dists/test/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+testrun - -b . list test 3<<EOF
+stdout
+*=test|main|abacus: mytest 2.4
+*=test|main|abacus: sibling 2.2
+*=test|main|abacus: siblingalso 3.1
+*=test|main|another: mytest 2
+*=test|main|another: sibling 2
+*=test|main|another: siblingtoo 3
+*=test|main|somemore: mytest 2.4
+EOF
+
+testrun - -b . -C main -A somemore includedeb test siblingtoo_3_somemore.deb 3<<EOF
+stdout
+$(ofa 'pool/main/t/test/siblingtoo_3_somemore.deb')
+$(opa 'siblingtoo' x 'test' 'main' 'somemore' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= replacing './dists/test/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+testrun empty -b . flood test
+
+testrun - -b . -A somemore remove test mytest 3<<EOF
+stdout
+$(opd 'mytest' unset test main somemore deb)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= replacing './dists/test/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+testrun - -b . list test 3<<EOF
+stdout
+*=test|main|abacus: mytest 2.4
+*=test|main|abacus: sibling 2.2
+*=test|main|abacus: siblingalso 3.1
+*=test|main|another: mytest 2
+*=test|main|another: sibling 2
+*=test|main|another: siblingtoo 3
+*=test|main|somemore: siblingtoo 3
+EOF
+
+testrun - -b . flood test 3<<EOF
+stdout
+$(opa 'mytest' x 'test' 'main' 'somemore' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= replacing './dists/test/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+testrun - -b . list test 3<<EOF
+stdout
+*=test|main|abacus: mytest 2.4
+*=test|main|abacus: sibling 2.2
+*=test|main|abacus: siblingalso 3.1
+*=test|main|another: mytest 2
+*=test|main|another: sibling 2
+*=test|main|another: siblingtoo 3
+*=test|main|somemore: siblingtoo 3
+*=test|main|somemore: mytest 2
+EOF
+
+testrun - -b . -C main includedeb test sibling_2.2_another.deb 3<<EOF
+stdout
+$(ofa 'pool/main/t/test/sibling_2.2_another.deb')
+$(opu 'sibling' x x 'test' 'main' 'another' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= replacing './dists/test/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/t/test/sibling_2_another.deb')
+EOF
+
+if $tracking ; then
+testout - -b . dumptracks test 3<<EOF
+EOF
+cat > results.expected <<EOF
+Distribution: test
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/mytest_2_all.deb a 2
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+ pool/main/t/test/siblingtoo_3_somemore.deb b 1
+
+Distribution: test
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 1
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+ pool/main/t/test/sibling_2.2_another.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+testrun - -b . flood test 3<<EOF
+stdout
+$(opu 'mytest' x x 'test' 'main' 'another' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|abacus'...
+-v6*= looking for changes in 'test|main|another'...
+-v6*= replacing './dists/test/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|somemore'...
+-v6*= looking for changes in 'test|main|source'...
+-v6*= looking for changes in 'test|bad|abacus'...
+-v6*= looking for changes in 'test|bad|another'...
+-v6*= looking for changes in 'test|bad|somemore'...
+-v6*= looking for changes in 'test|bad|source'...
+EOF
+
+testrun - -b . list test 3<<EOF
+stdout
+*=test|main|abacus: mytest 2.4
+*=test|main|abacus: sibling 2.2
+*=test|main|abacus: siblingalso 3.1
+*=test|main|another: mytest 2.4
+*=test|main|another: sibling 2.2
+*=test|main|another: siblingtoo 3
+*=test|main|somemore: siblingtoo 3
+*=test|main|somemore: mytest 2
+EOF
+
+if $tracking ; then
+testout - -b . dumptracks test 3<<EOF
+EOF
+cat > results.expected <<EOF
+Distribution: test
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/mytest_2_all.deb a 1
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+ pool/main/t/test/siblingtoo_3_somemore.deb b 1
+
+Distribution: test
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 2
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+ pool/main/t/test/sibling_2.2_another.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+cat > conf/incoming << EOF
+Name: myrule
+Allow: test>two
+Options: limit_arch_all
+IncomingDir: i
+TempDir: tmp
+EOF
+
+ls *.changes
+mkdir i tmp
+cp test-1.changes sibling_2_another.deb siblingtoo_3_another.deb mytest_2_all.deb i/
+
+testrun - -b . processincoming myrule 3<<EOF
+stdout
+$(ofa 'pool/main/t/test/sibling_2_another.deb')
+$(opa 'mytest' x 'two' 'main' 'another' 'deb')
+$(opa 'siblingtoo' x 'two' 'main' 'another' 'deb')
+$(opa 'sibling' x 'two' 'main' 'another' 'deb')
+$(otta 'two' 'test')
+-v1*=deleting './i/mytest_2_all.deb'...
+-v1*=deleting './i/siblingtoo_3_another.deb'...
+-v1*=deleting './i/test-1.changes'...
+-v1*=deleting './i/sibling_2_another.deb'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/two"
+-v2*=Created directory "./dists/two/main"
+-v2*=Created directory "./dists/two/main/binary-abacus"
+-v2*=Created directory "./dists/two/main/binary-another"
+-v2*=Created directory "./dists/two/main/binary-somemore"
+-v2*=Created directory "./dists/two/main/source"
+-v2*=Created directory "./dists/two/bad"
+-v2*=Created directory "./dists/two/bad/binary-abacus"
+-v2*=Created directory "./dists/two/bad/binary-another"
+-v2*=Created directory "./dists/two/bad/binary-somemore"
+-v2*=Created directory "./dists/two/bad/source"
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= creating './dists/two/main/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|another'...
+-v6*= creating './dists/two/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= creating './dists/two/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|source'...
+-v6*= creating './dists/two/main/source/Sources' (gzipped)
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= creating './dists/two/bad/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= creating './dists/two/bad/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= creating './dists/two/bad/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|bad|source'...
+-v6*= creating './dists/two/bad/source/Sources' (gzipped)
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|another: mytest 2
+*=two|main|another: sibling 2
+*=two|main|another: siblingtoo 3
+EOF
+
+if $tracking ; then
+testrun - -b . dumptracks two 3<<EOF
+stdout
+*=Distribution: two
+*=Source: test
+*=Version: 1-1
+*=Files:
+*= pool/main/t/test/sibling_2_another.deb b 1
+*= pool/main/t/test/siblingtoo_3_another.deb b 1
+*= pool/main/t/test/mytest_2_all.deb a 1
+*=
+EOF
+fi
+
+testrun - -b . flood two 3<<EOF
+stdout
+$(opa 'mytest' x 'two' 'main' 'somemore' 'deb')
+$(opa 'mytest' x 'two' 'main' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= replacing './dists/two/main/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|another'...
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= replacing './dists/two/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|source'...
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= looking for changes in 'two|bad|source'...
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|abacus: mytest 2
+*=two|main|another: mytest 2
+*=two|main|another: sibling 2
+*=two|main|another: siblingtoo 3
+*=two|main|somemore: mytest 2
+EOF
+
+if $tracking ; then
+testrun - -b . dumptracks two 3<<EOF
+stdout
+*=Distribution: two
+*=Source: test
+*=Version: 1-1
+*=Files:
+*= pool/main/t/test/sibling_2_another.deb b 1
+*= pool/main/t/test/siblingtoo_3_another.deb b 1
+*= pool/main/t/test/mytest_2_all.deb a 3
+*=
+EOF
+fi
+
+dodo rmdir i
+mkdir i
+
+cp test-2.changes siblingalso_3.1_abacus.deb mytest_2.4_all.deb sibling_2.2_abacus.deb i/
+
+testrun - -b . processincoming myrule 3<<EOF
+stdout
+$(opu 'mytest' x x 'two' 'main' 'abacus' 'deb')
+$(opa 'siblingalso' x 'two' 'main' 'abacus' 'deb')
+$(opa 'sibling' x 'two' 'main' 'abacus' 'deb')
+$(otta 'two' 'test')
+-v1*=deleting './i/mytest_2.4_all.deb'...
+-v1*=deleting './i/siblingalso_3.1_abacus.deb'...
+-v1*=deleting './i/sibling_2.2_abacus.deb'...
+-v1*=deleting './i/test-2.changes'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= replacing './dists/two/main/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|another'...
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= looking for changes in 'two|main|source'...
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= looking for changes in 'two|bad|source'...
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|abacus: mytest 2.4
+*=two|main|abacus: sibling 2.2
+*=two|main|abacus: siblingalso 3.1
+*=two|main|another: mytest 2
+*=two|main|another: sibling 2
+*=two|main|another: siblingtoo 3
+*=two|main|somemore: mytest 2
+EOF
+
+if $tracking ; then
+testout "" -b . dumptracks two
+cat > results.expected <<EOF
+Distribution: two
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/mytest_2_all.deb a 2
+ pool/main/t/test/sibling_2_another.deb b 1
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+
+Distribution: two
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 1
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+testrun - -b . flood two 3<<EOF
+stdout
+$(opu 'mytest' x x 'two' 'main' 'somemore' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= looking for changes in 'two|main|another'...
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= replacing './dists/two/main/binary-somemore/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|source'...
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= looking for changes in 'two|bad|source'...
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|abacus: mytest 2.4
+*=two|main|abacus: sibling 2.2
+*=two|main|abacus: siblingalso 3.1
+*=two|main|another: mytest 2
+*=two|main|another: sibling 2
+*=two|main|another: siblingtoo 3
+*=two|main|somemore: mytest 2.4
+EOF
+
+if $tracking ; then
+testout "" -b . dumptracks two
+cat > results.expected <<EOF
+Distribution: two
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/mytest_2_all.deb a 1
+ pool/main/t/test/sibling_2_another.deb b 1
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+
+Distribution: two
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 2
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+dodo rmdir i
+mkdir i
+cp test-2a.changes sibling_2.2_another.deb i/
+
+testrun - -b . processincoming myrule 3<<EOF
+stdout
+$(opu 'sibling' x x 'two' 'main' 'another' 'deb')
+-v1*=deleting './i/sibling_2.2_another.deb'...
+-v1*=deleting './i/test-2a.changes'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= looking for changes in 'two|main|another'...
+-v6*= replacing './dists/two/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= looking for changes in 'two|main|source'...
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= looking for changes in 'two|bad|source'...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/t/test/sibling_2_another.deb')
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|abacus: mytest 2.4
+*=two|main|abacus: sibling 2.2
+*=two|main|abacus: siblingalso 3.1
+*=two|main|another: mytest 2
+*=two|main|another: sibling 2.2
+*=two|main|another: siblingtoo 3
+*=two|main|somemore: mytest 2.4
+EOF
+
+if $tracking ; then
+testout "" -b . dumptracks two
+cat > results.expected <<EOF
+Distribution: two
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/mytest_2_all.deb a 1
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+
+Distribution: two
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 2
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+ pool/main/t/test/sibling_2.2_another.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+testrun - -b . flood two 3<<EOF
+stdout
+$(opu 'mytest' x x 'two' 'main' 'another' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'two|main|abacus'...
+-v6*= looking for changes in 'two|main|another'...
+-v6*= replacing './dists/two/main/binary-another/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'two|main|somemore'...
+-v6*= looking for changes in 'two|main|source'...
+-v6*= looking for changes in 'two|bad|abacus'...
+-v6*= looking for changes in 'two|bad|another'...
+-v6*= looking for changes in 'two|bad|somemore'...
+-v6*= looking for changes in 'two|bad|source'...
+EOF
+
+testrun - -b . list two 3<<EOF
+stdout
+*=two|main|abacus: mytest 2.4
+*=two|main|abacus: sibling 2.2
+*=two|main|abacus: siblingalso 3.1
+*=two|main|another: mytest 2.4
+*=two|main|another: sibling 2.2
+*=two|main|another: siblingtoo 3
+*=two|main|somemore: mytest 2.4
+EOF
+
+if $tracking ; then
+testout "" -b . dumptracks two
+cat > results.expected <<EOF
+Distribution: two
+Source: test
+Version: 1-1
+Files:
+ pool/main/t/test/siblingtoo_3_another.deb b 1
+
+Distribution: two
+Source: test
+Version: 2-1
+Files:
+ pool/main/t/test/mytest_2.4_all.deb a 3
+ pool/main/t/test/sibling_2.2_abacus.deb b 1
+ pool/main/t/test/siblingalso_3.1_abacus.deb b 1
+ pool/main/t/test/sibling_2.2_another.deb b 1
+
+EOF
+dodiff results.expected results
+fi
+
+rm -r conf dists pool db
+dodo rmdir i tmp
+done
+
+rm *.deb *.changes results results.expected
+testsuccess
diff --git a/tests/genpackage.sh b/tests/genpackage.sh
new file mode 100755
index 0000000..f228918
--- /dev/null
+++ b/tests/genpackage.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+set -e
+#PACKAGE=bloat+-0a9z.app
+#EPOCH=99:
+#VERSION=0.9-A:Z+a:z
+#REVISION=-0+aA.9zZ
+if [ "x$OUTPUT" == "x" ] ; then
+ OUTPUT=${PACKAGE}_${VERSION}${REVISION}_${ARCH:-$(dpkg-architecture -qDEB_HOST_ARCH)}.changes
+fi
+
+DIR="$PACKAGE-$VERSION"
+ARCH="${ARCH:-$(dpkg-architecture -qDEB_HOST_ARCH)}"
+rm -rf "$DIR"
+mkdir "$DIR"
+mkdir "$DIR"/debian
+cat >"$DIR"/debian/control <<END
+Source: $PACKAGE
+Section: $SECTION
+Priority: optional
+Maintainer: me <guess@who>
+Standards-Version: 0.0
+
+Package: $PACKAGE
+Architecture: ${ARCH}
+Description: bla
+ blub
+
+Package: ${PACKAGE}-addons
+Architecture: all
+Description: bla
+ blub
+END
+
+if test -n "${DDEB-}" ; then
+cat >>"$DIR"/debian/control <<END
+
+Package: ${PACKAGE}-dbgsym
+Architecture: ${ARCH}
+Description: ${PACKAGE} debug symbols
+Package-Type: ddeb
+END
+fi
+
+if test -z "$DISTRI" ; then
+ DISTRI=test1
+fi
+cat >"$DIR"/debian/changelog <<END
+$PACKAGE ($EPOCH$VERSION$REVISION) $DISTRI; urgency=critical
+
+ * new upstream release (Closes: #allofthem)
+
+ -- me <guess@who> Mon, 01 Jan 1980 01:02:02 +0000
+END
+
+mkdir -p "$DIR/debian/source"
+if test -z "$REVISION"; then
+ echo "3.0 (native)" > "$DIR/debian/source/format"
+else
+ echo "3.0 (quilt)" > "$DIR/debian/source/format"
+ orig_tarball="${PACKAGE}_${VERSION}.orig.tar.gz"
+ if test ! -f "$orig_tarball"; then
+ tar czvf "$orig_tarball" --files-from /dev/null
+ fi
+fi
+
+dpkg-source -b "$DIR" > /dev/null
+mkdir -p "$DIR"/debian/tmp/DEBIAN
+touch "$DIR"/debian/tmp/x
+mkdir "$DIR"/debian/tmp/a
+touch "$DIR"/debian/tmp/a/1
+mkdir "$DIR"/debian/tmp/dir
+touch "$DIR"/debian/tmp/dir/file
+touch "$DIR"/debian/tmp/dir/another
+mkdir "$DIR"/debian/tmp/dir/subdir
+touch "$DIR"/debian/tmp/dir/subdir/file
+cd "$DIR"
+for pkg in `grep '^Package: ' debian/control | sed -e 's/^Package: //'` ; do
+ case "$pkg" in
+ (*-udeb)
+ deb="${pkg}_${VERSION}${REVISION}_${ARCH}.udeb"
+ ;;
+ (*-dbgsym)
+ deb="${pkg}_${VERSION}${REVISION}_${ARCH}.ddeb"
+ ;;
+ (*-addons)
+ deb="${pkg}_${FAKEVER:-${VERSION}${REVISION}}_all.deb"
+ ;;
+ (*)
+ deb="${pkg}_${VERSION}${REVISION}_${ARCH}.deb"
+ ;;
+ esac
+ if [ "x$pkg" != "x${pkg%-addons}" -a -n "$FAKEVER" ] ; then
+ dpkg-gencontrol -p$pkg -v"$FAKEVER"
+ else
+ dpkg-gencontrol -p$pkg
+ fi
+ dpkg --build debian/tmp ../$deb > /dev/null
+done
+dpkg-genchanges -q "$@" > "$OUTPUT".pre
+# simulate dpkg-genchanges behaviour currently in sid so the testsuite runs for backports, too
+awk 'BEGIN{inheader=0} /^Files:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ; print}' "$OUTPUT".pre | sed -e 's/ \+$//' >../"$OUTPUT"
+echo "Files:" >> ../"$OUTPUT"
+awk 'BEGIN{inheader=0} (inheader && /^ .*\.deb$/) {print ; next} /^Files:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ;next}' "$OUTPUT".pre >>../"$OUTPUT"
+awk 'BEGIN{inheader=0} /^Files:/ || (inheader && /^ .*\.deb$/) {inheader = 1 ; next } (inheader && /^ /) {print ; next} {inheader = 0 ;next}' "$OUTPUT".pre >>../"$OUTPUT"
+cd ..
+rm -r "$DIR"
diff --git a/tests/good.key b/tests/good.key
new file mode 100644
index 0000000..eb79388
--- /dev/null
+++ b/tests/good.key
@@ -0,0 +1,18 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.6 (GNU/Linux)
+
+lQG7BEgsPz8RBADX5e8fzCqw5pxVuzjBbFoK3DqKZ//8wPqoRqu6IvbyqnGI1fCj
+aXgbKPMLBxX4yBSSU47V+o5rQjmAzfQEfDix1NHh1qU3FLqHRLqoop9IQACzgGEp
+qLl4Xyb4SpuMS6zVHiWT5mktlmIByjK3ME2Tog41nT5mvvijHdPOoDZ2PwCg4jSe
+s3vhqQJm1VCFFMOMbnsqkWkEAIMQ/VYH5EawKhCfGDAbPgwbcZ5PAnyi5kF81KcK
+l0T6BxLOZml74Ky7PuKbPgxx0b0MPpBGwjCj2ZiL80phWh/3JWHvCbZ6q9xb6b7Z
+rN5GuX0lwdlkUFFxxq1JIHhAzpTxm/yIfBs4xJtsNqI1fBT6VrfbydUGJRWIVXAG
+0i9xBACORO/dH1eMYxRZTA2029QWwOOkkby1jwVm2AXfw4ZBKtSQKODZWO/OkgC1
++bUn9lsrMvWrcyOqdOBd45iVTezOOlZse4V2VmGbAr2sTSm03f7AMiFvCmsw3uBW
+eH3QC5BkzSkBN4AlixPm6ci/q+2BTcMWW8p8nl2UTuZ6idYQBwAAn3QZLp6CH35G
+9sqCMS5t5Gd0m9QaCbO0S1JlcHJlcHJvIFRlc3RzdWl0ZSBLZXkgMSAoRk9SIFVT
+RSBXSVRISU4gVEVTVFNVSVRFIE9OTFkpIDxnb29kQG5vd2hlcmUudGxkPohgBBMR
+AgAgBQJILD8/AhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQgPTEPtw8Kbi7
+GACePQ6nzWAAtx8H8DxbJjijye47SjwAoMDMrZjHw7oXebKyfPolG4YRXy33
+=qUTa
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/includeasc.test b/tests/includeasc.test
new file mode 100644
index 0000000..ba46079
--- /dev/null
+++ b/tests/includeasc.test
@@ -0,0 +1,221 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir i j k
+
+# first create some fake package with .asc files:
+mkdir pkg-42
+tar -cvvzf pkg_42.orig.tar.gz pkg-42
+echo "Fake .asc file" > pkg_42.orig.tar.gz.asc
+cd pkg-42
+mkdir debian debian/source
+echo "3.0 (quilt)" > debian/source/format
+cat > debian/rules <<'EOF'
+#!/usr/bin/make
+clean:
+ echo clean
+EOF
+chmod a+x debian/rules
+cat > debian/changelog <<EOF
+pkg (42-42) test; urgency=low
+
+  * now answers everything
+
+ -- Sky.NET <nowhere@example.com> Sat, 15 Jan 2000 17:12:05 +2700
+EOF
+
+cat > debian/control <<EOF
+Source: pkg
+Section: doc
+Priority: standard
+Maintainer: Sky.NET <nowhere@example.com>
+Standards-Version: Aleph_17
+
+Package: pkg-doc
+Architecture: all
+Description: pkg
+ pkg
+EOF
+
+cd ..
+dpkg-source -Zgzip -b pkg-42
+cd pkg-42
+
+OUTPUT=test.changes
+dpkg-genchanges > ../j/"$OUTPUT"
+cd ..
+cp pkg_* j/
+
+# now with an .asc filename that does not match:
+mv pkg_42.orig.tar.gz.asc pkg_42.tar.gz.asc
+sed -i 's/orig\.tar\.gz\.asc/tar.gz.asc/' pkg_42-42.dsc
+cd pkg-42
+
+dpkg-genchanges > ../k/broken.changes
+cd ..
+mv pkg_* k/
+rm -r pkg-42
+
+ls j
+cp j/* i/
+
+mkdir conf
+# first check files are properly ignored:
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+EOF
+
+mkdir -p pool/main/p/pkg
+
+testrun - --export=never include test i/test.changes 3<<EOF
+stderr
+=Warning: database 'test|main|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+$(odb)
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz.asc')
+$(ofa 'pool/main/p/pkg/pkg_42-42.debian.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42-42.dsc')
+$(opa 'pkg' x 'test' 'main' 'source' 'dsc')
+EOF
+rm -r pool db
+
+mkdir -p pool/main/p/pkg
+
+cat > conf/incoming <<EOF
+Name: foo
+IncomingDir: i
+TempDir: tmp
+Default: test
+EOF
+mkdir tmp
+
+testrun - --export=never processincoming foo 3<<EOF
+stderr
+=Warning: database 'test|main|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+$(odb)
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz.asc')
+$(ofa 'pool/main/p/pkg/pkg_42-42.debian.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42-42.dsc')
+$(opa 'pkg' x 'test' 'main' 'source' 'dsc')
+-v3*=deleting './i/test.changes'...
+-v3*=deleting './i/pkg_42.orig.tar.gz.asc'...
+-v3*=deleting './i/pkg_42.orig.tar.gz'...
+-v3*=deleting './i/pkg_42-42.dsc'...
+-v3*=deleting './i/pkg_42-42.debian.tar.gz'...
+EOF
+
+cat >> conf/distributions <<EOF
+Tracking: all
+EOF
+rm -r pool db
+cp j/* i/
+mkdir -p pool/main/p/pkg
+
+testrun - --export=never processincoming foo 3<<EOF
+stderr
+=Warning: database 'test|main|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+$(odb)
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz.asc')
+$(ofa 'pool/main/p/pkg/pkg_42-42.debian.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42-42.dsc')
+$(opa 'pkg' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'pkg')
+-v3*=deleting './i/test.changes'...
+-v3*=deleting './i/pkg_42.orig.tar.gz.asc'...
+-v3*=deleting './i/pkg_42.orig.tar.gz'...
+-v3*=deleting './i/pkg_42-42.dsc'...
+-v3*=deleting './i/pkg_42-42.debian.tar.gz'...
+EOF
+
+cat >results.expected <<EOF
+Distribution: test
+Source: pkg
+Version: 42-42
+Files:
+ pool/main/p/pkg/pkg_42-42.dsc s 1
+ pool/main/p/pkg/pkg_42.orig.tar.gz s 1
+ pool/main/p/pkg/pkg_42.orig.tar.gz.asc s 1
+ pool/main/p/pkg/pkg_42-42.debian.tar.gz s 1
+
+EOF
+
+rm -r pool db
+cp j/* i/
+mkdir -p pool/main/p/pkg
+
+testrun - --export=never -C main includedsc test i/pkg_42-42.dsc 3<<EOF
+stderr
+=Warning: database 'test|main|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+$(odb)
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz.asc')
+$(ofa 'pool/main/p/pkg/pkg_42-42.debian.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42-42.dsc')
+$(opa 'pkg' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'pkg')
+EOF
+
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+
+rm -r pool db i
+mkdir -p i pool/main/p/pkg
+cp k/* i/
+
+testrun - --export=never include test i/broken.changes 3<<EOF
+returns 255
+stdout
+$(odb)
+stderr
+*=Signature file without file to be signed: 'pkg_42.tar.gz.asc'!
+-v0*=There have been errors!
+EOF
+
+rm -r pool db i
+mkdir -p i pool/main/p/pkg
+cp k/* i/
+
+testrun - --export=never processincoming foo 3<<EOF
+returns 255
+stdout
+$(odb)
+stderr
+*=Signature file without file to be signed: 'pkg_42.tar.gz.asc'!
+-v0*=There have been errors!
+EOF
+
+rm -r pool db i
+mkdir -p i pool/main/p/pkg
+cp k/* i/
+
+# includedsc does not care....
+testrun - --export=never -C main includedsc test i/pkg_42-42.dsc 3<<EOF
+stderr
+=Warning: database 'test|main|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+$(odb)
+$(ofa 'pool/main/p/pkg/pkg_42.orig.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42.tar.gz.asc')
+$(ofa 'pool/main/p/pkg/pkg_42-42.debian.tar.gz')
+$(ofa 'pool/main/p/pkg/pkg_42-42.dsc')
+$(opa 'pkg' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'pkg')
+EOF
+
+rm -r db pool
+rm -r i j tmp conf results results.expected
+testsuccess
diff --git a/tests/includeextra.test b/tests/includeextra.test
new file mode 100644
index 0000000..33d9cc1
--- /dev/null
+++ b/tests/includeextra.test
@@ -0,0 +1,857 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+# first create a fake package with logs and byhand files:
+mkdir documentation-9876AD
+cd documentation-9876AD
+mkdir debian
+cat > debian/rules <<'EOF'
+#!/usr/bin/make
+tmp = $(CURDIR)/debian/tmp
+binary-indep:
+ install -m 755 -d $(tmp)/DEBIAN $(tmp)/usr/share/doc/documentation
+ echo "I have told you so" > $(tmp)/usr/share/doc/documentation/NEWS
+ gzip -c9 debian/changelog > $(tmp)/usr/share/doc/documentation/changelog.gz
+ chown -R root.root $(tmp) && chmod -R go=rX $(tmp)
+ dpkg-gencontrol -isp
+ dpkg --build $(tmp) ..
+ echo "I forgot" >> ../manifesto.txt
+ echo "What?" >> ../history.txt
+ dpkg-distaddfile manifesto.txt byhand -
+ dpkg-distaddfile history.txt byhand -
+
+.PHONY: clean binary-arch binary-indep binary build build-indep build-arch
+EOF
+chmod a+x debian/rules
+cat > debian/changelog <<EOF
+documentation (9876AD) test; urgency=low
+
+ * everything fixed
+
+ -- Sky.NET <nowhere@example.com> Sat, 15 Jan 2011 17:12:05 +2700
+EOF
+
+cat > debian/control <<EOF
+Source: documentation
+Section: doc
+Priority: standard
+Maintainer: Sky.NET <nowhere@example.com>
+Standards-Version: Aleph_17
+
+Package: documentation
+Architecture: all
+Description: documentation
+ documentation
+EOF
+
+cd ..
+dpkg-source -b documentation-9876AD ""
+cd documentation-9876AD
+
+fakeroot make -f debian/rules binary-indep > ../documentation_9876AD_coal+all.log
+OUTPUT=test.changes
+dpkg-genchanges > "$OUTPUT".pre
+# simulate dpkg-genchanges behaviour currently in sid so the testsuite runs for backports, too
+awk 'BEGIN{inheader=0} /^Files:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ; print}' "$OUTPUT".pre | sed -e 's/ \+$//' >../"$OUTPUT"
+echo "Files:" >> ../"$OUTPUT"
+awk 'BEGIN{inheader=0} (inheader && /^ .*\.deb$/) {print ; next} /^Files:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ;next}' "$OUTPUT".pre >>../"$OUTPUT"
+awk 'BEGIN{inheader=0} (inheader && /^ .*\.txt$/) {print ; next} /^Files:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ;next}' "$OUTPUT".pre >>../"$OUTPUT"
+awk 'BEGIN{inheader=0} /^Files:/ || (inheader && /^ .*\.(deb|txt)$/) {inheader = 1 ; next } (inheader && /^ /) {print ; next} {inheader = 0 ;next}' "$OUTPUT".pre >>../"$OUTPUT"
+cd ..
+rm -r documentation-9876AD
+
+ed -s test.changes <<EOF
+/^Files:/a
+ $(mdandsize documentation_9876AD_coal+all.log) - - documentation_9876AD_coal+all.log
+.
+w
+q
+EOF
+
+mkdir conf
+# first check files are properly ignored:
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+EOF
+
+testrun - include test test.changes 3<<EOF
+stderr
+*=Ignoring byhand file: 'manifesto.txt'!
+*=Ignoring byhand file: 'history.txt'!
+*=Ignoring log file: 'documentation_9876AD_coal+all.log'!
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+rm -r pool dists db
+
+# now include the byhand file:
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+Tracking: minimal includebyhand
+EOF
+
+testrun - include test test.changes 3<<EOF
+stderr
+*=Ignoring log file: 'documentation_9876AD_coal+all.log'!
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+cat >results.expected <<EOF
+Distribution: test
+Source: documentation
+Version: 9876AD
+Files:
+ pool/main/d/documentation/documentation_9876AD.dsc s 1
+ pool/main/d/documentation/documentation_9876AD.tar.gz s 1
+ pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt x 0
+ pool/main/d/documentation/documentation_9876AD_byhand/history.txt x 0
+ pool/main/d/documentation/documentation_9876AD_all.deb a 1
+
+EOF
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+testrun - retrack 3<<EOF
+stdout
+-v1*=Retracking test...
+EOF
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+testrun - _listchecksums 3<<EOF
+stdout
+*=pool/main/d/documentation/documentation_9876AD.dsc $(fullchecksum documentation_9876AD.dsc)
+*=pool/main/d/documentation/documentation_9876AD.tar.gz $(fullchecksum documentation_9876AD.tar.gz)
+*=pool/main/d/documentation/documentation_9876AD_all.deb $(fullchecksum documentation_9876AD_all.deb)
+*=pool/main/d/documentation/documentation_9876AD_byhand/history.txt $(fullchecksum history.txt)
+
+*=pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt $(fullchecksum manifesto.txt)
+EOF
+
+testrun - remove test documentation 3<<EOF
+stdout
+$(opd 'documentation' unset test main coal deb)
+$(opd 'documentation' unset test main source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= replacing './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= replacing './dists/test/main/source/Sources' (gzipped)
+$(otd 'documentation' '9876AD' 'test')
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(ofd 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+-v2*=removed now empty directory ./pool/main/d/documentation/documentation_9876AD_byhand
+-v2*=removed now empty directory ./pool/main/d/documentation
+-v2*=removed now empty directory ./pool/main/d
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+EOF
+dodo test ! -e pool
+rm -r dists db
+
+# now include the log file, too:
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+Tracking: minimal includebyhand includelogs
+EOF
+
+testrun - include test test.changes 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+cat >results.expected <<EOF
+Distribution: test
+Source: documentation
+Version: 9876AD
+Files:
+ pool/main/d/documentation/documentation_9876AD.dsc s 1
+ pool/main/d/documentation/documentation_9876AD.tar.gz s 1
+ pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt x 0
+ pool/main/d/documentation/documentation_9876AD_byhand/history.txt x 0
+ pool/main/d/documentation/documentation_9876AD_all.deb a 1
+ pool/main/d/documentation/documentation_9876AD_coal+all.log l 0
+
+EOF
+
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+
+rm -r dists db pool
+
+# and now everything at once, too:
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: coal source
+Components: main
+Tracking: minimal includebyhand includelogs includechanges
+EOF
+
+testrun - include test test.changes 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_source+all.changes')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+cat >results.expected <<EOF
+Distribution: test
+Source: documentation
+Version: 9876AD
+Files:
+ pool/main/d/documentation/documentation_9876AD.dsc s 1
+ pool/main/d/documentation/documentation_9876AD.tar.gz s 1
+ pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt x 0
+ pool/main/d/documentation/documentation_9876AD_byhand/history.txt x 0
+ pool/main/d/documentation/documentation_9876AD_all.deb a 1
+ pool/main/d/documentation/documentation_9876AD_coal+all.log l 0
+ pool/main/d/documentation/documentation_9876AD_source+all.changes c 0
+
+EOF
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+testrun - _listchecksums 3<<EOF
+stdout
+*=pool/main/d/documentation/documentation_9876AD_coal+all.log $(fullchecksum documentation_9876AD_coal+all.log)
+*=pool/main/d/documentation/documentation_9876AD_source+all.changes $(fullchecksum test.changes)
+*=pool/main/d/documentation/documentation_9876AD.dsc $(fullchecksum documentation_9876AD.dsc)
+*=pool/main/d/documentation/documentation_9876AD.tar.gz $(fullchecksum documentation_9876AD.tar.gz)
+*=pool/main/d/documentation/documentation_9876AD_all.deb $(fullchecksum documentation_9876AD_all.deb)
+*=pool/main/d/documentation/documentation_9876AD_byhand/history.txt $(fullchecksum history.txt)
+
+*=pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt $(fullchecksum manifesto.txt)
+EOF
+
+testrun - remove test documentation 3<<EOF
+stdout
+$(opd 'documentation' unset test main coal deb)
+$(opd 'documentation' unset test main source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= replacing './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'test|main|source'...
+-v6*= replacing './dists/test/main/source/Sources' (gzipped)
+$(otd 'documentation' '9876AD' 'test')
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/d/documentation/documentation_9876AD_source+all.changes')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+$(ofd 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(ofd 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofd 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+-v2*=removed now empty directory ./pool/main/d/documentation/documentation_9876AD_byhand
+-v2*=removed now empty directory ./pool/main/d/documentation
+-v2*=removed now empty directory ./pool/main/d
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+EOF
+dodo test ! -e pool
+rm -r dists db
+
+mkdir i j tmp
+mv *.txt documentation_9876AD* test.changes j/
+cp j/* i/
+cat > conf/incoming <<EOF
+Name: foo
+IncomingDir: i
+TempDir: tmp
+Default: test
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_source+all.changes')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+cat >results.expected <<EOF
+Distribution: test
+Source: documentation
+Version: 9876AD
+Files:
+ pool/main/d/documentation/documentation_9876AD_all.deb a 1
+ pool/main/d/documentation/documentation_9876AD.dsc s 1
+ pool/main/d/documentation/documentation_9876AD.tar.gz s 1
+ pool/main/d/documentation/documentation_9876AD_byhand/history.txt x 0
+ pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt x 0
+ pool/main/d/documentation/documentation_9876AD_coal+all.log l 0
+ pool/main/d/documentation/documentation_9876AD_source+all.changes c 0
+
+EOF
+
+testout - dumptracks test 3<<EOF
+EOF
+dodiff results.expected results
+
+rm -r db pool dists
+
+cp j/* i/
+ed -s conf/distributions <<EOF
+g/^Tracking: /s/include[^ ]*//g
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+stderr
+*=Error: 'test.changes' contains unused file 'documentation_9876AD_coal+all.log'!
+*=(Do Permit: unused_files to conf/incoming to ignore and
+*= additionally Cleanup: unused_files to delete them)
+*=Alternatively, you can also add a LogDir: for 'foo' into conf/incoming
+*=then files like that will be stored there.
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat >> conf/incoming <<EOF
+Logdir: log
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./log"
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000000"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+
+ls log/documentation_9876AD_source+all.0000000 | sort > results
+cat > results.expected <<EOF
+documentation_9876AD_coal+all.log
+history.txt
+manifesto.txt
+test.changes
+EOF
+dodiff results.expected results
+
+rm -r db pool dists
+
+cp j/* i/
+ed -s conf/distributions <<EOF
+g/^Tracking: /d
+a
+Tracking: all includechanges includelogs includebyhand
+.
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000001"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_source+all.changes')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+EOF
+
+ls log/documentation_9876AD_source+all.0000001 | sort > results
+cat > results.expected <<EOF
+documentation_9876AD_coal+all.log
+test.changes
+EOF
+dodiff results.expected results
+
+# Now add a script to manually handle byhand files:
+
+rm -r db pool dists
+
+cp j/* i/
+
+cat >> conf/distributions <<EOF
+ByhandHooks:
+ byhand * manifesto.txt handle-byhand.sh
+EOF
+
+# first without the script, to check the error:
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000002"
+stderr
+*=Error 2 executing './conf/handle-byhand.sh': No such file or directory
+*=Byhandhook './conf/handle-byhand.sh' 'test' 'byhand' '-' 'manifesto.txt' './tmp/manifesto.txt' failed with exit code 255!
+-v0*=There have been errors!
+returns 255
+EOF
+
+# then with the script
+
+cat > conf/handle-byhand.sh <<'EOF'
+#!/bin/sh
+echo "byhand-script called with: " "'$*'" >&2
+EOF
+cat > conf/handle-alternate.sh <<'EOF'
+#!/bin/sh
+echo "alternate-script called with: " "'$*'" >&2
+EOF
+chmod u+x conf/handle-alternate.sh
+chmod u+x conf/handle-byhand.sh
+
+testrun - processincoming foo 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000003"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_source+all.changes')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_coal+all.log')
+-v2*=Created directory "./pool/main/d/documentation/documentation_9876AD_byhand"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/history.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_byhand/manifesto.txt')
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+stderr
+*=byhand-script called with: 'test byhand - manifesto.txt ./tmp/manifesto.txt'
+EOF
+
+ls log/documentation_9876AD_source+all.0000003 | sort > results
+cat > results.expected <<EOF
+documentation_9876AD_coal+all.log
+test.changes
+EOF
+dodiff results.expected results
+
+# then don't install byhand, now the unprocessed ones should end up in the log
+
+rm -r db dists pool
+cp j/* i/
+ed -s conf/distributions <<EOF
+g/^Tracking: /d
+i
+Tracking: all
+.
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000004"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+$(ota 'test' 'documentation')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+stderr
+*=byhand-script called with: 'test byhand - manifesto.txt ./tmp/manifesto.txt'
+EOF
+
+ls log/documentation_9876AD_source+all.0000004 | sort > results
+cat > results.expected <<EOF
+documentation_9876AD_coal+all.log
+history.txt
+test.changes
+EOF
+dodiff results.expected results
+
+# then do without tracking at all
+
+rm -r db dists pool
+cp j/* i/
+ed -s conf/distributions <<EOF
+g/^Tracking: /d
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+-v2*=Created directory "./log/documentation_9876AD_source+all.0000005"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD_coal+all.log'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+stderr
+*=byhand-script called with: 'test byhand - manifesto.txt ./tmp/manifesto.txt'
+EOF
+
+ls log/documentation_9876AD_source+all.0000005 | sort > results
+cat > results.expected <<EOF
+documentation_9876AD_coal+all.log
+history.txt
+test.changes
+EOF
+dodiff results.expected results
+
+# then do without tracking and without log dir
+
+rm -r db dists pool
+cp j/* i/
+ed -s conf/incoming <<EOF
+g/^Logdir: /d
+w
+q
+EOF
+ed -s i/test.changes <<'EOF'
+g/^ .*\.log$/d
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+stderr
+*=Error: 'test.changes' contains unused file 'history.txt'!
+*=(Do Permit: unused_files to conf/incoming to ignore and
+*= additionally Cleanup: unused_files to delete them)
+*=Alternatively, you can also add a LogDir: for 'foo' into conf/incoming
+*=then files like that will be stored there.
+-v0*=There have been errors!
+returns 255
+EOF
+
+# add more hooks:
+
+cat >> conf/distributions <<EOF
+ * * * handle-alternate.sh
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+stderr
+*=byhand-script called with: 'test byhand - manifesto.txt ./tmp/manifesto.txt'
+*=alternate-script called with: 'test byhand - history.txt ./tmp/history.txt'
+EOF
+
+# try a more real-world example.
+
+rm -r db dists pool
+cp j/* i/
+ed -s i/test.changes <<'EOF'
+g/^ .*\.log$/d
+w
+q
+EOF
+ed -s conf/distributions <<EOF
+g/handle-/d
+a
+ byhand - * $SRCDIR/docs/copybyhand.example
+.
+w
+q
+EOF
+
+testrun - processincoming foo 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/d"
+-v2*=Created directory "./pool/main/d/documentation"
+$(ofa 'pool/main/d/documentation/documentation_9876AD_all.deb')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.tar.gz')
+$(ofa 'pool/main/d/documentation/documentation_9876AD.dsc')
+$(opa 'documentation' x 'test' 'main' 'coal' 'deb')
+$(opa 'documentation' x 'test' 'main' 'source' 'dsc')
+-v3*=deleting './i/documentation_9876AD_all.deb'...
+-v3*=deleting './i/documentation_9876AD.tar.gz'...
+-v3*=deleting './i/history.txt'...
+-v3*=deleting './i/manifesto.txt'...
+-v3*=deleting './i/documentation_9876AD.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/test/main"
+-v2*=Created directory "./dists/test/main/binary-coal"
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/main/source"
+-v6*= looking for changes in 'test|main|source'...
+-v6*= creating './dists/test/main/source/Sources' (gzipped)
+stderr
+EOF
+
+ls dists/test/extra/ | sort > results
+cat > results.expected <<EOF
+history.txt
+manifesto.txt
+EOF
+dodiff results.expected results
+
+# TODO: check for multiple distributions
+# some storing some not, and when the handling script is implemented
+
+rm -r db pool dists
+rm -r i j tmp conf results results.expected log
+testsuccess
diff --git a/tests/layeredupdate.test b/tests/layeredupdate.test
new file mode 100644
index 0000000..6bdc381
--- /dev/null
+++ b/tests/layeredupdate.test
@@ -0,0 +1,684 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf dists
+echo "export never" > conf/options
+cat > conf/distributions <<EOF
+Codename: codename1
+Update: notexisting
+Components: component
+Architectures: architecture
+EOF
+cat > conf/updates <<EOF
+Name: chainb5
+From: chainb4
+
+Name: chainb4
+From: chainb3
+
+Name: chainb3
+From: chainb2
+
+Name: chainb2
+From: chainb1
+
+Name: chainb1
+From: circular2
+
+Name: circular1
+From: circular2
+
+Name: circular2
+From: circular1
+
+Name: chaina1
+From: circular1
+
+Name: chaina2
+From: chaina1
+
+Name: chaina3
+From: chaina2
+
+Name: chaina4
+From: chaina3
+
+Name: chaina5
+From: chaina4
+
+Name: chaina6
+From: chaina5
+EOF
+
+mkdir lists db
+
+testrun - -b . update 3<<EOF
+returns 255
+stderr
+*=Error: Update rule 'circular1' part of circular From-referencing.
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > conf/updates <<EOF
+Name: name
+From: broken
+EOF
+
+testrun - -b . update 3<<EOF
+returns 255
+stderr
+*=./conf/updates: Update pattern 'name' references unknown pattern 'broken' via From!
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > conf/updates <<EOF
+EOF
+
+testrun - -b . update 3<<EOF
+returns 255
+stderr
+*=Cannot find definition of upgrade-rule 'notexisting' for distribution 'codename1'!
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > conf/distributions <<EOF
+Codename: codename1
+Update: test
+Components: component
+Architectures: architecture
+EOF
+cat > conf/updates <<EOF
+Name: test
+Components: comonent
+Architectures: achitecture
+VerifyRelease: blindtrust
+Method: file:///notexistant
+EOF
+
+testrun - -b . update 3<<EOF
+returns 255
+stderr
+*=Warning parsing ./conf/updates, line 2: unknown component 'comonent' will be ignored!
+*=Warning parsing ./conf/updates, line 3: unknown architecture 'achitecture' will be ignored!
+-v6=aptmethod start 'file:///notexistant/dists/codename1/InRelease'
+*=aptmethod error receiving 'file:///notexistant/dists/codename1/InRelease':
+-v6=aptmethod start 'file:///notexistant/dists/codename1/Release'
+*=aptmethod error receiving 'file:///notexistant/dists/codename1/Release':
+='File not found'
+='File not found - /notexistant/dists/codename1/Release (2: No such file or directory)'
+='File not found - /notexistant/dists/codename1/InRelease (2: No such file or directory)'
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > conf/updates <<EOF
+Name: test
+Components: comonent
+EOF
+
+rm -r db
+mkdir db
+
+cat > conf/distributions <<EOF
+Codename: codename1
+Components: a bb
+UDebComponents: a
+Architectures: x yyyyyyyyyy source
+Update: a b - b c
+
+Codename: codename2
+Components: a bb
+Architectures: x yyyyyyyyyy
+Update: c - a
+EOF
+cat > conf/updates <<EOF
+Name: base
+VerifyRelease: blindtrust
+Method: file:$WORKDIR/testsource
+Components: error1
+
+Name: a
+Components: a
+From: base
+
+Name: b
+Components: a
+From: base
+
+Name: c
+From: base
+EOF
+
+#testrun - -b . update 3<<EOF
+#returns 255
+#stderr
+#-v0*=There have been errors!
+#stdout
+#EOF
+
+cat > conf/updates <<EOF
+Name: base
+VerifyRelease: blindtrust
+Method: file:$WORKDIR/testsource
+Suite: test
+
+Name: a
+Suite: codename1
+From: base
+
+Name: b
+Suite: codename2
+DownloadListsAs: .gz .lzma
+From: base
+
+Name: c
+Suite: *
+From: base
+EOF
+
+testrun - -b . update codename2 3<<EOF
+returns 255
+stderr
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/Release'
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/Release'
+*=aptmethod error receiving 'file:$WORKDIR/testsource/dists/codename1/InRelease':
+*=aptmethod error receiving 'file:$WORKDIR/testsource/dists/codename1/Release':
+*=aptmethod error receiving 'file:$WORKDIR/testsource/dists/codename2/InRelease':
+*=aptmethod error receiving 'file:$WORKDIR/testsource/dists/codename2/Release':
+='File not found'
+='File not found - $WORKDIR/testsource/dists/codename1/InRelease (2: No such file or directory)'
+='File not found - $WORKDIR/testsource/dists/codename2/InRelease (2: No such file or directory)'
+='File not found - $WORKDIR/testsource/dists/codename1/Release (2: No such file or directory)'
+='File not found - $WORKDIR/testsource/dists/codename2/Release (2: No such file or directory)'
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir testsource testsource/dists testsource/dists/codename1 testsource/dists/codename2
+touch testsource/dists/codename1/InRelease testsource/dists/codename2/InRelease
+
+testrun - -b . update codename2 3<<EOF
+returns 255
+stderr
+-v6*=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6*=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+*=Missing checksums in Release file './lists/base_codename2_InRelease'!
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > testsource/dists/codename1/InRelease <<EOF
+Codename: codename1
+Architectures: x yyyyyyyyyy
+Components: a bb
+MD5Sum:
+EOF
+cat > testsource/dists/codename2/InRelease <<EOF
+Codename: codename2
+Architectures: x yyyyyyyyyy
+Components: a bb
+MD5Sum:
+EOF
+
+testrun - -b . update codename2 3<<EOF
+returns 254
+stderr
+-v6*=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6*=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=Could not find 'a/binary-x/Packages' within './lists/base_codename2_InRelease'
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir -p testsource/dists/codename1/a/debian-installer/binary-x
+touch testsource/dists/codename1/a/debian-installer/binary-x/Packages
+mkdir -p testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy
+touch testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename1/a/binary-x
+touch testsource/dists/codename1/a/binary-x/Packages
+mkdir -p testsource/dists/codename1/a/binary-yyyyyyyyyy
+touch testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename1/a/source
+touch testsource/dists/codename1/a/source/Sources
+mkdir -p testsource/dists/codename1/bb/binary-x
+touch testsource/dists/codename1/bb/binary-x/Packages
+mkdir -p testsource/dists/codename1/bb/binary-yyyyyyyyyy
+touch testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename1/bb/source
+touch testsource/dists/codename1/bb/source/Sources
+
+cat > testsource/dists/codename1/InRelease <<EOF
+Codename: codename1
+Architectures: x yyyyyyyyyy
+Components: a bb
+MD5Sum:
+ 11111111111111111111111111111111 17 bb/source/Sources.lzma
+ $(cd testsource ; md5releaseline codename1 a/debian-installer/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename1 a/debian-installer/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename1 a/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename1 a/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename1 a/source/Sources)
+ $(cd testsource ; md5releaseline codename1 bb/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename1 bb/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename1 bb/source/Sources)
+EOF
+
+mkdir -p testsource/dists/codename2/a/binary-x
+touch testsource/dists/codename2/a/binary-x/Packages
+mkdir -p testsource/dists/codename2/a/binary-yyyyyyyyyy
+touch testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename2/bb/binary-x
+touch testsource/dists/codename2/bb/binary-x/Packages
+mkdir -p testsource/dists/codename2/bb/binary-yyyyyyyyyy
+touch testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename2/a/debian-installer/binary-x
+touch testsource/dists/codename2/a/debian-installer/binary-x/Packages
+mkdir -p testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy
+touch testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages
+mkdir -p testsource/dists/codename2/a/source
+touch testsource/dists/codename2/a/source/Sources
+mkdir -p testsource/dists/codename2/bb/source
+touch testsource/dists/codename2/bb/source/Sources
+
+cat > testsource/dists/codename2/InRelease <<EOF
+Codename: codename2
+Architectures: x yyyyyyyyyy
+Components: a bb
+MD5Sum:
+ $(cd testsource ; md5releaseline codename2 a/debian-installer/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename2 a/debian-installer/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename2 a/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename2 a/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename2 a/source/Sources)
+ $(cd testsource ; md5releaseline codename2 bb/binary-x/Packages)
+ $(cd testsource ; md5releaseline codename2 bb/binary-yyyyyyyyyy/Packages)
+ $(cd testsource ; md5releaseline codename2 bb/source/Sources)
+EOF
+
+lzma testsource/dists/codename2/a/binary-x/Packages
+lzma testsource/dists/codename2/a/source/Sources
+lzma testsource/dists/codename2/bb/source/Sources
+lzma testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages
+lzma testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages
+lzma testsource/dists/codename2/bb/binary-x/Packages
+lzma testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages
+lzma testsource/dists/codename2/a/debian-installer/binary-x/Packages
+
+cat >> testsource/dists/codename2/InRelease <<EOF
+ $(cd testsource ; md5releaseline codename2 a/debian-installer/binary-x/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 a/debian-installer/binary-yyyyyyyyyy/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 a/binary-x/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 a/binary-yyyyyyyyyy/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 a/source/Sources.lzma)
+ $(cd testsource ; md5releaseline codename2 bb/binary-x/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 bb/binary-yyyyyyyyyy/Packages.lzma)
+ $(cd testsource ; md5releaseline codename2 bb/source/Sources.lzma)
+EOF
+
+
+testout - -b . update codename2 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/a/binary-x/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/a/binary-x/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/a/binary-x/Packages' to './lists/base_codename1_a_x_Packages'...
+-v1=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/bb/binary-x/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/bb/binary-x/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/bb/binary-x/Packages' to './lists/base_codename1_bb_x_Packages'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages' to './lists/base_codename1_a_yyyyyyyyyy_Packages'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages' to './lists/base_codename1_bb_yyyyyyyyyy_Packages'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma' into './lists/base_codename2_a_x_Packages' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma' into './lists/base_codename2_bb_x_Packages' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma' into './lists/base_codename2_bb_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+EOF
+
+true > results.expected
+if [ $verbosity -ge 0 ] ; then
+echo "Calculating packages to get..." > results.expected ; fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename2|bb|yyyyyyyyyy'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected ; fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename2|bb|x'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected ; fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename2|a|yyyyyyyyyy'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected ; fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename2|a|x'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected ; fi
+dodiff results.expected results
+mv results.expected results2.expected
+
+testout - -b . update codename1 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-x/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-x/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-x/Packages' to './lists/base_codename1_a_x_uPackages'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages' to './lists/base_codename1_a_yyyyyyyyyy_uPackages'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-x/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-x/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-x/Packages.lzma' into './lists/base_codename2_a_x_uPackages' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_uPackages' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/a/source/Sources'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/a/source/Sources'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/a/source/Sources' to './lists/base_codename1_a_Sources'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/bb/source/Sources.lzma'
+-v1*=aptmethod error receiving 'file:$WORKDIR/testsource/dists/codename1/bb/source/Sources.lzma':
+-v1='File not found'
+-v1='File not found - $WORKDIR/testsource/dists/codename1/bb/source/Sources.lzma (2: No such file or directory)'
+-v1='<File not there, apt-method suggests '$WORKDIR/testsource/dists/codename1/bb/source/Sources' instead>'
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/bb/source/Sources'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/bb/source/Sources'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/bb/source/Sources' to './lists/base_codename1_bb_Sources'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/a/source/Sources.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/a/source/Sources.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/a/source/Sources.lzma' into './lists/base_codename2_a_Sources' using '/usr/bin/unlzma'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/bb/source/Sources.lzma'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/bb/source/Sources.lzma'
+-v2*=Uncompress '$WORKDIR/testsource/dists/codename2/bb/source/Sources.lzma' into './lists/base_codename2_bb_Sources' using '/usr/bin/unlzma'...
+EOF
+
+ed -s testsource/dists/codename1/InRelease <<EOF
+g/^ 11111111111111111/d
+w
+q
+EOF
+
+true > results.expected
+if [ $verbosity -ge 0 ] ; then
+echo "Calculating packages to get..." > results.expected ; fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|bb|source'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_bb_Sources'" >>results.expected
+echo " reading './lists/base_codename2_bb_Sources'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_bb_Sources'" >>results.expected
+echo " reading './lists/base_codename1_bb_Sources'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|bb|yyyyyyyyyy'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected
+echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected
+echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|bb|x'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected
+echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected
+echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|a|source'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_a_Sources'" >>results.expected
+echo " reading './lists/base_codename2_a_Sources'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_a_Sources'" >>results.expected
+echo " reading './lists/base_codename1_a_Sources'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'u|codename1|a|yyyyyyyyyy'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_a_yyyyyyyyyy_uPackages'" >>results.expected
+echo " reading './lists/base_codename2_a_yyyyyyyyyy_uPackages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_a_yyyyyyyyyy_uPackages'" >>results.expected
+echo " reading './lists/base_codename1_a_yyyyyyyyyy_uPackages'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|a|yyyyyyyyyy'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected
+echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected
+echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'u|codename1|a|x'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_a_x_uPackages'" >>results.expected
+echo " reading './lists/base_codename2_a_x_uPackages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_a_x_uPackages'" >>results.expected
+echo " reading './lists/base_codename1_a_x_uPackages'" >>results.expected
+fi
+if [ $verbosity -ge 3 ] ; then
+echo " processing updates for 'codename1|a|x'" >>results.expected ; fi
+if [ $verbosity -ge 5 ] ; then
+echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected
+echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected
+echo " marking everything to be deleted" >>results.expected
+echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected
+echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected
+fi
+dodiff results.expected results
+
+testrun - -b . update codename2 codename1 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+stdout
+-v0*=Nothing to do found. (Use --noskipold to force processing)
+EOF
+dodo rm lists/_codename*
+testout - -b . update codename2 codename1 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename1/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename2/InRelease' to './lists/base_codename2_InRelease'...
+-v6=aptmethod start 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'file:$WORKDIR/testsource/dists/codename2/InRelease'
+-v2*=Copy file '$WORKDIR/testsource/dists/codename1/InRelease' to './lists/base_codename1_InRelease'...
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+EOF
+grep '^C' results.expected > resultsboth.expected || true
+grep '^ ' results2.expected >> resultsboth.expected || true
+grep '^ ' results.expected >> resultsboth.expected || true
+grep '^[^ C]' results.expected >> resultsboth.expected || true
+dodiff resultsboth.expected results
+
+sed -i -e "s/Method: file:/Method: copy:/" conf/updates
+
+dodo rm lists/_codename*
+testout - -b . update codename1 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+EOF
+dodiff results.expected results
+
+rm -r lists ; mkdir lists
+
+testout - -b . update codename2 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/a/binary-x/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/a/binary-x/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/bb/binary-x/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/bb/binary-x/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/a/binary-yyyyyyyyyy/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/bb/binary-yyyyyyyyyy/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_a_x_Packages.lzma' into './lists/base_codename2_a_x_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_bb_x_Packages.lzma' into './lists/base_codename2_bb_x_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_a_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_bb_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_bb_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+EOF
+dodiff results2.expected results
+
+testout - -b . update codename1 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-x/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-x/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/a/debian-installer/binary-yyyyyyyyyy/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-x/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-x/Packages.lzma'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/debian-installer/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_a_x_uPackages.lzma' into './lists/base_codename2_a_x_uPackages' using '/usr/bin/unlzma'...
+-v2*=Uncompress './lists/base_codename2_a_yyyyyyyyyy_uPackages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_uPackages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/a/source/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/a/source/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/bb/source/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/bb/source/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/source/Sources.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/source/Sources.lzma'
+-v2*=Uncompress './lists/base_codename2_a_Sources.lzma' into './lists/base_codename2_a_Sources' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/bb/source/Sources.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/bb/source/Sources.lzma'
+-v2*=Uncompress './lists/base_codename2_bb_Sources.lzma' into './lists/base_codename2_bb_Sources' using '/usr/bin/unlzma'...
+EOF
+dodiff results.expected results
+
+# Test repositories without uncompressed files listed:
+printf '%%g/^ .*[^a]$/d\nw\nq\n' | ed -s testsource/dists/codename2/InRelease
+# lists/_codename* no longer has to be deleted, as without the uncompressed checksums
+# reprepro does not know it already processed those (it only saves the uncompressed
+# checksums of the already processed files)
+
+# As the checksums for the uncompressed files are not known, and the .lzma files
+# not saved, the lzma files have to be downloaded again:
+testout - -b . update codename2 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/binary-x/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_a_x_Packages.lzma' into './lists/base_codename2_a_x_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-x/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_bb_x_Packages.lzma' into './lists/base_codename2_bb_x_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/a/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_a_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/bb/binary-yyyyyyyyyy/Packages.lzma'
+-v2*=Uncompress './lists/base_codename2_bb_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_bb_yyyyyyyyyy_Packages' using '/usr/bin/unlzma'...
+EOF
+dodiff results2.expected results
+
+# last time the .lzma files should have not been deleted, so no download
+# but uncompress has still to be done...
+testout - -b . update codename2 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename1/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/testsource/dists/codename2/InRelease'
+*=WARNING: No signature found in ./lists/base_codename1_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/base_codename2_InRelease, assuming it is unsigned!
+-v2*=Uncompress './lists/base_codename2_a_x_Packages.lzma' into './lists/base_codename2_a_x_Packages'...
+-v2*=Uncompress './lists/base_codename2_bb_x_Packages.lzma' into './lists/base_codename2_bb_x_Packages'...
+-v2*=Uncompress './lists/base_codename2_a_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_a_yyyyyyyyyy_Packages'...
+-v2*=Uncompress './lists/base_codename2_bb_yyyyyyyyyy_Packages.lzma' into './lists/base_codename2_bb_yyyyyyyyyy_Packages'...
+EOF
+dodiff results2.expected results
+
+rm -r -f db conf dists pool lists testsource
+testsuccess
diff --git a/tests/layeredupdate2.test b/tests/layeredupdate2.test
new file mode 100644
index 0000000..ce6166d
--- /dev/null
+++ b/tests/layeredupdate2.test
@@ -0,0 +1,683 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf dists
+echo "export silent-never" > conf/options
+cat > conf/updatelog.sh <<EOF
+#!/bin/sh
+echo "\$@" >> '$WORKDIR/updatelog'
+exit 0
+EOF
+cat > conf/shouldnothappen.sh <<EOF
+#!/bin/sh
+echo "\$@" >> '$WORKDIR/shouldnothappen'
+exit 0
+EOF
+chmod a+x conf/updatelog.sh conf/shouldnothappen.sh
+cat > conf/distributions <<EOF
+Codename: boring
+Suite: unstable
+Components: main firmware
+Architectures: abacus coal source
+Log:
+ --via update updatelog.sh
+ --via include shouldnothappen.sh
+Update: - 1 2 3 4
+
+Codename: interesting
+Suite: experimental
+Components: main firmware
+Architectures: abacus coal source
+Update: 5 6 - 7 8
+
+Codename: dummy
+Components: dummycomponent
+Architectures: dummyarchitecture
+EOF
+mkdir source1 source2
+cat > conf/updates <<EOF
+Name: a
+VerifyRelease: blindtrust
+Method: copy:$WORKDIR/source1
+Architectures: dummyarchitecture
+DownloadListsAs: .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma .lzma
+Components: dummycomponent
+
+Name: b
+VerifyRelease: blindtrust
+Method: copy:$WORKDIR/source2
+Architectures: dummyarchitecture
+DownloadListsAs: .lzma .bz2 .gz .
+Flat: dummycomponent
+
+Name: ca
+From: a
+Architectures: dummyarchitecture
+Components: dummycomponent
+
+Name: ma
+From: ca
+Architectures: dummyarchitecture
+Components: main
+
+Name: wa
+From: ma
+Suite: suitename
+Architectures: source
+
+Name: 3
+From: wa
+
+Name: 4
+Suite: suitename
+From: a
+Architectures: abacus coal
+# without this no warning is produced; reason unclear (TODO: investigate)
+Components: main firmware
+
+Name: pre1
+Flat: firmware
+From: b
+# without this no warning is produced; reason unclear (TODO: investigate)
+Architectures: abacus coal
+FilterFormula: section (>=firmware/), section(<< firmware0)
+
+Name: 1
+From: pre1
+Suite: x
+
+Name: 2
+Flat: main
+From: b
+# without this no warning is produced; reason unclear (TODO: investigate)
+Architectures: abacus coal source
+FilterFormula: section (<<firmware/) | section(>= firmware0) | !section
+Suite: x
+
+Name: 5
+From: b
+
+Name: 6
+From: b
+
+Name: 7
+From: b
+
+Name: 8
+From: b
+EOF
+
+DISTRI=dummy PACKAGE=aa EPOCH="" VERSION=1 REVISION=-1000 SECTION="base" genpackage.sh -sa
+DISTRI=dummy PACKAGE=bb EPOCH="" VERSION=2 REVISION=-0 SECTION="firmware/base" genpackage.sh -sa
+DISTRI=dummy PACKAGE=cc EPOCH="" VERSION=1 REVISION=-1000 SECTION="base" genpackage.sh -sa
+DISTRI=dummy PACKAGE=dd EPOCH="" VERSION=2 REVISION=-0 SECTION="firmware/base" genpackage.sh -sa
+
+mkdir source1/pool source1/pool/main source1/pool/firmware
+mv aa* source1/pool/main
+mv bb* source1/pool/firmware
+mv cc* source2
+mv dd* source2
+
+mkdir source2/x
+cd source2
+echo 'dpkg-scanpackages . /dev/null > x/Packages'
+dpkg-scanpackages . /dev/null > x/Packages
+cd ..
+cat > sourcesections <<EOF
+cc standard base
+dd standard firmware/base
+EOF
+cd source2
+echo 'dpkg-scansources . sourcesections > x/Sources'
+dpkg-scansources . ../sourcesections > x/Sources
+cd ..
+rm sourcesections
+
+cat > source2/x/InRelease <<EOF
+Codename: x
+Suite: toostupidfornonflat
+Architectures: coal abacus
+MD5Sum:
+ $(mdandsize source2/x/Sources) Sources
+ $(mdandsize source2/x/Packages) Packages
+EOF
+
+mkdir -p source1/dists/suitename/main/binary-abacus
+mkdir source1/dists/suitename/main/binary-coal
+mkdir source1/dists/suitename/main/source
+mkdir -p source1/dists/suitename/firmware/binary-abacus
+mkdir source1/dists/suitename/firmware/binary-coal
+mkdir source1/dists/suitename/firmware/source
+
+cd source1
+dpkg-scansources pool/main /dev/null > dists/suitename/main/source/Sources
+dpkg-scanpackages pool/main /dev/null > dists/suitename/main/binary-abacus/Packages
+dpkg-scanpackages -a coal pool/main /dev/null > dists/suitename/main/binary-coal/Packages
+dpkg-scansources pool/firmware /dev/null > dists/suitename/firmware/source/Sources
+dpkg-scanpackages pool/firmware /dev/null > dists/suitename/firmware/binary-abacus/Packages
+dpkg-scanpackages -a coal pool/firmware /dev/null > dists/suitename/firmware/binary-coal/Packages
+cd ..
+
+cat > source1/dists/suitename/InRelease <<EOF
+Codename: hohoho
+Suite: suitename
+Architectures: coal abacus
+MD5Sum:
+ 00000000000000000000000000000000 0 main/binary-abacus/Packages.lz
+ 00000000000000000000000000000000 0 main/binary-abacus/Packages.gz
+ 00000000000000000000000000000000 0 main/binary-coal/Packages.lz
+ 00000000000000000000000000000000 0 main/source/Sources.lz
+ 00000000000000000000000000000000 0 firmware/binary-abacus/Packages.lz
+ 00000000000000000000000000000000 0 firmware/binary-coal/Packages.lz
+ 00000000000000000000000000000000 0 firmware/source/Sources.lz
+ 00000000000000000000000000000000 0 main/binary-coal/Packages.gz
+ 00000000000000000000000000000000 0 main/source/Sources.gz
+ 00000000000000000000000000000000 0 firmware/binary-abacus/Packages.gz
+ 00000000000000000000000000000000 0 firmware/binary-coal/Packages.gz
+ 00000000000000000000000000000000 0 firmware/source/Sources.gz
+EOF
+
+sed -e 's/\.lzma/.lz/' -i conf/updates
+
+testrun - --lunzip=NONE update boring 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./lists"
+stderr
+*=./conf/updates:5:124: Ignoring all but first 18 entries...
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+*=Error: './lists/a_suitename_InRelease' only lists unusable or unrequested compressions of 'main/binary-abacus/Packages'.
+*=Try e.g the '--lunzip' option (or check what it is set to) to make more useable.
+*=Or change your DownloadListsAs to request e.g. '.gz'.
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat > source1/dists/suitename/InRelease <<EOF
+Codename: hohoho
+Suite: suitename
+Architectures: coal abacus
+MD5Sum:
+ 00000000000000000000000000000000 0 main/binary-abacus/Packages.lz
+ 00000000000000000000000000000000 0 main/binary-coal/Packages.lz
+ 00000000000000000000000000000000 0 main/source/Sources.lz
+ 00000000000000000000000000000000 0 firmware/binary-abacus/Packages.lz
+ 00000000000000000000000000000000 0 firmware/binary-coal/Packages.lz
+ 00000000000000000000000000000000 0 firmware/source/Sources.lz
+EOF
+
+testrun - --lunzip=NONE update boring 3<<EOF
+stderr
+*=./conf/updates:5:124: Ignoring all but first 18 entries...
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+*=Error: './lists/a_suitename_InRelease' only lists unusable compressions of 'main/binary-abacus/Packages'.
+*=Try e.g the '--lunzip' option (or check what it is set to) to make more useable.
+-v0*=There have been errors!
+returns 255
+EOF
+
+sed -e 's/\.lz\>/.lzma/' -i conf/updates
+
+cat > source1/dists/suitename/InRelease <<EOF
+Codename: hohoho
+Suite: suitename
+Architectures: coal abacus
+MD5Sum:
+ $(cd source1 ; md5releaseline suitename main/binary-abacus/Packages)
+ $(cd source1 ; md5releaseline suitename main/binary-coal/Packages)
+ $(cd source1 ; md5releaseline suitename main/source/Sources)
+ $(cd source1 ; md5releaseline suitename firmware/binary-abacus/Packages)
+ $(cd source1 ; md5releaseline suitename firmware/binary-coal/Packages)
+ $(cd source1 ; md5releaseline suitename firmware/source/Sources)
+EOF
+
+testrun - update boring 3<<EOF
+stderr
+*=./conf/updates:5:126: Ignoring all but first 18 entries...
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+*=Error: './lists/a_suitename_InRelease' only lists unrequested compressions of 'main/binary-abacus/Packages'.
+*=Try changing your DownloadListsAs to request e.g. '.'.
+-v0*=There have been errors!
+returns 255
+EOF
+
+ed -s conf/updates <<EOF
+g/.lzma .lzma .lzma .lzma/d
+w
+q
+EOF
+
+testrun - update boring 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/source/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/binary-coal/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/source/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-coal/Packages'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'boring|firmware|source'
+# 6 times:
+-v5*= marking everything to be deleted
+-v3*= processing updates for 'boring|firmware|coal'
+-v5*= reading './lists/a_suitename_firmware_coal_Packages'
+-v3*= processing updates for 'boring|firmware|abacus'
+-v5*= reading './lists/a_suitename_firmware_abacus_Packages'
+-v3*= processing updates for 'boring|main|source'
+-v5*= reading './lists/b_x_Sources'
+-v5*= reading './lists/a_suitename_main_Sources'
+-v3*= processing updates for 'boring|main|coal'
+-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_coal_Packages'
+-v3*= processing updates for 'boring|main|abacus'
+#-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_abacus_Packages'
+-v0*=Getting packages...
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./cc-addons_1-1000_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./dd-addons_2-0_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./cc_1-1000_abacus.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./dd_2-0_abacus.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./cc_1-1000.tar.gz'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/./cc_1-1000.dsc'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./cc-addons_1-1000_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./dd-addons_2-0_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./cc_1-1000_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./dd_2-0_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./cc_1-1000.tar.gz'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/./cc_1-1000.dsc'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa-addons_1-1000_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/bb-addons_2-0_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_1-1000_abacus.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/bb_2-0_abacus.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_1-1000.tar.gz'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_1-1000.dsc'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa-addons_1-1000_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/bb-addons_2-0_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_1-1000_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/bb_2-0_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_1-1000.tar.gz'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_1-1000.dsc'
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/firmware"
+-v2*=Created directory "./pool/firmware/b"
+-v2*=Created directory "./pool/firmware/b/bb"
+-v2*=Created directory "./pool/firmware/d"
+-v2*=Created directory "./pool/firmware/d/dd"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/c"
+-v2*=Created directory "./pool/main/c/cc"
+-v2*=Created directory "./pool/main/a"
+-v2*=Created directory "./pool/main/a/aa"
+$(ofa 'pool/firmware/d/dd/dd-addons_2-0_all.deb')
+$(ofa 'pool/firmware/d/dd/dd_2-0_abacus.deb')
+$(ofa 'pool/main/c/cc/cc-addons_1-1000_all.deb')
+$(ofa 'pool/main/c/cc/cc_1-1000_abacus.deb')
+$(ofa 'pool/main/c/cc/cc_1-1000.dsc')
+$(ofa 'pool/main/c/cc/cc_1-1000.tar.gz')
+$(ofa 'pool/firmware/b/bb/bb-addons_2-0_all.deb')
+$(ofa 'pool/firmware/b/bb/bb_2-0_abacus.deb')
+$(ofa 'pool/main/a/aa/aa-addons_1-1000_all.deb')
+$(ofa 'pool/main/a/aa/aa_1-1000_abacus.deb')
+$(ofa 'pool/main/a/aa/aa_1-1000.dsc')
+$(ofa 'pool/main/a/aa/aa_1-1000.tar.gz')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'dd-addons' x 'boring' 'firmware' 'coal' 'deb')
+$(opa 'dd' x 'boring' 'firmware' 'abacus' 'deb')
+$(opa 'dd-addons' x 'boring' 'firmware' 'abacus' 'deb')
+$(opa 'cc-addons' x 'boring' 'main' 'coal' 'deb')
+$(opa 'cc' x 'boring' 'main' 'source' 'dsc')
+$(opa 'cc' x 'boring' 'main' 'abacus' 'deb')
+$(opa 'cc-addons' x 'boring' 'main' 'abacus' 'deb')
+$(opa 'bb-addons' x 'boring' 'firmware' 'coal' 'deb')
+$(opa 'bb' x 'boring' 'firmware' 'abacus' 'deb')
+$(opa 'bb-addons' x 'boring' 'firmware' 'abacus' 'deb')
+$(opa 'aa-addons' x 'boring' 'main' 'coal' 'deb')
+$(opa 'aa' x 'boring' 'main' 'source' 'dsc')
+$(opa 'aa' x 'boring' 'main' 'abacus' 'deb')
+$(opa 'aa-addons' x 'boring' 'main' 'abacus' 'deb')
+EOF
+
+DISTRI=dummy PACKAGE=aa EPOCH="" VERSION=2 REVISION=-1 SECTION="base" genpackage.sh -sa
+DISTRI=dummy PACKAGE=bb EPOCH="" VERSION=1 REVISION=-1 SECTION="firmware/base" genpackage.sh -sa
+DISTRI=dummy PACKAGE=ee EPOCH="" VERSION=2 REVISION=-1 SECTION="firmware/base" genpackage.sh -sa
+
+rm source1/pool/firmware/bb*
+mv aa* source1/pool/main
+mv ee* bb* source1/pool/firmware
+
+cd source1
+dpkg-scansources pool/main /dev/null > dists/suitename/main/source/Sources
+dpkg-scanpackages pool/main /dev/null > dists/suitename/main/binary-abacus/Packages
+dpkg-scanpackages -a coal pool/main /dev/null > dists/suitename/main/binary-coal/Packages
+dpkg-scansources pool/firmware /dev/null > dists/suitename/firmware/source/Sources
+dpkg-scanpackages pool/firmware /dev/null > dists/suitename/firmware/binary-abacus/Packages
+dpkg-scanpackages -a coal pool/firmware /dev/null > dists/suitename/firmware/binary-coal/Packages
+cd ..
+
+cat > source1/dists/suitename/InRelease <<EOF
+Codename: hohoho
+Suite: suitename
+Architectures: coal abacus
+MD5Sum:
+ $(cd source1 ; md5releaseline suitename main/binary-abacus/Packages)
+ $(cd source1 ; md5releaseline suitename main/binary-coal/Packages)
+ $(cd source1 ; md5releaseline suitename main/source/Sources)
+ $(cd source1 ; md5releaseline suitename firmware/binary-abacus/Packages)
+ $(cd source1 ; md5releaseline suitename firmware/binary-coal/Packages)
+ $(cd source1 ; md5releaseline suitename firmware/source/Sources)
+EOF
+
+sed -e 's/Update: - 1/Update: 1/' -i conf/distributions
+ed -s conf/updates <<EOF
+1a
+FilterList: upgradeonly
+.
+w
+q
+EOF
+
+testrun - --keepunreferenced update boring 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/source/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/main/binary-coal/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/source/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/main/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/firmware/binary-coal/Packages'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for 'boring|firmware|source'
+-v3*= processing updates for 'boring|firmware|coal'
+-v5*= reading './lists/a_suitename_firmware_coal_Packages'
+-v3*= processing updates for 'boring|firmware|abacus'
+-v5*= reading './lists/a_suitename_firmware_abacus_Packages'
+-v3*= processing updates for 'boring|main|source'
+-v5*= reading './lists/b_x_Sources'
+-v5*= reading './lists/a_suitename_main_Sources'
+-v3*= processing updates for 'boring|main|coal'
+-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_coal_Packages'
+-v3*= processing updates for 'boring|main|abacus'
+#-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_abacus_Packages'
+-v0*=Getting packages...
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa-addons_2-1_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_2-1_abacus.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_2-1.tar.gz'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/main/aa_2-1.dsc'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa-addons_2-1_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_2-1_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_2-1.tar.gz'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/main/aa_2-1.dsc'
+stdout
+$(ofa 'pool/main/a/aa/aa-addons_2-1_all.deb')
+$(ofa 'pool/main/a/aa/aa_2-1_abacus.deb')
+$(ofa 'pool/main/a/aa/aa_2-1.dsc')
+$(ofa 'pool/main/a/aa/aa_2-1.tar.gz')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'aa-addons' x x 'boring' 'main' 'coal' 'deb')
+$(opu 'aa' x x 'boring' 'main' 'source' 'dsc')
+$(opu 'aa' x x 'boring' 'main' 'abacus' 'deb')
+$(opu 'aa-addons' x x 'boring' 'main' 'abacus' 'deb')
+-v1*=4 files lost their last reference.
+-v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.)
+EOF
+
+# remove the upgradeonly filter again, letting ee in
+ed -s conf/updates <<EOF
+%g/FilterList: upgradeonly/d
+w
+q
+EOF
+
+testrun - --keepunreferenced update boring 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Nothing to do found. (Use --noskipold to force processing)
+EOF
+
+testrun - --nolistsdownload --keepunreferenced update boring 3<<EOF
+stderr
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Nothing to do found. (Use --noskipold to force processing)
+EOF
+
+testrun - --nolistsdownload --noskipold --keepunreferenced update boring 3<<EOF
+stderr
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Calculating packages to get...
+-v4*= nothing to do for 'boring|firmware|source'
+-v3*= processing updates for 'boring|firmware|coal'
+-v5*= reading './lists/a_suitename_firmware_coal_Packages'
+-v3*= processing updates for 'boring|firmware|abacus'
+-v5*= reading './lists/a_suitename_firmware_abacus_Packages'
+-v3*= processing updates for 'boring|main|source'
+-v5*= reading './lists/b_x_Sources'
+-v5*= reading './lists/a_suitename_main_Sources'
+-v3*= processing updates for 'boring|main|coal'
+-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_coal_Packages'
+-v3*= processing updates for 'boring|main|abacus'
+#-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_abacus_Packages'
+-v0*=Getting packages...
+-v2*=Created directory "./pool/firmware/e"
+-v2*=Created directory "./pool/firmware/e/ee"
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/ee-addons_2-1_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/ee_2-1_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/ee-addons_2-1_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/ee_2-1_abacus.deb'
+stdout
+$(ofa 'pool/firmware/e/ee/ee-addons_2-1_all.deb')
+$(ofa 'pool/firmware/e/ee/ee_2-1_abacus.deb')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'ee-addons' x 'boring' 'firmware' 'coal' 'deb')
+$(opa 'ee' x 'boring' 'firmware' 'abacus' 'deb')
+$(opa 'ee-addons' x 'boring' 'firmware' 'abacus' 'deb')
+EOF
+
+# reinsert the delete rule; this should cause a downgrade of bb
+sed -e 's/Update: 1/Update: - 1/' -i conf/distributions
+
+# changes to the clean rules cause automatic reprocessing, so no new --noskipold is needed here
+
+testrun - --keepunreferenced update boring 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/dists/suitename/InRelease'
+*=WARNING: No signature found in ./lists/a_suitename_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'boring|firmware|source'
+# 6 times:
+-v5*= marking everything to be deleted
+-v3*= processing updates for 'boring|firmware|coal'
+-v5*= reading './lists/a_suitename_firmware_coal_Packages'
+-v3*= processing updates for 'boring|firmware|abacus'
+-v5*= reading './lists/a_suitename_firmware_abacus_Packages'
+-v3*= processing updates for 'boring|main|source'
+-v5*= reading './lists/b_x_Sources'
+-v5*= reading './lists/a_suitename_main_Sources'
+-v3*= processing updates for 'boring|main|coal'
+-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_coal_Packages'
+-v3*= processing updates for 'boring|main|abacus'
+#-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/a_suitename_main_abacus_Packages'
+-v0*=Getting packages...
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/bb-addons_1-1_all.deb'
+-v6*=aptmethod start 'copy:$WORKDIR/source1/pool/firmware/bb_1-1_abacus.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/bb-addons_1-1_all.deb'
+-v1*=aptmethod got 'copy:$WORKDIR/source1/pool/firmware/bb_1-1_abacus.deb'
+*=Warning: downgrading 'bb-addons' from '2-0' to '1-1' in 'boring|firmware|coal'!
+*=Warning: downgrading 'bb' from '2-0' to '1-1' in 'boring|firmware|abacus'!
+*=Warning: downgrading 'bb-addons' from '2-0' to '1-1' in 'boring|firmware|abacus'!
+stdout
+$(ofa 'pool/firmware/b/bb/bb-addons_1-1_all.deb')
+$(ofa 'pool/firmware/b/bb/bb_1-1_abacus.deb')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'bb-addons' x x 'boring' 'firmware' 'coal' 'deb')
+$(opu 'bb' x x 'boring' 'firmware' 'abacus' 'deb')
+$(opu 'bb-addons' x x 'boring' 'firmware' 'abacus' 'deb')
+stdout
+-v1*=2 files lost their last reference.
+-v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.)
+EOF
+
+# Now it gets evil: name the flat and the non-flat distribution the same
+dodo sed -i -e 's/suitename/x/' source1/dists/suitename/InRelease
+mv source1/dists/suitename source1/dists/x
+mv source1/dists source2/dists
+dodo sed -i -e 's/suitename/x/' -e 's/^From: a$/From: b/' -e 's/Flat: dummycomponent/#&/' conf/updates
+
+testrun - update boring 3<<EOF
+stderr
+-v0*=Warning: From the same remote repository 'copy:${WORKDIR}/source2', distribution 'x'
+-v0*=is requested both flat and non-flat. While this is possible
+-v0*=(having copy:${WORKDIR}/source2/dists/x and copy:${WORKDIR}/source2/x), it is unlikely.
+-v0*=To no longer see this message, use --ignore=flatandnonflat.
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/main/source/Sources'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/main/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/main/binary-coal/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/firmware/binary-abacus/Packages'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/firmware/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/main/source/Sources'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/main/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/main/binary-coal/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/firmware/binary-abacus/Packages'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/firmware/binary-coal/Packages'
+*=WARNING: No signature found in ./lists/b_x_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Calculating packages to get...
+-v0*= nothing new for 'boring|firmware|source' (use --noskipold to process anyway)
+-v3*= processing updates for 'boring|firmware|coal'
+# 5 times:
+-v5*= marking everything to be deleted
+-v5*= reading './lists/b_x_firmware_coal_Packages'
+-v3*= processing updates for 'boring|firmware|abacus'
+-v5*= reading './lists/b_x_firmware_abacus_Packages'
+-v3*= processing updates for 'boring|main|source'
+-v5*= reading './lists/b_x_Sources'
+-v5*= reading './lists/b_x_main_Sources'
+-v3*= processing updates for 'boring|main|coal'
+-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/b_x_main_coal_Packages'
+-v3*= processing updates for 'boring|main|abacus'
+#-v5*= reading './lists/b_x_Packages'
+-v5*= reading './lists/b_x_main_abacus_Packages'
+stderr
+EOF
+
+testrun - --ignore=flatandnonflat update boring 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/source2/x/InRelease'
+-v6*=aptmethod start 'copy:$WORKDIR/source2/dists/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/x/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/source2/dists/x/InRelease'
+*=WARNING: No signature found in ./lists/b_x_InRelease, assuming it is unsigned!
+*=WARNING: No signature found in ./lists/b_x_flat_InRelease, assuming it is unsigned!
+stdout
+-v0*=Nothing to do found. (Use --noskipold to force processing)
+stderr
+EOF
+
+cat > results.expected <<EOF
+add boring deb firmware coal bb-addons 2-0 -- pool/firmware/b/bb/bb-addons_2-0_all.deb
+add boring deb firmware coal dd-addons 2-0 -- pool/firmware/d/dd/dd-addons_2-0_all.deb
+add boring deb firmware abacus bb 2-0 -- pool/firmware/b/bb/bb_2-0_abacus.deb
+add boring deb firmware abacus bb-addons 2-0 -- pool/firmware/b/bb/bb-addons_2-0_all.deb
+add boring deb firmware abacus dd 2-0 -- pool/firmware/d/dd/dd_2-0_abacus.deb
+add boring deb firmware abacus dd-addons 2-0 -- pool/firmware/d/dd/dd-addons_2-0_all.deb
+add boring dsc main source aa 1-1000 -- pool/main/a/aa/aa_1-1000.dsc pool/main/a/aa/aa_1-1000.tar.gz
+add boring dsc main source cc 1-1000 -- pool/main/c/cc/cc_1-1000.dsc pool/main/c/cc/cc_1-1000.tar.gz
+add boring deb main coal aa-addons 1-1000 -- pool/main/a/aa/aa-addons_1-1000_all.deb
+add boring deb main coal cc-addons 1-1000 -- pool/main/c/cc/cc-addons_1-1000_all.deb
+add boring deb main abacus aa 1-1000 -- pool/main/a/aa/aa_1-1000_abacus.deb
+add boring deb main abacus aa-addons 1-1000 -- pool/main/a/aa/aa-addons_1-1000_all.deb
+add boring deb main abacus cc 1-1000 -- pool/main/c/cc/cc_1-1000_abacus.deb
+add boring deb main abacus cc-addons 1-1000 -- pool/main/c/cc/cc-addons_1-1000_all.deb
+replace boring dsc main source aa 2-1 1-1000 -- pool/main/a/aa/aa_2-1.dsc pool/main/a/aa/aa_2-1.tar.gz -- pool/main/a/aa/aa_1-1000.dsc pool/main/a/aa/aa_1-1000.tar.gz
+replace boring deb main coal aa-addons 2-1 1-1000 -- pool/main/a/aa/aa-addons_2-1_all.deb -- pool/main/a/aa/aa-addons_1-1000_all.deb
+replace boring deb main abacus aa 2-1 1-1000 -- pool/main/a/aa/aa_2-1_abacus.deb -- pool/main/a/aa/aa_1-1000_abacus.deb
+replace boring deb main abacus aa-addons 2-1 1-1000 -- pool/main/a/aa/aa-addons_2-1_all.deb -- pool/main/a/aa/aa-addons_1-1000_all.deb
+add boring deb firmware coal ee-addons 2-1 -- pool/firmware/e/ee/ee-addons_2-1_all.deb
+add boring deb firmware abacus ee 2-1 -- pool/firmware/e/ee/ee_2-1_abacus.deb
+add boring deb firmware abacus ee-addons 2-1 -- pool/firmware/e/ee/ee-addons_2-1_all.deb
+replace boring deb firmware coal bb-addons 1-1 2-0 -- pool/firmware/b/bb/bb-addons_1-1_all.deb -- pool/firmware/b/bb/bb-addons_2-0_all.deb
+replace boring deb firmware abacus bb 1-1 2-0 -- pool/firmware/b/bb/bb_1-1_abacus.deb -- pool/firmware/b/bb/bb_2-0_abacus.deb
+replace boring deb firmware abacus bb-addons 1-1 2-0 -- pool/firmware/b/bb/bb-addons_1-1_all.deb -- pool/firmware/b/bb/bb-addons_2-0_all.deb
+EOF
+
+dodo test ! -f shouldnothappen
+dodiff results.expected updatelog
+rm updatelog results.expected
+rm -r -f db conf dists pool lists source1 source2 test.changes
+testsuccess
diff --git a/tests/listcodenames.test b/tests/listcodenames.test
new file mode 100644
index 0000000..90b08a2
--- /dev/null
+++ b/tests/listcodenames.test
@@ -0,0 +1,41 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+testrun - -b . _listcodenames 3<<EOF
+return 254
+stderr
+*=Error opening config file './conf/distributions': No such file or directory(2)
+-v0*=There have been errors!
+stdout
+EOF
+mkdir -p conf
+touch conf/distributions
+testrun - -b . _listcodenames 3<<EOF
+return 249
+stderr
+*=No distribution definitions found in ./conf/distributions!
+-v0*=There have been errors!
+stdout
+EOF
+cat > conf/distributions <<EOF
+Codename: foo/updates
+Suite: suitename
+Components: a bb ccc dddd
+UDebComponents: a dddd
+Architectures: x source
+EOF
+testrun - -b . _listcodenames 3<<EOF
+stderr
+stdout
+*=foo/updates
+EOF
+testrun - -b . --nothingiserror _listcodenames 3<<EOF
+stderr
+stdout
+*=foo/updates
+EOF
+dodo test ! -d db
+dodo test ! -d pool
+dodo test ! -d dists
+rm -r -f conf
+testsuccess
diff --git a/tests/morgue.test b/tests/morgue.test
new file mode 100644
index 0000000..a302ec2
--- /dev/null
+++ b/tests/morgue.test
@@ -0,0 +1,276 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat > conf/options <<EOF
+morguedir ./morgue
+export silent-never
+EOF
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: source
+Components: main
+EOF
+cat > fake.dsc <<EOF
+Format: 1.0
+Source: bla
+Binary: bla
+Architecture: all
+Section: whatever
+Priority: important
+Version: 1.7
+Maintainer: nobody <nobody@localhost>
+Files:
+EOF
+
+
+testrun - -C main includedsc test fake.dsc 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/b"
+-v2*=Created directory "./pool/main/b/bla"
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+$(opa 'bla' unset 'test' 'main' 'source' 'dsc')
+EOF
+
+testrun - remove test bla 3<<EOF
+stdout
+$(opd 'bla' unset test main source dsc)
+-v0*=Deleting files no longer referenced...
+-v2*=Created directory "./morgue"
+-v2*=removed now empty directory ./pool/main/b/bla
+-v2*=removed now empty directory ./pool/main/b
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+EOF
+
+ls -la morgue
+dodo test -f morgue/bla_1.7.dsc
+dodo test ! -e pool
+
+rm -r morgue
+# test what happens if one cannot write there:
+mkdir morgue
+chmod a-w morgue
+
+testrun - -C main includedsc test fake.dsc 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/b"
+-v2*=Created directory "./pool/main/b/bla"
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+$(opa 'bla' unset 'test' 'main' 'source' 'dsc')
+EOF
+
+testrun - remove test bla 3<<EOF
+stdout
+$(opd 'bla' unset test main source dsc)
+-v0*=Deleting files no longer referenced...
+-v1*=deleting and forgetting pool/main/b/bla/bla_1.7.dsc
+stderr
+*=error 13 creating morgue-file ./morgue/bla_1.7.dsc: Permission denied
+-v0*=There have been errors!
+returns 243
+EOF
+
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+EOF
+dodiff results.expected results
+
+# if it could not be moved to the morgue, it should stay in the pool:
+testrun - dumpunreferenced 3<<EOF
+stdout
+*=pool/main/b/bla/bla_1.7.dsc
+EOF
+
+# and deleting it there of course fails again:
+testrun - deleteunreferenced 3<<EOF
+stdout
+-v1*=deleting and forgetting pool/main/b/bla/bla_1.7.dsc
+stderr
+*=error 13 creating morgue-file ./morgue/bla_1.7.dsc: Permission denied
+-v0*=There have been errors!
+returns 243
+EOF
+
+# if it could not be moved to the morgue, it should stay in the pool:
+testrun - dumpunreferenced 3<<EOF
+stdout
+*=pool/main/b/bla/bla_1.7.dsc
+EOF
+
+chmod u+w morgue
+
+# now it should work:
+testrun - deleteunreferenced 3<<EOF
+stdout
+-v2*=removed now empty directory ./pool/main/b/bla
+-v2*=removed now empty directory ./pool/main/b
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+EOF
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+EOF
+dodiff results.expected results
+# and be gone:
+testrun empty dumpunreferenced
+
+
+
+ls -la morgue
+dodo test -f morgue/bla_1.7.dsc
+dodo test ! -e pool
+
+# Next test: what if the file is missing?
+
+testrun - -C main includedsc test fake.dsc 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/b"
+-v2*=Created directory "./pool/main/b/bla"
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+$(opa 'bla' unset 'test' 'main' 'source' 'dsc')
+EOF
+
+dodo rm pool/main/b/bla/bla_1.7.dsc
+
+testrun - remove test bla 3<<EOF
+stdout
+$(opd 'bla' unset test main source dsc)
+-v0*=Deleting files no longer referenced...
+stderr
+*=./pool/main/b/bla/bla_1.7.dsc not found, forgetting anyway
+stdout
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+EOF
+
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+EOF
+dodiff results.expected results
+
+# Next test: file cannot be moved
+
+testrun - -C main includedsc test fake.dsc 3<<EOF
+stdout
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+$(opa 'bla' unset 'test' 'main' 'source' 'dsc')
+EOF
+
+dodo chmod a-w pool/main/b/bla
+
+testrun - remove test bla 3<<EOF
+stdout
+$(opd 'bla' unset test main source dsc)
+-v0*=Deleting files no longer referenced...
+-v1*=deleting and forgetting pool/main/b/bla/bla_1.7.dsc
+stderr
+*=error 13 while unlinking ./pool/main/b/bla/bla_1.7.dsc: Permission denied
+-v0*=There have been errors!
+returns 243
+EOF
+
+dodo chmod u+w pool/main/b/bla
+
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+EOF
+dodiff results.expected results
+testrun - dumpunreferenced 3<<EOF
+stdout
+*=pool/main/b/bla/bla_1.7.dsc
+EOF
+
+# now it should work:
+testrun - deleteunreferenced 3<<EOF
+stdout
+-v2*=removed now empty directory ./pool/main/b/bla
+-v2*=removed now empty directory ./pool/main/b
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+EOF
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+morgue/bla_1.7.dsc-1
+EOF
+dodiff results.expected results
+# and be gone:
+testrun empty dumpunreferenced
+
+# Test symbolic link:
+testrun - -C main includedsc test fake.dsc 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/main"
+-v2*=Created directory "./pool/main/b"
+-v2*=Created directory "./pool/main/b/bla"
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+$(opa 'bla' unset 'test' 'main' 'source' 'dsc')
+EOF
+
+dodo mv pool/main/b/bla/bla_1.7.dsc pool/main/b/bla/bla_1.7.dscc
+dodo ln -s bla_1.7.dscc pool/main/b/bla/bla_1.7.dsc
+
+testrun - remove test bla 3<<EOF
+stdout
+$(opd 'bla' unset test main source dsc)
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+EOF
+
+ls -l morgue
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+morgue/bla_1.7.dsc-1
+EOF
+dodiff results.expected results
+
+dodo mv pool/main/b/bla/bla_1.7.dscc pool/main/b/bla/bla_1.7.dsc
+testrun - _detect pool/main/b/bla/bla_1.7.dsc 3<<EOF
+stdout
+$(ofa 'pool/main/b/bla/bla_1.7.dsc')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+
+dodo chmod a-r pool/main/b/bla/bla_1.7.dsc
+testrun - deleteunreferenced 3<<EOF
+stdout
+$(ofd 'pool/main/b/bla/bla_1.7.dsc')
+-v2*=removed now empty directory ./pool/main/b/bla
+-v2*=removed now empty directory ./pool/main/b
+-v2*=removed now empty directory ./pool/main
+-v2*=removed now empty directory ./pool
+EOF
+ls -l morgue
+find morgue -mindepth 1 | sort > results
+cat > results.expected <<EOF
+morgue/bla_1.7.dsc
+morgue/bla_1.7.dsc-1
+morgue/bla_1.7.dsc-2
+EOF
+dodiff results.expected results
+
+# TODO: is there a way to check if failing copying is handled correctly?
+# that needs a file not readable, not renameable to morgue, but can be unlinked...
+
+# TODO: check if things like a failed include work correctly
+# (they should only copy things to the morgue that were in the pool previously)
+
+dodo test ! -e pool
+rm -r db morgue fake.dsc conf results results.expected
+testsuccess
diff --git a/tests/multiversion.sh b/tests/multiversion.sh
new file mode 100755
index 0000000..a7b83ee
--- /dev/null
+++ b/tests/multiversion.sh
@@ -0,0 +1,353 @@
+#!/bin/sh
+set -u
+
+# Copyright (C) 2017, Benjamin Drung <benjamin.drung@profitbricks.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+. "${0%/*}/shunit2-helper-functions.sh"
+
+oneTimeSetUp() {
+ for revision in 1 2 2+deb8u1 10; do
+ mkdir -p "$PKGS"
+ (cd $PKGS && PACKAGE=hello SECTION=main DISTRI=buster EPOCH="" VERSION=2.9 REVISION=-$revision ../genpackage.sh)
+ done
+}
+
+setUp() {
+ create_repo
+ echo "Limit: -1" >> $REPO/conf/distributions
+}
+
+tearDown() {
+ check_db
+}
+
+four_hellos() {
+ for revision in 1 2 2+deb8u1 10; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+}
+
+test_ls() {
+ (cd $PKGS && PACKAGE=kvm SECTION=main DISTRI=buster VERSION=1.2.1 REVISION=-8 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=kvm SECTION=main DISTRI=buster VERSION=1.2.1 REVISION=-9 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=appdirs SECTION=main DISTRI=buster VERSION=1.3.0 REVISION=-1 ../genpackage.sh)
+ for package in hello_2.9-1 kvm_1.2.1-8 kvm_1.2.1-9 appdirs_1.3.0-1; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/${package}_${ARCH}.deb
+ done
+ assertEquals "\
+kvm | 1.2.1-9 | buster | $ARCH
+kvm | 1.2.1-8 | buster | $ARCH" "$($REPREPRO -b $REPO ls kvm)"
+ assertEquals "\
+buster|main|$ARCH: kvm 1.2.1-9
+buster|main|$ARCH: kvm 1.2.1-8" "$($REPREPRO -b $REPO list buster kvm)"
+}
+
+test_sorting() {
+ four_hellos
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-10
+buster|main|$ARCH: hello 2.9-2+deb8u1
+buster|main|$ARCH: hello 2.9-2
+buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "\
+hello | 2.9-10 | buster | $ARCH
+hello | 2.9-2+deb8u1 | buster | $ARCH
+hello | 2.9-2 | buster | $ARCH
+hello | 2.9-1 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_include_twice() {
+ for revision in 1 2; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+ assertEquals "\
+hello | 2.9-2 | buster | $ARCH
+hello | 2.9-1 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_copy_latest() {
+ four_hellos
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster hello hello
+ assertEquals "bullseye|main|$ARCH: hello 2.9-10" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_copy_specific() {
+ four_hellos
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster hello=2.9-10 hello=2.9-1 hello=2.9-10
+ assertEquals "\
+bullseye|main|$ARCH: hello 2.9-10
+bullseye|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_remove_latest() {
+ four_hellos
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster hello=2.9-10 hello=2.9-1 hello=2.9-10
+ call $REPREPRO $VERBOSE_ARGS -b $REPO remove bullseye hello
+ assertEquals "\
+hello | 2.9-10 | buster | $ARCH
+hello | 2.9-2+deb8u1 | buster | $ARCH
+hello | 2.9-2 | buster | $ARCH
+hello | 2.9-1 | buster | $ARCH
+hello | 2.9-1 | bullseye | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+}
+
+test_remove_specific() {
+ four_hellos
+ call $REPREPRO $VERBOSE_ARGS -b $REPO remove buster hello=2.9-2+deb8u1 hellox hello=2.9-2+deb8u1
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-10
+buster|main|$ARCH: hello 2.9-2
+buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_removefilter() {
+ (cd $PKGS && PACKAGE=kvm SECTION=main DISTRI=buster VERSION=1.2.1 REVISION=-8 ../genpackage.sh)
+ (cd $PKGS && PACKAGE=kvm SECTION=main DISTRI=buster VERSION=1.2.1 REVISION=-9 ../genpackage.sh)
+ for package in hello_2.9-1 kvm_1.2.1-8 kvm_1.2.1-9; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/${package}_${ARCH}.deb
+ done
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-1
+buster|main|$ARCH: kvm 1.2.1-9
+buster|main|$ARCH: kvm 1.2.1-8" "$($REPREPRO -b $REPO list buster)"
+
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO copy bullseye buster kvm
+ assertEquals "bullseye|main|$ARCH: kvm 1.2.1-9" "$($REPREPRO -b $REPO list bullseye)"
+
+ call $REPREPRO $VERBOSE_ARGS -b $REPO removefilter buster "Package (= kvm)"
+ assertEquals "buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertTrue "kvm_1.2.1-8_$ARCH.deb is still in the pool!" "test ! -e $REPO/pool/main/k/kvm/kvm_1.2.1-8_$ARCH.deb"
+ assertTrue "kvm_1.2.1-9_$ARCH.deb is missing from the pool!" "test -e $REPO/pool/main/k/kvm/kvm_1.2.1-9_$ARCH.deb"
+
+ call $REPREPRO $VERBOSE_ARGS -b $REPO removefilter bullseye "Package (= kvm)"
+ assertEquals "buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertTrue "kvm_1.2.1-9_$ARCH.deb is still in the pool!" "test ! -e $REPO/pool/main/k/kvm/kvm_1.2.1-9_$ARCH.deb"
+}
+
+test_readd_distribution() {
+ # Test case for https://github.com/profitbricks/reprepro/issues/1
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-1_${ARCH}.deb
+
+ # Add distribution
+ cp $REPO/conf/distributions $REPO/conf/distributions.backup
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb bullseye $PKGS/hello_2.9-2_${ARCH}.deb
+
+ # Remove distribution
+ mv $REPO/conf/distributions.backup $REPO/conf/distributions
+ call $REPREPRO $VERBOSE_ARGS -b $REPO --delete clearvanished
+
+ # Re-add distribution again
+ echo "I: Re-adding bullseye..."
+ add_distro bullseye "Limit: -1"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb bullseye $PKGS/hello_2.9-10_${ARCH}.deb
+ assertEquals "bullseye|main|$ARCH: hello 2.9-10" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_limit3() {
+ sed -i 's/^Limit: .*$/Limit: 3/' $REPO/conf/distributions
+ for revision in 1 2 2+deb8u1; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-2+deb8u1
+buster|main|${ARCH}: hello 2.9-2
+buster|main|${ARCH}: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-10_${ARCH}.deb
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-10
+buster|main|${ARCH}: hello 2.9-2+deb8u1
+buster|main|${ARCH}: hello 2.9-2" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_reduce_limit() {
+ for revision in 1 2 2+deb8u1; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-2+deb8u1
+buster|main|${ARCH}: hello 2.9-2
+buster|main|${ARCH}: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ sed -i 's/^Limit: .*$/Limit: 1/' $REPO/conf/distributions
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-10_${ARCH}.deb
+ assertEquals "buster|main|${ARCH}: hello 2.9-10" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "\
+Distribution: buster
+Source: hello
+Version: 2.9-10
+Files:
+ pool/main/h/hello/hello_2.9-10_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_reduce_limit_archive() {
+ clear_distro
+ add_distro buster-archive "Limit: 7"
+ add_distro buster "Limit: -1\nArchive: buster-archive"
+ for revision in 1 2; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-2
+buster|main|${ARCH}: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ sed -i 's/^Limit: -1$/Limit: 1/' $REPO/conf/distributions
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-10_${ARCH}.deb
+ assertEquals "\
+hello | 2.9-2 | buster-archive | $ARCH
+hello | 2.9-1 | buster-archive | $ARCH
+hello | 2.9-10 | buster | $ARCH" "$($REPREPRO -b $REPO ls hello)"
+ assertEquals "\
+Distribution: buster-archive
+Source: hello
+Version: 2.9-1
+Files:
+ pool/main/h/hello/hello_2.9-1_$ARCH.deb b 1
+
+Distribution: buster-archive
+Source: hello
+Version: 2.9-2
+Files:
+ pool/main/h/hello/hello_2.9-2_$ARCH.deb b 1
+
+Distribution: buster
+Source: hello
+Version: 2.9-10
+Files:
+ pool/main/h/hello/hello_2.9-10_$ARCH.deb b 1" "$($REPREPRO -b $REPO dumptracks)"
+}
+
+test_limit_old() {
+ for revision in 1 2 10; do
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-${revision}_${ARCH}.deb
+ done
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-10
+buster|main|${ARCH}: hello 2.9-2
+buster|main|${ARCH}: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ sed -i 's/^Limit: .*$/Limit: 2/' $REPO/conf/distributions
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb buster $PKGS/hello_2.9-2+deb8u1_${ARCH}.deb
+ assertEquals "\
+buster|main|${ARCH}: hello 2.9-10
+buster|main|${ARCH}: hello 2.9-2+deb8u1" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_update_packages() {
+ # Test case for https://github.com/profitbricks/reprepro/issues/6
+ local upstream_repo
+ upstream_repo="${0%/*}/upstreamrepo"
+
+ four_hellos
+ rm -rf "$upstream_repo"
+ mv "$REPO" "$upstream_repo"
+
+ mkdir -p "$REPO/conf"
+ cat > "$REPO/conf/distributions" <<EOF
+Origin: Icinga2
+Label: Icinga2
+Suite: icinga-stretch
+Codename: icinga-stretch
+Description: Icinga2 packages for Debian Stretch
+Architectures: $ARCH
+Components: main
+Update: icinga-stretch
+Log: icinga2.log
+Limit: -1
+EOF
+ cat > "$REPO/conf/updates" <<EOF
+Name: icinga-stretch
+Method: file://$(readlink -f $upstream_repo)
+Suite: buster
+Components: main
+Architectures: $ARCH
+VerifyRelease: blindtrust
+GetInRelease: no
+EOF
+ call $REPREPRO $VERBOSE_ARGS -b $REPO --noskipold update
+ assertEquals "icinga-stretch|main|$ARCH: hello 2.9-10" "$($REPREPRO -b $REPO list icinga-stretch)"
+
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedeb icinga-stretch $PKGS/hello_2.9-2_${ARCH}.deb
+ call $REPREPRO $VERBOSE_ARGS -b $REPO --noskipold update
+ assertEquals "\
+icinga-stretch|main|$ARCH: hello 2.9-10
+icinga-stretch|main|$ARCH: hello 2.9-2" "$($REPREPRO -b $REPO list icinga-stretch)"
+}
+
+test_includedsc_sources() {
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedsc buster $PKGS/hello_2.9-1.dsc
+ call $REPREPRO $VERBOSE_ARGS -b $REPO -C main includedsc buster $PKGS/hello_2.9-2.dsc
+ assertEquals "\
+buster|main|source: hello 2.9-2
+buster|main|source: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+
+ call $REPREPRO $VERBOSE_ARGS -b $REPO removesrc buster hello 2.9-1
+ assertEquals "buster|main|source: hello 2.9-2" "$($REPREPRO -b $REPO list buster)"
+}
+
+test_database_upgrade() {
+ # Test case for https://github.com/profitbricks/reprepro/issues/8
+ rm -rf "$REPO"
+ cp -r "${0%/*}/old-database" "$REPO"
+ call $REPREPRO $VERBOSE_ARGS -b $REPO export
+ assertEquals "\
+bullseye|main|amd64
+bullseye|main|i386
+bullseye|main|source
+bullseye|non-free|amd64
+bullseye|non-free|i386
+bullseye|non-free|source" "$(db_dump "$REPO/db/packages.db" | sed -n 's/^database=//p')"
+}
+
+test_move_specific() {
+ four_hellos
+ add_distro bullseye
+ $REPREPRO -b $REPO export bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO move bullseye buster hello=2.9-2
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-10
+buster|main|$ARCH: hello 2.9-2+deb8u1
+buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "bullseye|main|$ARCH: hello 2.9-2" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_movesrc_specific() {
+ four_hellos
+ add_distro bullseye
+ $REPREPRO -b $REPO export bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO movesrc bullseye buster hello 2.9-2
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-10
+buster|main|$ARCH: hello 2.9-2+deb8u1
+buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "bullseye|main|$ARCH: hello 2.9-2" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+test_movefilter_specific() {
+ four_hellos
+ add_distro bullseye "Limit: -1"
+ $REPREPRO -b $REPO export bullseye
+ call $REPREPRO $VERBOSE_ARGS -b $REPO movefilter bullseye buster 'Package (= hello), $Version (>> 2.9-2)'
+ assertEquals "\
+buster|main|$ARCH: hello 2.9-2
+buster|main|$ARCH: hello 2.9-1" "$($REPREPRO -b $REPO list buster)"
+ assertEquals "\
+bullseye|main|$ARCH: hello 2.9-10
+bullseye|main|$ARCH: hello 2.9-2+deb8u1" "$($REPREPRO -b $REPO list bullseye)"
+}
+
+. shunit2
diff --git a/tests/old-database/conf/distributions b/tests/old-database/conf/distributions
new file mode 100644
index 0000000..80ba858
--- /dev/null
+++ b/tests/old-database/conf/distributions
@@ -0,0 +1,5 @@
+Codename: bullseye
+Architectures: amd64 i386 source
+Components: main non-free
+Log: testrepo.log
+Tracking: all
diff --git a/tests/old-database/db/checksums.db b/tests/old-database/db/checksums.db
new file mode 100644
index 0000000..842fdd1
--- /dev/null
+++ b/tests/old-database/db/checksums.db
Binary files differ
diff --git a/tests/old-database/db/contents.cache.db b/tests/old-database/db/contents.cache.db
new file mode 100644
index 0000000..646a2fd
--- /dev/null
+++ b/tests/old-database/db/contents.cache.db
Binary files differ
diff --git a/tests/old-database/db/packages.db b/tests/old-database/db/packages.db
new file mode 100644
index 0000000..6a7d498
--- /dev/null
+++ b/tests/old-database/db/packages.db
Binary files differ
diff --git a/tests/old-database/db/references.db b/tests/old-database/db/references.db
new file mode 100644
index 0000000..7b8e4f8
--- /dev/null
+++ b/tests/old-database/db/references.db
Binary files differ
diff --git a/tests/old-database/db/release.caches.db b/tests/old-database/db/release.caches.db
new file mode 100644
index 0000000..7958f47
--- /dev/null
+++ b/tests/old-database/db/release.caches.db
Binary files differ
diff --git a/tests/old-database/db/version b/tests/old-database/db/version
new file mode 100644
index 0000000..50d2a22
--- /dev/null
+++ b/tests/old-database/db/version
@@ -0,0 +1,4 @@
+5.2.0
+3.3.0
+bdb5.3.28
+bdb5.3.0
diff --git a/tests/onlysmalldeletes.test b/tests/onlysmalldeletes.test
new file mode 100644
index 0000000..6042eeb
--- /dev/null
+++ b/tests/onlysmalldeletes.test
@@ -0,0 +1,142 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat >conf/distributions <<EOF
+Codename: test
+Architectures: abacus source
+Components: all
+
+Codename: copy
+Architectures: abacus source
+Components: all
+Pull: rule
+EOF
+touch conf/updates
+cat >conf/pulls <<EOF
+Name: rule
+From: test
+EOF
+cat >conf/incoming <<EOF
+Name: i
+Tempdir: tmp
+Incomingdir: i
+Default: test
+EOF
+cat >conf/options <<EOF
+onlysmalldeletes
+EOF
+
+mkdir i
+cd i
+for i in $(seq 1 40) ; do
+PACKAGE=a$i EPOCH="" VERSION=$i REVISION="" SECTION="many" genpackage.sh
+mv test.changes a$i.changes
+done
+cd ..
+
+cat > pi.rules <<EOF
+stdout
+$(odb)
+-v2*=Created directory "./tmp"
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/all"
+-v2*=Created directory "./pool/all/a"
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/test"
+-v2*=Created directory "./dists/test/all"
+-v2*=Created directory "./dists/test/all/binary-abacus"
+-v6*= looking for changes in 'test|all|abacus'...
+-v6*= creating './dists/test/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/test/all/source"
+-v6*= looking for changes in 'test|all|source'...
+-v6*= creating './dists/test/all/source/Sources' (gzipped)
+EOF
+
+for i in $(seq 1 40) ; do
+cat >>pi.rules <<EOF
+-v2*=Created directory "./pool/all/a/a$i"
+$(ofa "pool/all/a/a${i}/a${i}_${i}.dsc")
+$(ofa "pool/all/a/a${i}/a${i}_${i}.tar.gz")
+$(ofa "pool/all/a/a${i}/a${i}_${i}_abacus.deb")
+$(ofa "pool/all/a/a${i}/a${i}-addons_${i}_all.deb")
+$(opa "a${i}" unset 'test' 'all' 'source' 'dsc')
+$(opa "a${i}" x 'test' 'all' 'abacus' 'deb')
+$(opa "a${i}-addons" x 'test' 'all' 'abacus' 'deb')
+-v1*=deleting './i/a${i}.changes'...
+-v1*=deleting './i/a${i}_${i}.dsc'...
+-v1*=deleting './i/a${i}_${i}.tar.gz'...
+-v1*=deleting './i/a${i}_${i}_abacus.deb'...
+-v1*=deleting './i/a${i}-addons_${i}_all.deb'...
+EOF
+done
+
+testrun pi -b . processincoming i
+dodo rmdir i
+rm pi.rules
+
+cat >pull.rules <<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'copy|all|source'
+-v5*= looking what to get from 'test|all|source'
+-v3*= pulling into 'copy|all|abacus'
+-v5*= looking what to get from 'test|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/copy"
+-v2*=Created directory "./dists/copy/all"
+-v2*=Created directory "./dists/copy/all/binary-abacus"
+-v6*= looking for changes in 'copy|all|abacus'...
+-v6*= creating './dists/copy/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/copy/all/source"
+-v6*= looking for changes in 'copy|all|source'...
+-v6*= creating './dists/copy/all/source/Sources' (gzipped)
+EOF
+
+for i in $(seq 1 40) ; do
+cat >>pull.rules <<EOF
+$(opa "a${i}" unset 'copy' 'all' 'source' 'dsc')
+$(opa "a${i}" x 'copy' 'all' 'abacus' 'deb')
+$(opa "a${i}-addons" x 'copy' 'all' 'abacus' 'deb')
+EOF
+done
+
+testrun pull -b . pull
+rm pull.rules
+
+sed -e 's/Pull: rule/Pull: -/' -i conf/distributions
+
+testrun - -b . pull 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'copy|all|source'
+-v5*= marking everything to be deleted
+-v3*= pulling into 'copy|all|abacus'
+#-v5*= marking everything to be deleted
+-v0*=Installing (and possibly deleting) packages...
+stderr
+*=Not processing 'copy' because of --onlysmalldeletes
+EOF
+
+sed -e 's/Pull: -/Update: -/' -i conf/distributions
+testrun - -b . --noskipold update 3<<EOF
+stdout
+-v2*=Created directory "./lists"
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'copy|all|source'
+-v5*= marking everything to be deleted
+-v3*= processing updates for 'copy|all|abacus'
+#-v5*= marking everything to be deleted
+stderr
+*=Not processing updates for 'copy' because of --onlysmalldeletes!
+EOF
+
+rm -r conf
+rm -r db
+rm -r pool
+rm -r dists
+rmdir tmp
+rmdir lists
+testsuccess
diff --git a/tests/override.test b/tests/override.test
new file mode 100644
index 0000000..84e8634
--- /dev/null
+++ b/tests/override.test
@@ -0,0 +1,172 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p conf dists/c/main/source
+mkdir -p conf dists/d/main/source
+mkdir -p conf dists/c/main/binary-abacus
+mkdir -p conf dists/d/main/binary-abacus
+mkdir -p conf dists/c/component/source
+mkdir -p conf dists/d/component/source
+mkdir -p conf dists/c/component/binary-abacus
+mkdir -p conf dists/d/component/binary-abacus
+mkdir -p dists/c/main/source
+mkdir -p dists/d/main/source
+mkdir -p dists/c/main/binary-abacus
+mkdir -p dists/d/main/binary-abacus
+mkdir -p pool/main/a/aa pool/component/b/bb
+mkdir -p pool/component/a/aa pool/main/b/bb
+cat > conf/distributions <<EOF
+Codename: c
+Components: main component
+Architectures: abacus source
+# Don't do that at home, kids....
+DebIndices: Index .
+DscIndices: Index .
+DebOverride: override-c-deb
+DscOverride: override-c-dsc
+
+Codename: d
+Components: main component
+Architectures: abacus source
+# Don't do that at home, kids....
+DebIndices: Index .
+DscIndices: Index .
+DebOverride: override-d-deb
+DscOverride: override-d-dsc
+EOF
+cat > conf/override-c-deb <<EOF
+EOF
+cat > conf/override-c-dsc <<EOF
+EOF
+cat > conf/override-d-deb <<EOF
+aa Section component/section
+aa Somefield value
+aa-addons Section component/addons
+a* ShouldNot ShowUp
+bb Section base
+bb-addons Section addons
+b* Section blub
+EOF
+cat > conf/override-d-dsc <<EOF
+a* Section component/section
+b? Section base
+b? SomeOtherfield somevalue
+b* ShouldNot ShowUp
+EOF
+
+DISTRI=c PACKAGE=aa EPOCH="" VERSION=1 REVISION="-1" SECTION="section" genpackage.sh
+mv test.changes aa.changes
+DISTRI=c PACKAGE=bb EPOCH="" VERSION=1 REVISION="-1" SECTION="component/base" genpackage.sh
+mv test.changes bb.changes
+
+testrun - --nodelete include c aa.changes 3<<EOF
+stdout
+$(odb)
+$(ofa 'pool/main/a/aa/aa-addons_1-1_all.deb')
+$(ofa 'pool/main/a/aa/aa_1-1_abacus.deb')
+$(ofa 'pool/main/a/aa/aa_1-1.tar.gz')
+$(ofa 'pool/main/a/aa/aa_1-1.dsc')
+$(opa 'aa-addons' x 'c' 'main' 'abacus' 'deb')
+$(opa 'aa' x 'c' 'main' 'abacus' 'deb')
+$(opa 'aa' unset 'c' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'c|main|abacus'...
+-v6*= creating './dists/c/main/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'c|component|abacus'...
+-v6*= creating './dists/c/component/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'c|main|source'...
+-v6*= creating './dists/c/main/source/Index' (uncompressed)
+-v6*= looking for changes in 'c|component|source'...
+-v6*= creating './dists/c/component/source/Index' (uncompressed)
+EOF
+testrun - --nodelete include c bb.changes 3<<EOF
+stdout
+$(ofa 'pool/component/b/bb/bb-addons_1-1_all.deb')
+$(ofa 'pool/component/b/bb/bb_1-1_abacus.deb')
+$(ofa 'pool/component/b/bb/bb_1-1.tar.gz')
+$(ofa 'pool/component/b/bb/bb_1-1.dsc')
+$(opa 'bb-addons' x 'c' 'component' 'abacus' 'deb')
+$(opa 'bb' x 'c' 'component' 'abacus' 'deb')
+$(opa 'bb' unset 'c' 'component' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'c|main|abacus'...
+-v6*= looking for changes in 'c|component|abacus'...
+-v6*= replacing './dists/c/component/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'c|main|source'...
+-v6*= looking for changes in 'c|component|source'...
+-v6*= replacing './dists/c/component/source/Index' (uncompressed)
+EOF
+ed -s aa.changes <<EOF
+g/^Distribution/s/ c/ d/
+w
+q
+EOF
+ed -s bb.changes <<EOF
+g/^Distribution/s/ c/ d/
+w
+q
+EOF
+testrun - --nodelete include d aa.changes 3<<EOF
+stdout
+$(ofa 'pool/component/a/aa/aa-addons_1-1_all.deb')
+$(ofa 'pool/component/a/aa/aa_1-1_abacus.deb')
+$(ofa 'pool/component/a/aa/aa_1-1.tar.gz')
+$(ofa 'pool/component/a/aa/aa_1-1.dsc')
+$(opa 'aa-addons' x 'd' 'component' 'abacus' 'deb')
+$(opa 'aa' x 'd' 'component' 'abacus' 'deb')
+$(opa 'aa' unset 'd' 'component' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'd|component|abacus'...
+-v6*= creating './dists/d/component/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'd|main|abacus'...
+-v6*= creating './dists/d/main/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'd|component|source'...
+-v6*= creating './dists/d/component/source/Index' (uncompressed)
+-v6*= looking for changes in 'd|main|source'...
+-v6*= creating './dists/d/main/source/Index' (uncompressed)
+EOF
+testrun - --nodelete include d bb.changes 3<<EOF
+stdout
+$(ofa 'pool/main/b/bb/bb-addons_1-1_all.deb')
+$(ofa 'pool/main/b/bb/bb_1-1_abacus.deb')
+$(ofa 'pool/main/b/bb/bb_1-1.tar.gz')
+$(ofa 'pool/main/b/bb/bb_1-1.dsc')
+$(opa 'bb-addons' x 'd' 'main' 'abacus' 'deb')
+$(opa 'bb' x 'd' 'main' 'abacus' 'deb')
+$(opa 'bb' unset 'd' 'main' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'd|component|abacus'...
+-v6*= looking for changes in 'd|main|abacus'...
+-v6*= replacing './dists/d/main/binary-abacus/Index' (uncompressed)
+-v6*= looking for changes in 'd|component|source'...
+-v6*= looking for changes in 'd|main|source'...
+-v6*= replacing './dists/d/main/source/Index' (uncompressed)
+EOF
+
+cp dists/c/main/binary-abacus/Index Index.expected
+ed -s Index.expected <<EOF
+/^Priority:/i
+Somefield: value
+.
+g/Section/s#section#component/addons#
+/Section/s#addons#section#
+%s/main/component/
+w
+EOF
+dodiff Index.expected dists/d/component/binary-abacus/Index
+
+cp dists/c/component/source/Index Index.expected
+ed -s Index.expected <<EOF
+/^Priority:/i
+SomeOtherfield: somevalue
+.
+g/Section/s#component/base#base#
+g/Directory/s/component/main/
+w
+EOF
+dodiff Index.expected dists/d/main/source/Index
+
+
+dodo rm -r aa* bb* pool dists db conf
+
+testsuccess
diff --git a/tests/packagediff.test b/tests/packagediff.test
new file mode 100644
index 0000000..c726bea
--- /dev/null
+++ b/tests/packagediff.test
@@ -0,0 +1,287 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf db pool fakes old
+mkdir -p dists/sourcedistribution/main/binary-coal
+
+cp "$SRCDIR/docs/pdiff.example" conf/pdiff.py
+cat > conf/distributions <<EOF
+Codename: sourcedistribution
+Architectures: coal
+Components: main
+DebIndices: Packages Release . pdiff.py
+
+Codename: test
+Architectures: coal
+Components: main
+Update: fromsource
+EOF
+
+testrun - -b . export sourcedistribution 3<<EOF
+stdout
+-v1*=Exporting sourcedistribution...
+-v6*= exporting 'sourcedistribution|main|coal'...
+-v6*= creating './dists/sourcedistribution/main/binary-coal/Packages' (uncompressed,script: pdiff.py)
+EOF
+
+dodo test -f dists/sourcedistribution/main/binary-coal/Packages
+dodo test -f dists/sourcedistribution/main/binary-coal/Release
+dodo test \! -e dists/sourcedistribution/main/binary-coal/Packages.diff
+
+testrun - -b . _addpackage sourcedistribution fakes/1 a 3<<EOF
+stderr
+*=_addpackage needs -C and -A and -T set!
+-v0*=There have been errors!
+returns 255
+EOF
+
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3<<EOF
+stderr
+*=Error 2 opening 'fakes/1': No such file or directory!
+-v0*=There have been errors!
+return 254
+EOF
+
+touch fakes/1
+
+# TODO: getting a warning here would be nice...
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3<<EOF
+EOF
+testrun - --nothingiserror -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3<<EOF
+returns 1
+EOF
+
+cat > fakes/1 <<EOF
+Package: 5dchess
+Priority: extra
+Section: games
+Installed-Size: 400000
+Maintainer: test <nobody@nowhere>
+Architecture: coal
+Version: 0.0-1
+Filename: pool/main/5/5dchess/5dchess_0.0-1_coal.deb
+MD5sum: $EMPTYMD5ONLY
+Size: 0
+Description: the lazy fox
+ jumps over the quick brown dog.
+
+Package: a
+Priority: critical
+Section: required
+Installed-Size: 1
+Maintainer: test <nobody@nowhere>
+Architecture: all
+Version: 1
+Filename: pool/main/a/a/a_1_all.deb
+MD5sum: $EMPTYMD5ONLY
+Size: 0
+Description: the lazy fox
+ jumps over the quick brown dog.
+
+Package: b
+Source: baa
+Priority: critical
+Section: required
+Installed-Size: 1
+Maintainer: test <nobody@nowhere>
+Architecture: coal
+Version: 2
+Filename: pool/main/b/baa/b_2_coal.deb
+MD5sum: $EMPTYMD5ONLY
+Size: 0
+Description: the lazy fox
+ jumps over the quick brown dog.
+EOF
+
+cat > fakes/2 <<EOF
+Package: a
+Priority: critical
+Section: required
+Installed-Size: 2
+Maintainer: test <nobody@nowhere>
+Architecture: all
+Version: 2
+Filename: pool/main/a/a/a_2_all.deb
+MD5sum: $EMPTYMD5ONLY
+Size: 0
+Description: the lazy fox
+ jumps over the quick brown dog.
+EOF
+
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3<<EOF
+*=Error: package a version 1 lists file pool/main/a/a/a_1_all.deb not yet in the pool!
+-v0*=There have been errors!
+returns 249
+EOF
+
+cat > addchecksums.rules <<EOF
+stdout
+$(ofa 'pool/main/a/a/a_1_all.deb')
+$(ofa 'pool/main/a/a/a_2_all.deb')
+$(ofa 'pool/main/b/baa/b_2_coal.deb')
+$(ofa 'pool/main/5/5dchess/5dchess_0.0-1_coal.deb')
+-v0*=4 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+
+testrun addchecksums -b . _addchecksums <<EOF
+pool/main/b/baa/b_2_coal.deb $EMPTYMD5
+pool/main/a/a/a_1_all.deb $EMPTYMD5
+pool/main/a/a/a_2_all.deb $EMPTYMD5
+pool/main/5/5dchess/5dchess_0.0-1_coal.deb $EMPTYMD5
+EOF
+
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3<<EOF
+stdout
+-v1*=Adding 'a' '1' to 'sourcedistribution|main|coal'.
+$(opa 'a' x 'sourcedistribution' 'main' 'coal' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'sourcedistribution|main|coal'...
+-v6*= replacing './dists/sourcedistribution/main/binary-coal/Packages' (uncompressed,script: pdiff.py)
+=making diffs between ./dists/sourcedistribution/main/binary-coal/Packages and ./dists/sourcedistribution/main/binary-coal/Packages.new:
+=generating diff
+EOF
+sleep 1
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 5dchess 3<<EOF
+stdout
+-v1*=Adding '5dchess' '0.0-1' to 'sourcedistribution|main|coal'.
+$(opa '5dchess' x 'sourcedistribution' 'main' 'coal' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'sourcedistribution|main|coal'...
+-v6*= replacing './dists/sourcedistribution/main/binary-coal/Packages' (uncompressed,script: pdiff.py)
+=making diffs between ./dists/sourcedistribution/main/binary-coal/Packages and ./dists/sourcedistribution/main/binary-coal/Packages.new:
+=generating diff
+=This was too fast, diffile already there, waiting a bit...
+EOF
+sleep 1
+cp dists/sourcedistribution/main/binary-coal/Packages old/1
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/2 a 3<<EOF
+stderr
+*=./pool/main/a/a/a_1_all.deb not found, forgetting anyway
+stdout
+-v1*=Adding 'a' '2' to 'sourcedistribution|main|coal'.
+$(opu 'a' x x 'sourcedistribution' 'main' 'coal' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'sourcedistribution|main|coal'...
+-v6*= replacing './dists/sourcedistribution/main/binary-coal/Packages' (uncompressed,script: pdiff.py)
+=making diffs between ./dists/sourcedistribution/main/binary-coal/Packages and ./dists/sourcedistribution/main/binary-coal/Packages.new:
+=generating diff
+=This was too fast, diffile already there, waiting a bit...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/main/a/a/a_1_all.deb')
+EOF
+cp dists/sourcedistribution/main/binary-coal/Packages old/2
+sleep 1
+testrun - -b . -C main -A coal -T deb _addpackage sourcedistribution fakes/1 b 3<<EOF
+stdout
+-v1*=Adding 'b' '2' to 'sourcedistribution|main|coal'.
+$(opa 'b' x 'sourcedistribution' 'main' 'coal' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'sourcedistribution|main|coal'...
+-v6*= replacing './dists/sourcedistribution/main/binary-coal/Packages' (uncompressed,script: pdiff.py)
+=making diffs between ./dists/sourcedistribution/main/binary-coal/Packages and ./dists/sourcedistribution/main/binary-coal/Packages.new:
+=generating diff
+=This was too fast, diffile already there, waiting a bit...
+EOF
+
+dodo test -f dists/sourcedistribution/main/binary-coal/Packages
+dodo test -f dists/sourcedistribution/main/binary-coal/Release
+dodo test -d dists/sourcedistribution/main/binary-coal/Packages.diff
+dodo test -f dists/sourcedistribution/main/binary-coal/Packages.diff/Index
+testrun empty -b . dumpunreferenced
+
+# now update from that one....
+cat > conf/updates <<EOF
+Name: fromsource
+Suite: sourcedistribution
+VerifyRelease: blindtrust
+GetInRelease: no
+DownloadListsAs: .diff
+Method: file:$WORKDIR
+EOF
+mkdir lists
+mkdir -p dists/test/main/binary-coal
+
+cp old/2 lists/fromsource_sourcedistribution_main_coal_Packages
+
+diffname="$(grep "^ $(sha1 old/2)" dists/sourcedistribution/main/binary-coal/Packages.diff/Index | sed -e 's/.* //')"
+
+testrun - -b . update test 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/Release' to './lists/fromsource_sourcedistribution_Release'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index' to './lists/fromsource_sourcedistribution_main_coal_Packages.diffindex'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz'
+-v2*=Uncompress '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz' into './lists/fromsource_sourcedistribution_main_coal_Packages.diff-${diffname}' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test|main|coal'
+-v5*= reading './lists/fromsource_sourcedistribution_main_coal_Packages'
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa '5dchess' x 'test' 'main' 'coal' 'deb')
+$(opa 'a' x 'test' 'main' 'coal' 'deb')
+$(opa 'b' x 'test' 'main' 'coal' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test|main|coal'...
+-v6*= creating './dists/test/main/binary-coal/Packages' (uncompressed,gzipped)
+EOF
+
+dodiff dists/sourcedistribution/main/binary-coal/Packages lists/fromsource_sourcedistribution_main_coal_Packages
+
+cp old/1 lists/fromsource_sourcedistribution_main_coal_Packages
+
+diffname2="$(grep "^ $(sha1 old/1)" dists/sourcedistribution/main/binary-coal/Packages.diff/Index | sed -e 's/.* //')"
+testrun - --noskipold -b . update test 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/Release' to './lists/fromsource_sourcedistribution_Release'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/Index' to './lists/fromsource_sourcedistribution_main_coal_Packages.diffindex'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname2}.gz'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname2}.gz'
+-v2*=Uncompress '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname2}.gz' into './lists/fromsource_sourcedistribution_main_coal_Packages.diff-${diffname2}' using '/bin/gunzip'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz'
+-v2*=Uncompress '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages.diff/${diffname}.gz' into './lists/fromsource_sourcedistribution_main_coal_Packages.diff-${diffname}' using '/bin/gunzip'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test|main|coal'
+-v5*= reading './lists/fromsource_sourcedistribution_main_coal_Packages'
+EOF
+
+dodiff dists/sourcedistribution/main/binary-coal/Packages lists/fromsource_sourcedistribution_main_coal_Packages
+
+# Check without DownLoadListsAs and not index file
+cat > conf/updates <<EOF
+Name: fromsource
+Suite: sourcedistribution
+GetInRelease: no
+VerifyRelease: blindtrust
+Method: file:$WORKDIR
+EOF
+rm -r lists
+mkdir lists
+testrun - --noskipold -b . update test 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/Release'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/Release' to './lists/fromsource_sourcedistribution_Release'...
+-v6=aptmethod start 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages'
+-v1*=aptmethod got 'file:$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages'
+-v2*=Copy file '$WORKDIR/dists/sourcedistribution/main/binary-coal/Packages' to './lists/fromsource_sourcedistribution_main_coal_Packages'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test|main|coal'
+-v5*= reading './lists/fromsource_sourcedistribution_main_coal_Packages'
+EOF
+
+rm -r conf dists pool db fakes addchecksums.rules old lists
+testsuccess
diff --git a/tests/revoked.key b/tests/revoked.key
new file mode 100644
index 0000000..841536b
--- /dev/null
+++ b/tests/revoked.key
Binary files differ
diff --git a/tests/revoked.pkey b/tests/revoked.pkey
new file mode 100644
index 0000000..2941473
--- /dev/null
+++ b/tests/revoked.pkey
Binary files differ
diff --git a/tests/shunit2-helper-functions.sh b/tests/shunit2-helper-functions.sh
new file mode 100644
index 0000000..8f664b8
--- /dev/null
+++ b/tests/shunit2-helper-functions.sh
@@ -0,0 +1,68 @@
+# Copyright (C) 2017, Benjamin Drung <benjamin.drung@profitbricks.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+REPO="${0%/*}/testrepo"
+PKGS="${0%/*}/testpkgs"
+ARCH=${ARCH:-$(dpkg-architecture -qDEB_HOST_ARCH)}
+REPREPRO=$(realpath -m "${0%/*}/.." --relative-base=.)/reprepro
+VERBOSE_ARGS="${VERBOSE_ARGS-}"
+
+call() {
+ command="$@"
+ echo "I: Calling $@"
+ "$@" || fail "Command '$command' failed with exit code $?."
+}
+
+check_db() {
+ db_verify $REPO/db/packages.db || fail "BerkeleyDB 'packages.db' is broken."
+ db_verify -o $REPO/db/packagenames.db || fail "BerkeleyDB 'packagenames.db' is broken."
+}
+
+add_distro() {
+ local name="$1"
+ if test -e $REPO/conf/distributions; then
+ echo >> $REPO/conf/distributions
+ fi
+ cat >> $REPO/conf/distributions <<EOF
+Codename: $name
+Architectures: $ARCH source
+Components: main non-free
+Log: testrepo.log
+Tracking: all
+EOF
+ if test -n "${2-}"; then
+ echo "$2" >> $REPO/conf/distributions
+ fi
+}
+
+clear_distro() {
+ rm -f $REPO/conf/distributions
+}
+
+create_repo() {
+ rm -rf $REPO
+ mkdir -p $REPO/conf
+ add_distro buster
+ mkdir -p $PKGS
+ $REPREPRO -b $REPO export
+}
+
+# See https://github.com/wting/shunit2/issues/23
+if test -n "${TEST_CASES-}"; then
+ suite() {
+ for testcase in "${TEST_CASES}" ; do
+ suite_addTest $testcase
+ done
+ }
+fi
diff --git a/tests/signatures.test b/tests/signatures.test
new file mode 100644
index 0000000..bca35ff
--- /dev/null
+++ b/tests/signatures.test
@@ -0,0 +1,286 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+if ! which gpg 2>/dev/null ; then
+ echo "SKIPPED: gpg not found!"
+ exit 0
+fi
+
+rm -rf db dists pool lists conf gpgtestdir
+
+mkdir -p gpgtestdir
+export GNUPGHOME="`pwd`/gpgtestdir"
+gpg --import $TESTSDIR/good.key $TESTSDIR/evil.key $TESTSDIR/expired.key $TESTSDIR/revoked.key
+
+mkdir -p conf
+cat > conf/options <<CONFEND
+export changed
+CONFEND
+cat > conf/distributions <<CONFEND
+Codename: ATest
+Uploaders: auploaders
+Architectures: abacus source
+Components: everything
+
+Codename: BTest
+Uploaders: buploaders
+Architectures: abacus source
+Components: everything
+
+Codename: CTest
+Uploaders: cuploaders
+Architectures: abacus source
+Components: everything
+CONFEND
+
+gpg --list-keys
+
+cat > conf/auploaders <<CONFEND
+# Nothing is allowed in here
+CONFEND
+cat > conf/buploaders <<CONFEND
+allow * by key FFFFFFFF
+allow * by key DC3C29B8
+allow * by key 685AF714
+allow * by key 00000000
+CONFEND
+cat > conf/cuploaders <<CONFEND
+allow * by key FFFFFFFF
+allow * by any key
+allow * by unsigned
+allow * by key 00000000
+allow * by anybody
+CONFEND
+cat > conf/incoming <<CONFEND
+Name: abc
+Incomingdir: i
+TempDir: tmp
+Allow: ATest BTest CTest
+
+Name: ab
+Incomingdir: i
+TempDir: tmp
+Allow: ATest BTest
+CONFEND
+mkdir i tmp
+
+DISTRI="ATest BTest CTest" PACKAGE=package EPOCH="" VERSION=9 REVISION="-2" SECTION="otherofs" genpackage.sh
+echo generating signature with evil key:
+gpg --default-key evil@nowhere.tld --sign -a test.changes
+mv test.changes.asc testbadsigned.changes
+echo generating signature with good key:
+gpg --default-key good@nowhere.tld --sign -a test.changes
+mv test.changes.asc testsigned.changes
+echo generating signature with revoked key:
+gpg --expert --default-key revoked@nowhere.tld --sign -a test.changes
+mv test.changes.asc testrevsigned.changes
+gpg --import $TESTSDIR/revoked.pkey
+
+
+testrun - -b . include ATest test.changes 3<<EOF
+return 255
+stderr
+*=No rule allowing this package in found in auploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+$(odb)
+EOF
+
+testrun - -b . include BTest test.changes 3<<EOF
+return 255
+stderr
+*=No rule allowing this package in found in buploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include CTest test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/everything"
+-v2*=Created directory "./pool/everything/p"
+-v2*=Created directory "./pool/everything/p/package"
+$(ofa 'pool/everything/p/package/package-addons_9-2_all.deb')
+$(ofa 'pool/everything/p/package/package_9-2_abacus.deb')
+$(ofa 'pool/everything/p/package/package_9-2.tar.gz')
+$(ofa 'pool/everything/p/package/package_9-2.dsc')
+$(opa 'package-addons' x 'CTest' 'everything' 'abacus' 'deb')
+$(opa 'package' x 'CTest' 'everything' 'abacus' 'deb')
+$(opa 'package' unset 'CTest' 'everything' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/CTest"
+-v2*=Created directory "./dists/CTest/everything"
+-v2*=Created directory "./dists/CTest/everything/binary-abacus"
+-v6*= looking for changes in 'CTest|everything|abacus'...
+-v6*= creating './dists/CTest/everything/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/CTest/everything/source"
+-v6*= looking for changes in 'CTest|everything|source'...
+-v6*= creating './dists/CTest/everything/source/Sources' (gzipped)
+EOF
+
+testrun - -b . include ATest testbadsigned.changes 3<<EOF
+return 255
+stderr
+*=No rule allowing this package in found in auploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include BTest testbadsigned.changes 3<<EOF
+return 255
+stderr
+*=No rule allowing this package in found in buploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include CTest testbadsigned.changes 3<<EOF
+stderr
+*=Skipping inclusion of 'package-addons' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|source', as it has already '9-2'.
+stdout
+EOF
+
+testrun - -b . include ATest testrevsigned.changes 3<<EOF
+return 255
+stderr
+-v1*=Ignoring signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' on 'testrevsigned.changes', as the key is revoked.
+*=No rule allowing this package in found in auploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include BTest testrevsigned.changes 3<<EOF
+return 255
+stderr
+-v1*=Ignoring signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' on 'testrevsigned.changes', as the key is revoked.
+*=No rule allowing this package in found in buploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include CTest testrevsigned.changes 3<<EOF
+stderr
+-v1*=Ignoring signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' on 'testrevsigned.changes', as the key is revoked.
+*=Skipping inclusion of 'package-addons' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|source', as it has already '9-2'.
+stdout
+EOF
+
+testrun - -b . include ATest testsigned.changes 3<<EOF
+return 255
+stderr
+*=No rule allowing this package in found in auploaders!
+*=To ignore use --ignore=uploaders.
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . include BTest testsigned.changes 3<<EOF
+stdout
+$(opa 'package-addons' x 'BTest' 'everything' 'abacus' 'deb')
+$(opa 'package' x 'BTest' 'everything' 'abacus' 'deb')
+$(opa 'package' unset 'BTest' 'everything' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/BTest"
+-v2*=Created directory "./dists/BTest/everything"
+-v2*=Created directory "./dists/BTest/everything/binary-abacus"
+-v6*= looking for changes in 'BTest|everything|abacus'...
+-v6*= creating './dists/BTest/everything/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/BTest/everything/source"
+-v6*= looking for changes in 'BTest|everything|source'...
+-v6*= creating './dists/BTest/everything/source/Sources' (gzipped)
+EOF
+
+testrun - -b . include CTest testsigned.changes 3<<EOF
+stderr
+*=Skipping inclusion of 'package-addons' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|abacus', as it has already '9-2'.
+*=Skipping inclusion of 'package' '9-2' in 'CTest|everything|source', as it has already '9-2'.
+stdout
+EOF
+
+cp package* i/
+cp test.changes i/
+testrun - -b . processincoming ab 3<<EOF
+return 243
+stderr
+*=No distribution accepting 'test.changes' (i.e. none of the candidate distributions allowed inclusion)!
+-v0*=There have been errors!
+stdout
+EOF
+testrun - -b . processincoming abc 3<<EOF
+stdout
+-v3*=Will not put 'package' in 'CTest|everything|source', as already there with same version '9-2'.
+-v3*=Will not put 'package' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v3*=Will not put 'package-addons' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v0*=Skipping test.changes because all packages are skipped!
+-v3*=deleting './i/package_9-2.dsc'...
+-v3*=deleting './i/package-addons_9-2_all.deb'...
+-v3*=deleting './i/package_9-2.tar.gz'...
+-v3*=deleting './i/package_9-2_abacus.deb'...
+-v3*=deleting './i/test.changes'...
+EOF
+
+cp -i package* i/
+cp testrevsigned.changes i/
+testrun - -b . processincoming ab 3<<EOF
+return 243
+stderr
+*=No distribution accepting 'testrevsigned.changes' (i.e. none of the candidate distributions allowed inclusion)!
+-v0*=There have been errors!
+-v1*=Ignoring signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' on 'testrevsigned.changes', as the key is revoked.
+#-v0*='testrevsigned.changes' would have been accepted into 'BTest' if signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' was checkable and valid.
+stdout
+EOF
+testrun - -b . processincoming abc 3<<EOF
+stderr
+-v1*=Ignoring signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' on 'testrevsigned.changes', as the key is revoked.
+#-v0*='testrevsigned.changes' would have been accepted into 'BTest' if signature with '12D6C95C8C737389EAAF535972F1D61F685AF714' was checkable and valid.
+stdout
+-v3*=Will not put 'package' in 'CTest|everything|source', as already there with same version '9-2'.
+-v3*=Will not put 'package' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v3*=Will not put 'package-addons' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v0*=Skipping testrevsigned.changes because all packages are skipped!
+-v3*=deleting './i/package_9-2.dsc'...
+-v3*=deleting './i/package-addons_9-2_all.deb'...
+-v3*=deleting './i/package_9-2.tar.gz'...
+-v3*=deleting './i/package_9-2_abacus.deb'...
+-v3*=deleting './i/testrevsigned.changes'...
+EOF
+
+cp -i package* i/
+cp testbadsigned.changes i/
+testrun - -b . processincoming ab 3<<EOF
+return 243
+stderr
+*=No distribution accepting 'testbadsigned.changes' (i.e. some distribution found but the package is not allowed there)!
+-v0*=There have been errors!
+stdout
+EOF
+testrun - -b . processincoming abc 3<<EOF
+stdout
+-v3*=Will not put 'package' in 'CTest|everything|source', as already there with same version '9-2'.
+-v3*=Will not put 'package' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v3*=Will not put 'package-addons' in 'CTest|everything|abacus', as already there with same version '9-2'.
+-v0*=Skipping testbadsigned.changes because all packages are skipped!
+-v3*=deleting './i/package_9-2.dsc'...
+-v3*=deleting './i/package-addons_9-2_all.deb'...
+-v3*=deleting './i/package_9-2.tar.gz'...
+-v3*=deleting './i/package_9-2_abacus.deb'...
+-v3*=deleting './i/testbadsigned.changes'...
+EOF
+
+rm -rf db conf dists pool gpgtestdir i tmp
+rm package-addons* package_* *.changes
+
+testsuccess
diff --git a/tests/signed.test b/tests/signed.test
new file mode 100644
index 0000000..8e7dba8
--- /dev/null
+++ b/tests/signed.test
@@ -0,0 +1,68 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p gpgtestdir
+export GNUPGHOME="`pwd`/gpgtestdir"
+gpg --import $TESTSDIR/good.key
+
+mkdir -p conf
+cat > conf/distributions <<CONFEND
+Codename: ATest
+Architectures: abacus source
+Components: everything
+SignWith: good@nowhere.tld
+CONFEND
+
+gpg --list-keys
+
+testrun - -b . export 3<<EOF
+stdout
+$(odb)
+-v1*=Exporting ATest...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/ATest"
+-v2*=Created directory "./dists/ATest/everything"
+-v2*=Created directory "./dists/ATest/everything/binary-abacus"
+-v6*= exporting 'ATest|everything|abacus'...
+-v6*= creating './dists/ATest/everything/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/ATest/everything/source"
+-v6*= exporting 'ATest|everything|source'...
+-v6*= creating './dists/ATest/everything/source/Sources' (gzipped)
+-v2*=Successfully created './dists/ATest/Release.gpg.new'
+-v2*=Successfully created './dists/ATest/InRelease.new'
+EOF
+
+find dists/ATest | sort > results
+cat > results.expected <<EOF
+dists/ATest
+dists/ATest/InRelease
+dists/ATest/Release
+dists/ATest/Release.gpg
+dists/ATest/everything
+dists/ATest/everything/binary-abacus
+dists/ATest/everything/binary-abacus/Packages
+dists/ATest/everything/binary-abacus/Packages.gz
+dists/ATest/everything/binary-abacus/Release
+dists/ATest/everything/source
+dists/ATest/everything/source/Release
+dists/ATest/everything/source/Sources.gz
+EOF
+
+dodiff results.expected results
+
+dodo gpg --verify dists/ATest/Release.gpg dists/ATest/Release
+dodo gpg --verify dists/ATest/InRelease
+
+cp dists/ATest/InRelease InRelease
+ed -s InRelease <<'EOF'
+H
+/^-----BEGIN PGP SIGNED MESSAGE-----$/,/^$/d
+/^-----BEGIN PGP SIGNATURE-----$/,$d
+w
+q
+EOF
+dodiff dists/ATest/Release InRelease
+
+rm -r conf db dists gpgtestdir InRelease results results.expected
+
+testsuccess
diff --git a/tests/snapshotcopyrestore.test b/tests/snapshotcopyrestore.test
new file mode 100644
index 0000000..fdb4155
--- /dev/null
+++ b/tests/snapshotcopyrestore.test
@@ -0,0 +1,597 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p conf
+cat > conf/options <<CONFEND
+outhook $SRCDIR/docs/outstore.py
+CONFEND
+export REPREPRO_OUT_DB=db.out
+cat > conf/distributions <<CONFEND
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+ -A=nonexistant -C=nocomponent --type=none --withcontrol noscript.sh
+
+Codename: B
+Architectures: abacus source
+Components: dog cat
+Contents:
+Log: logfile
+CONFEND
+mkdir logs
+
+testrun - -b . export 3<<EOF
+stderr
+*=Warning: unknown architecture 'nonexistant', ignoring notificator line at line 5 in ./conf/distributions
+stdout
+$(odb)
+-v1*=Exporting B...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/B"
+-v2*=Created directory "./dists/B/dog"
+-v2*=Created directory "./dists/B/dog/binary-abacus"
+-v6*= exporting 'B|dog|abacus'...
+-v6*= creating './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/B/dog/source"
+-v6*= exporting 'B|dog|source'...
+-v6*= creating './dists/B/dog/source/Sources' (gzipped)
+-v2*=Created directory "./dists/B/cat"
+-v2*=Created directory "./dists/B/cat/binary-abacus"
+-v6*= exporting 'B|cat|abacus'...
+-v2*=Created directory "./dists/B/cat/source"
+-v6*= creating './dists/B/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= exporting 'B|cat|source'...
+-v6*= creating './dists/B/cat/source/Sources' (gzipped)
+-v1*= generating dog/Contents-abacus...
+-v1*= generating cat/Contents-abacus...
+-v1*=Exporting A...
+-v2*=Created directory "./dists/A"
+-v2*=Created directory "./dists/A/dog"
+-v2*=Created directory "./dists/A/dog/binary-abacus"
+-v6*= exporting 'A|dog|abacus'...
+-v6*= creating './dists/A/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/dog/binary-calculator"
+-v6*= exporting 'A|dog|calculator'...
+-v6*= creating './dists/A/dog/binary-calculator/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/cat"
+-v2*=Created directory "./dists/A/cat/binary-abacus"
+-v6*= exporting 'A|cat|abacus'...
+-v6*= creating './dists/A/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/cat/binary-calculator"
+-v6*= exporting 'A|cat|calculator'...
+-v6*= creating './dists/A/cat/binary-calculator/Packages' (uncompressed,gzipped)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+dodo test -f db/checksums.db
+
+ed -s conf/distributions <<EOF
+g/^ -A=nonexistant/s/nonexistant/calculator/
+w
+q
+EOF
+
+touch importindex
+
+testrun - -b . _addpackage B importindex bar foo 3<<EOF
+returns 255
+stderr
+*=Warning: unknown component 'nocomponent', ignoring notificator line at line 5 in ./conf/distributions
+*=_addpackage needs -C and -A and -T set!
+-v0*=There have been errors!
+EOF
+
+ed -s conf/distributions <<EOF
+g/^ -A/s/nocomponent/cat/
+w
+q
+EOF
+
+testrun - -b . -A source -T dsc _addpackage B importindex bar foo 3<<EOF
+returns 255
+stderr
+*=Warning: unknown packagetype 'none', ignoring notificator line at line 5 in ./conf/distributions
+*=_addpackage needs -C and -A and -T set!
+-v0*=There have been errors!
+EOF
+
+# -A=calculator -C=cat --type=dsc --via=include --withcontrol noscript.sh
+ed -s conf/distributions <<EOF
+g/^ -A=/s/=none/=dsc --via=include/
+w
+q
+EOF
+
+
+testrun - -b . -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+returns 255
+stderr
+*=_addpackage needs -C and -A and -T set!
+-v0*=There have been errors!
+EOF
+
+testrun - -b . -T deb -C dog _addpackage B importindex bar foo 3<<EOF
+returns 255
+stderr
+*=_addpackage needs -C and -A and -T set!
+-v0*=There have been errors!
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+stderr
+stdout
+EOF
+
+cat > importindex <<EOF
+Test:
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+returns 249
+stderr
+*=Error parsing importindex line 1 to 1: Chunk without 'Package:' field!
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > importindex <<EOF
+Package: another
+Version: 0
+Architecture: abacus
+MD5Sum: 0
+Size: 0
+Filename: none
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+stderr
+stdout
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+cat > importindex <<EOF
+Package: foo
+Version: 0
+Architecture: abacus
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+returns 255
+stderr
+*=Data does not look like binary control: 'Package: foo
+*=Version: 0
+*=Architecture: abacus'
+-v0*=There have been errors!
+stdout
+EOF
+
+cat > importindex <<EOF
+Package: foo
+Version: 0
+Architecture: abacus
+MD5sum: 0
+Size: 0
+Filename: none
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+returns 249
+stderr
+*=Error: cannot yet deal with files changing their position
+*=(pool/dog/f/foo/foo_0_abacus.deb vs none in foo version 0)
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir -p pool/dog/f/foo
+echo "some data" > foo_0_abacus.deb
+
+cat > importindex <<EOF
+Package: foo
+Version: 0
+Architecture: abacus
+MD5sum: $(md5 foo_0_abacus.deb)
+Size: $(stat -c "%s" foo_0_abacus.deb)
+Filename: pool/dog/f/foo/foo_0_abacus.deb
+EOF
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+returns 249
+stderr
+*=Error: package foo version 0 lists file pool/dog/f/foo/foo_0_abacus.deb not yet in the pool!
+-v0*=There have been errors!
+stdout
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun empty -b . dumpunreferenced
+
+mv foo_0_abacus.deb pool/dog/f/foo/foo_0_abacus.deb
+testrun - -b . _detect pool/dog/f/foo/foo_0_abacus.deb 3<<EOF
+stderr
+stdout
+$(ofa 'pool/dog/f/foo/foo_0_abacus.deb')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun - -b . dumpunreferenced 3<<EOF
+stderr
+stdout
+*=pool/dog/f/foo/foo_0_abacus.deb
+EOF
+
+# TODO: why is there no error for faulty .deb here?
+
+testrun - -b . -T deb -A abacus -C dog _addpackage B importindex bar foo 3<<EOF
+stderr
+*=Strange control data for 'foo': no Description at all
+stdout
+-v1*=Adding 'foo' '0' to 'B|dog|abacus'.
+$(opa 'foo' x 'B' 'dog' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun empty -b . dumpunreferenced
+
+echo "dsc-content" > pool/dog/f/foo/foo_1.dsc
+echo "tar-content" > pool/dog/f/foo/foo_1.tar.gz
+
+cat > importindex <<EOF
+Package: foo
+Version: 1
+Directory: pool/dog/f/foo
+Files:
+ $(mdandsize pool/dog/f/foo/foo_1.dsc) foo_1.dsc
+ $(mdandsize pool/dog/f/foo/foo_1.tar.gz) foo_1.tar.gz
+EOF
+
+testrun - -b . -T dsc -C dog _addpackage B importindex bar foo 3<<EOF
+returns 249
+stderr
+*=Error: package foo version 1 lists file pool/dog/f/foo/foo_1.dsc not yet in the pool!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun empty -b . dumpunreferenced
+
+testrun - -b . _detect pool/dog/f/foo/foo_1.dsc 3<<EOF
+stderr
+stdout
+$(ofa 'pool/dog/f/foo/foo_1.dsc')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+
+mv pool/dog/f/foo/foo_1.tar.gz foo_1.tar.gz
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+mv foo_1.tar.gz pool/dog/f/foo/foo_1.tar.gz
+
+testrun - -b . -T dsc -C dog _addpackage B importindex bar foo 3<<EOF
+returns 249
+stderr
+*=Error: package foo version 1 lists file pool/dog/f/foo/foo_1.tar.gz not yet in the pool!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - -b . _detect pool/dog/f/foo/foo_1.tar.gz 3<<EOF
+stderr
+stdout
+$(ofa 'pool/dog/f/foo/foo_1.tar.gz')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun - -b . dumpunreferenced 3<<EOF
+stderr
+stdout
+*=pool/dog/f/foo/foo_1.dsc
+*=pool/dog/f/foo/foo_1.tar.gz
+EOF
+
+testrun - -b . -T dsc -C dog _addpackage B importindex bar foo 3<<EOF
+stderr
+stdout
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opa 'foo' x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun empty -b . dumpunreferenced
+
+testrun - -b . gensnapshot B now 3<<EOF
+stdout
+-v2*=Created directory "./dists/B/snapshots"
+-v2*=Created directory "./dists/B/snapshots/now"
+-v2*=Created directory "./dists/B/snapshots/now/dog"
+-v2*=Created directory "./dists/B/snapshots/now/dog/binary-abacus"
+-v6*= exporting 'B|dog|abacus'...
+-v6*= creating './dists/B/snapshots/now/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/B/snapshots/now/dog/source"
+-v6*= exporting 'B|dog|source'...
+-v6*= creating './dists/B/snapshots/now/dog/source/Sources' (gzipped)
+-v2*=Created directory "./dists/B/snapshots/now/cat"
+-v2*=Created directory "./dists/B/snapshots/now/cat/binary-abacus"
+-v6*= exporting 'B|cat|abacus'...
+-v6*= creating './dists/B/snapshots/now/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/B/snapshots/now/cat/source"
+-v6*= exporting 'B|cat|source'...
+-v6*= creating './dists/B/snapshots/now/cat/source/Sources' (gzipped)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testrun - -b . dumpreferences 3<<EOF
+stdout
+*=B|dog|abacus pool/dog/f/foo/foo_0_abacus.deb
+*=s=B=now pool/dog/f/foo/foo_0_abacus.deb
+*=B|dog|source pool/dog/f/foo/foo_1.dsc
+*=s=B=now pool/dog/f/foo/foo_1.dsc
+*=B|dog|source pool/dog/f/foo/foo_1.tar.gz
+*=s=B=now pool/dog/f/foo/foo_1.tar.gz
+EOF
+
+testrun - -b . gensnapshot A now 3<<EOF
+stdout
+-v2*=Created directory "./dists/A/snapshots"
+-v2*=Created directory "./dists/A/snapshots/now"
+-v2*=Created directory "./dists/A/snapshots/now/dog"
+-v2*=Created directory "./dists/A/snapshots/now/dog/binary-abacus"
+-v6*= exporting 'A|dog|abacus'...
+-v6*= creating './dists/A/snapshots/now/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/snapshots/now/dog/binary-calculator"
+-v6*= exporting 'A|dog|calculator'...
+-v6*= creating './dists/A/snapshots/now/dog/binary-calculator/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/snapshots/now/cat"
+-v2*=Created directory "./dists/A/snapshots/now/cat/binary-abacus"
+-v6*= exporting 'A|cat|abacus'...
+-v6*= creating './dists/A/snapshots/now/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/snapshots/now/cat/binary-calculator"
+-v6*= exporting 'A|cat|calculator'...
+-v6*= creating './dists/A/snapshots/now/cat/binary-calculator/Packages' (uncompressed,gzipped)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+testout "" -b . dumpreferences
+grep '^.|' results | sed -e 's/|[^ ]* / contains /' | sort | uniq > references.normal
+grep '^s=' results | sed -e 's/^s=\(.\)=[^ ]* /\1 contains /' | sort > references.snapshot
+dodiff -u references.normal references.snapshot
+rm references.normal references.snapshot
+# Remove contents from original, to make them more look alike:
+for n in dists/B/Release dists/B/snapshots/now/Release dists/A/Release dists/A/snapshots/now/Release ; do
+ ed -s $n <<EOF
+g/^Date: /s/ .*/ unified/
+g,^Suite: ./snapshots/now$,d
+w
+q
+EOF
+done
+mkdir tmp
+mv dists/B/Contents-abacus.gz tmp/
+mv dists/B/dog/Contents-abacus.gz tmp/dog
+mv dists/B/cat/Contents-abacus.gz tmp/cat
+mv dists/B/snapshots/now dists/B.snapshot
+mv dists/A/snapshots/now dists/A.snapshot
+printf 'g/Contents-/d\nw\nq\n' | ed -s dists/B/Release
+rmdir dists/B/snapshots
+rmdir dists/A/snapshots
+dodiff -r -u dists/B.snapshot dists/B
+dodiff -r -u dists/A.snapshot dists/A
+mkdir dists/B/snapshots
+mkdir dists/A/snapshots
+mv dists/B.snapshot dists/B/snapshots/now
+mv dists/A.snapshot dists/A/snapshots/now
+mv tmp/dog dists/B/dog/Contents-abacus.gz
+mv tmp/cat dists/B/cat/Contents-abacus.gz
+mv tmp/Contents-abacus.gz dists/B/
+
+testrun empty -b . dumpunreferenced
+
+testrun - -b . restore B before foo 3<<EOF
+stderr
+*=Could not find './dists/B/snapshots/before/dog/binary-abacus/Packages' nor './dists/B/snapshots/before/dog/binary-abacus/Packages.gz',
+*=ignoring that part of the snapshot.
+*=Could not find './dists/B/snapshots/before/dog/source/Sources' nor './dists/B/snapshots/before/dog/source/Sources.gz',
+*=Could not find './dists/B/snapshots/before/cat/binary-abacus/Packages' nor './dists/B/snapshots/before/cat/binary-abacus/Packages.gz',
+*=Could not find './dists/B/snapshots/before/cat/source/Sources' nor './dists/B/snapshots/before/cat/source/Sources.gz',
+stdout
+EOF
+
+testrun - -b . dumpreferences 3<<EOF
+stdout
+*=B|dog|abacus pool/dog/f/foo/foo_0_abacus.deb
+*=s=B=now pool/dog/f/foo/foo_0_abacus.deb
+*=B|dog|source pool/dog/f/foo/foo_1.dsc
+*=s=B=now pool/dog/f/foo/foo_1.dsc
+*=B|dog|source pool/dog/f/foo/foo_1.tar.gz
+*=s=B=now pool/dog/f/foo/foo_1.tar.gz
+EOF
+
+testrun - -b . restore B now foo 3<<EOF
+stderr
+*=Strange control data for 'foo': no Description at all
+*=Warning: replacing 'foo' version '0' with equal version '0' in 'B|dog|abacus'!
+*=Warning: replacing 'foo' version '1' with equal version '1' in 'B|dog|source'!
+stdout
+-v1*=Adding 'foo' '0' to 'B|dog|abacus'.
+$(opu 'foo' x x 'B' 'dog' 'abacus' 'deb')
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opu 'foo' x x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+
+testrun empty -b . dumpunreferenced
+testrun - -b . dumpreferences 3<<EOF
+stdout
+*=B|dog|abacus pool/dog/f/foo/foo_0_abacus.deb
+*=s=B=now pool/dog/f/foo/foo_0_abacus.deb
+*=B|dog|source pool/dog/f/foo/foo_1.dsc
+*=s=B=now pool/dog/f/foo/foo_1.dsc
+*=B|dog|source pool/dog/f/foo/foo_1.tar.gz
+*=s=B=now pool/dog/f/foo/foo_1.tar.gz
+EOF
+
+testrun - -b . restoresrc B now foo 0 1 3<<EOF
+stderr
+*=Strange control data for 'foo': no Description at all
+*=Warning: replacing 'foo' version '0' with equal version '0' in 'B|dog|abacus'!
+*=Warning: replacing 'foo' version '1' with equal version '1' in 'B|dog|source'!
+stdout
+-v1*=Adding 'foo' '0' to 'B|dog|abacus'.
+$(opu 'foo' x x 'B' 'dog' 'abacus' 'deb')
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opu 'foo' x x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+
+testrun - -b . restoresrc B now foo 0 3<<EOF
+stderr
+*=Strange control data for 'foo': no Description at all
+*=Warning: replacing 'foo' version '0' with equal version '0' in 'B|dog|abacus'!
+stdout
+-v1*=Adding 'foo' '0' to 'B|dog|abacus'.
+$(opu 'foo' x x 'B' 'dog' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+
+testrun - -b . restoresrc B now foo 1 3<<EOF
+stderr
+*=Warning: replacing 'foo' version '1' with equal version '1' in 'B|dog|source'!
+stdout
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opu 'foo' x x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+EOF
+testrun - -b . restorefilter B now 'Directory' 3<<EOF
+stderr
+*=Warning: replacing 'foo' version '1' with equal version '1' in 'B|dog|source'!
+stdout
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opu 'foo' x x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+EOF
+
+testrun - -b . remove B bar foo 3<<EOF
+stderr
+-v0*=Not removed as not found: bar
+stdout
+$(opd 'foo' unset B dog abacus deb)
+$(opd 'foo' unset B dog source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+
+testrun - -b . dumpreferences 3<<EOF
+stdout
+*=s=B=now pool/dog/f/foo/foo_0_abacus.deb
+*=s=B=now pool/dog/f/foo/foo_1.dsc
+*=s=B=now pool/dog/f/foo/foo_1.tar.gz
+EOF
+
+testrun empty -b . dumpunreferenced
+
+dodo test -f pool/dog/f/foo/foo_1.dsc
+
+testrun - -b . restore B now bar foo 3<<EOF
+stderr
+*=Strange control data for 'foo': no Description at all
+stdout
+-v1*=Adding 'foo' '0' to 'B|dog|abacus'.
+$(opa 'foo' x 'B' 'dog' 'abacus' 'deb')
+-v1*=Adding 'foo' '1' to 'B|dog|source'.
+$(opa 'foo' x 'B' 'dog' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+EOF
+
+testrun empty -b . unreferencesnapshot B now
+
+testrun empty -b . dumpunreferenced
+
+testrun - -b . remove B bar foo 3<<EOF
+stderr
+-v0*=Not removed as not found: bar
+stdout
+$(opd 'foo' unset B dog abacus deb)
+$(opd 'foo' unset B dog source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating dog/Contents-abacus...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/dog/f/foo/foo_0_abacus.deb')
+$(ofd 'pool/dog/f/foo/foo_1.dsc')
+$(ofd 'pool/dog/f/foo/foo_1.tar.gz')
+-v2*=removed now empty directory ./pool/dog/f/foo
+-v2*=removed now empty directory ./pool/dog/f
+-v2*=removed now empty directory ./pool/dog
+-v2*=removed now empty directory ./pool
+EOF
+
+testrun empty -b . dumpunreferenced
+testrun empty -b . dumpreferences
+
+rm -r conf db dists importindex logs tmp db.out*
+testsuccess
diff --git a/tests/srcfilterlist.test b/tests/srcfilterlist.test
new file mode 100644
index 0000000..0808dfd
--- /dev/null
+++ b/tests/srcfilterlist.test
@@ -0,0 +1,221 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat > conf/options <<EOF
+export silent-never
+EOF
+
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: abacus source
+Components: main
+
+Codename: a
+Architectures: abacus source
+Components: main
+Pull: a
+
+Codename: b
+Architectures: abacus source
+Components: main
+Pull: b
+
+Codename: c
+Architectures: abacus source
+Components: main
+Pull: c
+
+Codename: d
+Architectures: abacus source
+Components: main
+Pull: d
+
+Codename: e
+Architectures: abacus source
+Components: main
+Pull: e
+
+Codename: f
+Architectures: abacus source
+Components: main
+Pull: f
+EOF
+
+DISTRI=test PACKAGE=a VERSION=1 REVISION=-1 FAKEVER=7-1 SECTION=base genpackage.sh
+mkdir -p pool/main/a/a
+testrun - --delete include test test.changes 3<<EOF
+stdout
+$(odb)
+$(ofa pool/main/a/a/a_1-1.tar.gz)
+$(ofa pool/main/a/a/a_1-1.dsc)
+$(ofa pool/main/a/a/a_1-1_abacus.deb)
+$(ofa pool/main/a/a/a-addons_7-1_all.deb)
+$(opa a 1-1 test main source dsc)
+$(opa a 1-1 test main abacus deb)
+$(opa a-addons 7-1 test main abacus deb)
+EOF
+DISTRI=test PACKAGE=b VERSION=1 REVISION=-1 FAKEVER=7-1 SECTION=base genpackage.sh
+mkdir -p pool/main/b/b
+testrun - --delete include test test.changes 3<<EOF
+stdout
+$(ofa pool/main/b/b/b_1-1.tar.gz)
+$(ofa pool/main/b/b/b_1-1.dsc)
+$(ofa pool/main/b/b/b_1-1_abacus.deb)
+$(ofa pool/main/b/b/b-addons_7-1_all.deb)
+$(opa b 1-1 test main source dsc)
+$(opa b 1-1 test main abacus deb)
+$(opa b-addons 7-1 test main abacus deb)
+EOF
+
+dodo test ! -d dists
+
+cat > conf/pulls <<EOF
+Name: a
+From: test
+FilterList: deinstall bin
+
+Name: b
+From: test
+FilterSrcList: deinstall src
+
+Name: c
+From: test
+FilterList: deinstall bin
+FilterSrcList: deinstall src
+
+Name: d
+From: test
+
+Name: e
+From: test
+FilterList: hold
+
+Name: f
+From: test
+FilterList: deinstall
+EOF
+cat > conf/bin <<EOF
+a = 1-1
+a-addons = 7-1
+b-addons = 1-1
+b = 7-1
+EOF
+cat > conf/src <<EOF
+b = 1-1
+a = 7-1
+a-addons = 7-1
+EOF
+
+testout - --restrict-bin a=1-1 dumppull 3</dev/null
+cat > results.expected <<EOF
+Updates needed for 'a|main|source':
+Updates needed for 'a|main|abacus':
+add 'a' - '1-1' 'a'
+Updates needed for 'b|main|source':
+Updates needed for 'b|main|abacus':
+Updates needed for 'c|main|source':
+Updates needed for 'c|main|abacus':
+add 'a' - '1-1' 'c'
+Updates needed for 'd|main|source':
+Updates needed for 'd|main|abacus':
+add 'a' - '1-1' 'd'
+Updates needed for 'e|main|source':
+Updates needed for 'e|main|abacus':
+add 'a' - '1-1' 'e'
+Updates needed for 'f|main|source':
+Updates needed for 'f|main|abacus':
+EOF
+dodiff results.expected results
+
+testout - --restrict-file-bin /dev/stdin dumppull 3</dev/null <<EOF
+a = 1-1
+EOF
+cat > results.expected <<EOF
+Updates needed for 'a|main|source':
+Updates needed for 'a|main|abacus':
+add 'a' - '1-1' 'a'
+Updates needed for 'b|main|source':
+Updates needed for 'b|main|abacus':
+Updates needed for 'c|main|source':
+Updates needed for 'c|main|abacus':
+add 'a' - '1-1' 'c'
+Updates needed for 'd|main|source':
+Updates needed for 'd|main|abacus':
+add 'a' - '1-1' 'd'
+Updates needed for 'e|main|source':
+Updates needed for 'e|main|abacus':
+add 'a' - '1-1' 'e'
+Updates needed for 'f|main|source':
+Updates needed for 'f|main|abacus':
+EOF
+dodiff results.expected results
+
+testout - --restrict a=1-1 --restrict-bin b=1-1 dumppull 3</dev/null
+cat > results.expected <<EOF
+Updates needed for 'a|main|source':
+add 'a' - '1-1' 'a'
+Updates needed for 'a|main|abacus':
+add 'a' - '1-1' 'a'
+add 'a-addons' - '7-1' 'a'
+Updates needed for 'b|main|source':
+Updates needed for 'b|main|abacus':
+add 'b' - '1-1' 'b'
+Updates needed for 'c|main|source':
+Updates needed for 'c|main|abacus':
+add 'a' - '1-1' 'c'
+add 'a-addons' - '7-1' 'c'
+Updates needed for 'd|main|source':
+add 'a' - '1-1' 'd'
+Updates needed for 'd|main|abacus':
+add 'a' - '1-1' 'd'
+add 'a-addons' - '7-1' 'd'
+add 'b' - '1-1' 'd'
+Updates needed for 'e|main|source':
+add 'a' - '1-1' 'e'
+Updates needed for 'e|main|abacus':
+add 'a' - '1-1' 'e'
+add 'a-addons' - '7-1' 'e'
+add 'b' - '1-1' 'e'
+Updates needed for 'f|main|source':
+Updates needed for 'f|main|abacus':
+EOF
+dodiff results.expected results
+
+testrun - pull 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'a|main|source'
+-v5*= looking what to get from 'test|main|source'
+-v3*= pulling into 'a|main|abacus'
+-v5*= looking what to get from 'test|main|abacus'
+-v3*= pulling into 'b|main|source'
+-v3*= pulling into 'b|main|abacus'
+-v3*= pulling into 'c|main|source'
+-v3*= pulling into 'c|main|abacus'
+-v3*= pulling into 'd|main|source'
+-v3*= pulling into 'd|main|abacus'
+-v3*= pulling into 'e|main|source'
+-v3*= pulling into 'e|main|abacus'
+-v3*= pulling into 'f|main|source'
+-v3*= pulling into 'f|main|abacus'
+-v0*=Installing (and possibly deleting) packages...
+$(opa a 1-1 d main source dsc)
+$(opa a 1-1 d main abacus deb)
+$(opa a-addons 7-1 d main abacus deb)
+$(opa b 1-1 d main source dsc)
+$(opa b 1-1 d main abacus deb)
+$(opa b-addons 7-1 d main abacus deb)
+$(opa a 1-1 a main source dsc)
+$(opa a 1-1 a main abacus deb)
+$(opa a-addons 7-1 a main abacus deb)
+$(opa b 1-1 b main source dsc)
+$(opa b 1-1 b main abacus deb)
+$(opa b-addons 7-1 b main abacus deb)
+$(opa a 1-1 c main abacus deb)
+$(opa a-addons 7-1 c main abacus deb)
+$(opa b 1-1 c main source dsc)
+EOF
+
+rm -r db pool
+testsuccess
diff --git a/tests/subcomponents.test b/tests/subcomponents.test
new file mode 100644
index 0000000..016bd7a
--- /dev/null
+++ b/tests/subcomponents.test
@@ -0,0 +1,502 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+testrun - -b . _versioncompare 0 1 3<<EOF
+stdout
+*='0' is smaller than '1'.
+EOF
+dodo test ! -d db
+mkdir -p conf
+cat > conf/distributions <<EOF
+Codename: foo/updates
+Components: a bb ccc dddd
+UDebComponents: a dddd
+Architectures: x source
+EOF
+testrun - -b . export foo/updates 3<<EOF
+stderr
+stdout
+$(odb)
+-v1*=Exporting foo/updates...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/foo"
+-v2*=Created directory "./dists/foo/updates"
+-v2*=Created directory "./dists/foo/updates/a"
+-v2*=Created directory "./dists/foo/updates/a/binary-x"
+-v6*= exporting 'foo/updates|a|x'...
+-v6*= creating './dists/foo/updates/a/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/a/debian-installer"
+-v2*=Created directory "./dists/foo/updates/a/debian-installer/binary-x"
+-v6*= exporting 'u|foo/updates|a|x'...
+-v6*= creating './dists/foo/updates/a/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/a/source"
+-v6*= exporting 'foo/updates|a|source'...
+-v6*= creating './dists/foo/updates/a/source/Sources' (gzipped)
+-v2*=Created directory "./dists/foo/updates/bb"
+-v2*=Created directory "./dists/foo/updates/bb/binary-x"
+-v6*= exporting 'foo/updates|bb|x'...
+-v6*= creating './dists/foo/updates/bb/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/bb/source"
+-v6*= exporting 'foo/updates|bb|source'...
+-v6*= creating './dists/foo/updates/bb/source/Sources' (gzipped)
+-v2*=Created directory "./dists/foo/updates/ccc"
+-v2*=Created directory "./dists/foo/updates/ccc/binary-x"
+-v6*= exporting 'foo/updates|ccc|x'...
+-v6*= creating './dists/foo/updates/ccc/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/ccc/source"
+-v6*= exporting 'foo/updates|ccc|source'...
+-v6*= creating './dists/foo/updates/ccc/source/Sources' (gzipped)
+-v2*=Created directory "./dists/foo/updates/dddd"
+-v2*=Created directory "./dists/foo/updates/dddd/binary-x"
+-v6*= exporting 'foo/updates|dddd|x'...
+-v6*= creating './dists/foo/updates/dddd/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/dddd/debian-installer"
+-v2*=Created directory "./dists/foo/updates/dddd/debian-installer/binary-x"
+-v6*= exporting 'u|foo/updates|dddd|x'...
+-v6*= creating './dists/foo/updates/dddd/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/updates/dddd/source"
+-v6*= exporting 'foo/updates|dddd|source'...
+-v6*= creating './dists/foo/updates/dddd/source/Sources' (gzipped)
+EOF
+cat > results.expected <<EOF
+Codename: foo/updates
+Date: normalized
+Architectures: x
+Components: a bb ccc dddd
+MD5Sum:
+ $EMPTYMD5 a/binary-x/Packages
+ $EMPTYGZMD5 a/binary-x/Packages.gz
+ 62d4df25a6de22ca443076ace929ec5b 29 a/binary-x/Release
+ $EMPTYMD5 a/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 a/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 a/source/Sources
+ $EMPTYGZMD5 a/source/Sources.gz
+ bc76dd633c41acb37f24e22bf755dc84 34 a/source/Release
+ $EMPTYMD5 bb/binary-x/Packages
+ $EMPTYGZMD5 bb/binary-x/Packages.gz
+ 6b882eefa465a6e3c43d512f7e8da6e4 30 bb/binary-x/Release
+ $EMPTYMD5 bb/source/Sources
+ $EMPTYGZMD5 bb/source/Sources.gz
+ 808be3988e695c1ef966f19641383275 35 bb/source/Release
+ $EMPTYMD5 ccc/binary-x/Packages
+ $EMPTYGZMD5 ccc/binary-x/Packages.gz
+ dec38be5c92799814c9113335317a319 31 ccc/binary-x/Release
+ $EMPTYMD5 ccc/source/Sources
+ $EMPTYGZMD5 ccc/source/Sources.gz
+ 650f349d34e8e929dfc732abbf90c74e 36 ccc/source/Release
+ $EMPTYMD5 dddd/binary-x/Packages
+ $EMPTYGZMD5 dddd/binary-x/Packages.gz
+ 3e4c48246400818d451e65fb03e48f01 32 dddd/binary-x/Release
+ $EMPTYMD5 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 dddd/source/Sources
+ $EMPTYGZMD5 dddd/source/Sources.gz
+ bb7b15c091463b7ea884ccca385f1f0a 37 dddd/source/Release
+SHA1:
+ $EMPTYSHA1 a/binary-x/Packages
+ $EMPTYGZSHA1 a/binary-x/Packages.gz
+ f312c487ee55fc60c23e9117c6a664cbbd862ae6 29 a/binary-x/Release
+ $EMPTYSHA1 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 a/source/Sources
+ $EMPTYGZSHA1 a/source/Sources.gz
+ 186977630f5f42744cd6ea6fcf8ea54960992a2f 34 a/source/Release
+ $EMPTYSHA1 bb/binary-x/Packages
+ $EMPTYGZSHA1 bb/binary-x/Packages.gz
+ c4c6cb0f765a9f71682f3d1bfd02279e58609e6b 30 bb/binary-x/Release
+ $EMPTYSHA1 bb/source/Sources
+ $EMPTYGZSHA1 bb/source/Sources.gz
+ 59260e2f6e121943909241c125c57aed6fca09ad 35 bb/source/Release
+ $EMPTYSHA1 ccc/binary-x/Packages
+ $EMPTYGZSHA1 ccc/binary-x/Packages.gz
+ 7d1913a67637add61ce5ef1ba82eeeb8bc5fe8c6 31 ccc/binary-x/Release
+ $EMPTYSHA1 ccc/source/Sources
+ $EMPTYGZSHA1 ccc/source/Sources.gz
+ a7df74b575289d0697214261e393bc390f428af9 36 ccc/source/Release
+ $EMPTYSHA1 dddd/binary-x/Packages
+ $EMPTYGZSHA1 dddd/binary-x/Packages.gz
+ fc2ab0a76469f8fc81632aa904ceb9c1125ac2c5 32 dddd/binary-x/Release
+ $EMPTYSHA1 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 dddd/source/Sources
+ $EMPTYGZSHA1 dddd/source/Sources.gz
+ 1d44f88f82a325658ee96dd7e7cee975ffa50e4d 37 dddd/source/Release
+SHA256:
+ $EMPTYSHA2 a/binary-x/Packages
+ $EMPTYGZSHA2 a/binary-x/Packages.gz
+ d5e5ba98f784efc26ac8f5ff1f293fab43f37878c92b3da0a7fce39c1da0b463 29 a/binary-x/Release
+ $EMPTYSHA2 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 a/source/Sources
+ $EMPTYGZSHA2 a/source/Sources.gz
+ edd9dad3b1239657da74dfbf45af401ab810b54236b12386189accc0fbc4befa 34 a/source/Release
+ $EMPTYSHA2 bb/binary-x/Packages
+ $EMPTYGZSHA2 bb/binary-x/Packages.gz
+ 2d578ea088ccb77f24a437c4657663e9f5a76939c8a23745f8df9f425cc4c137 30 bb/binary-x/Release
+ $EMPTYSHA2 bb/source/Sources
+ $EMPTYGZSHA2 bb/source/Sources.gz
+ 4653987e3d0be59da18afcc446e59a0118dd995a13e976162749017e95e6709a 35 bb/source/Release
+ $EMPTYSHA2 ccc/binary-x/Packages
+ $EMPTYGZSHA2 ccc/binary-x/Packages.gz
+ e46b90afc77272a351bdde96253f57cba5852317546467fc61ae47d7696500a6 31 ccc/binary-x/Release
+ $EMPTYSHA2 ccc/source/Sources
+ $EMPTYGZSHA2 ccc/source/Sources.gz
+ a6ef831ba0cc6044019e4d598c5f2483872cf047cb65949bb68c73c028864d76 36 ccc/source/Release
+ $EMPTYSHA2 dddd/binary-x/Packages
+ $EMPTYGZSHA2 dddd/binary-x/Packages.gz
+ 70a6c3a457abe60f107f63f0cdb29ab040a4494fefc55922fff0164c97c7a124 32 dddd/binary-x/Release
+ $EMPTYSHA2 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 dddd/source/Sources
+ $EMPTYGZSHA2 dddd/source/Sources.gz
+ 504549b725951e79fb2e43149bb0cf42619286284890666b8e9fe5fb0787f306 37 dddd/source/Release
+EOF
+normalizerelease dists/foo/updates/Release > results
+dodiff results.expected results
+cat > conf/distributions <<EOF
+Codename: foo/updates
+Components: a bb ccc dddd
+UDebComponents: a dddd
+Architectures: x source
+FakeComponentPrefix: updates
+EOF
+testrun - -b . export foo/updates 3<<EOF
+stderr
+stdout
+-v1*=Exporting foo/updates...
+-v6*= exporting 'foo/updates|a|x'...
+-v6*= replacing './dists/foo/updates/a/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'u|foo/updates|a|x'...
+-v6*= replacing './dists/foo/updates/a/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|a|source'...
+-v6*= replacing './dists/foo/updates/a/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|bb|x'...
+-v6*= replacing './dists/foo/updates/bb/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|bb|source'...
+-v6*= replacing './dists/foo/updates/bb/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|ccc|x'...
+-v6*= replacing './dists/foo/updates/ccc/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|ccc|source'...
+-v6*= replacing './dists/foo/updates/ccc/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|dddd|x'...
+-v6*= replacing './dists/foo/updates/dddd/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'u|foo/updates|dddd|x'...
+-v6*= replacing './dists/foo/updates/dddd/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|dddd|source'...
+-v6*= replacing './dists/foo/updates/dddd/source/Sources' (gzipped)
+EOF
+cat > results.expected <<EOF
+Codename: foo
+Date: normalized
+Architectures: x
+Components: updates/a updates/bb updates/ccc updates/dddd
+MD5Sum:
+ $EMPTYMD5 a/binary-x/Packages
+ $EMPTYGZMD5 a/binary-x/Packages.gz
+ 62d4df25a6de22ca443076ace929ec5b 29 a/binary-x/Release
+ $EMPTYMD5 a/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 a/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 a/source/Sources
+ $EMPTYGZMD5 a/source/Sources.gz
+ bc76dd633c41acb37f24e22bf755dc84 34 a/source/Release
+ $EMPTYMD5 bb/binary-x/Packages
+ $EMPTYGZMD5 bb/binary-x/Packages.gz
+ 6b882eefa465a6e3c43d512f7e8da6e4 30 bb/binary-x/Release
+ $EMPTYMD5 bb/source/Sources
+ $EMPTYGZMD5 bb/source/Sources.gz
+ 808be3988e695c1ef966f19641383275 35 bb/source/Release
+ $EMPTYMD5 ccc/binary-x/Packages
+ $EMPTYGZMD5 ccc/binary-x/Packages.gz
+ dec38be5c92799814c9113335317a319 31 ccc/binary-x/Release
+ $EMPTYMD5 ccc/source/Sources
+ $EMPTYGZMD5 ccc/source/Sources.gz
+ 650f349d34e8e929dfc732abbf90c74e 36 ccc/source/Release
+ $EMPTYMD5 dddd/binary-x/Packages
+ $EMPTYGZMD5 dddd/binary-x/Packages.gz
+ 3e4c48246400818d451e65fb03e48f01 32 dddd/binary-x/Release
+ $EMPTYMD5 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 dddd/source/Sources
+ $EMPTYGZMD5 dddd/source/Sources.gz
+ bb7b15c091463b7ea884ccca385f1f0a 37 dddd/source/Release
+SHA1:
+ $EMPTYSHA1 a/binary-x/Packages
+ $EMPTYGZSHA1 a/binary-x/Packages.gz
+ f312c487ee55fc60c23e9117c6a664cbbd862ae6 29 a/binary-x/Release
+ $EMPTYSHA1 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 a/source/Sources
+ $EMPTYGZSHA1 a/source/Sources.gz
+ 186977630f5f42744cd6ea6fcf8ea54960992a2f 34 a/source/Release
+ $EMPTYSHA1 bb/binary-x/Packages
+ $EMPTYGZSHA1 bb/binary-x/Packages.gz
+ c4c6cb0f765a9f71682f3d1bfd02279e58609e6b 30 bb/binary-x/Release
+ $EMPTYSHA1 bb/source/Sources
+ $EMPTYGZSHA1 bb/source/Sources.gz
+ 59260e2f6e121943909241c125c57aed6fca09ad 35 bb/source/Release
+ $EMPTYSHA1 ccc/binary-x/Packages
+ $EMPTYGZSHA1 ccc/binary-x/Packages.gz
+ 7d1913a67637add61ce5ef1ba82eeeb8bc5fe8c6 31 ccc/binary-x/Release
+ $EMPTYSHA1 ccc/source/Sources
+ $EMPTYGZSHA1 ccc/source/Sources.gz
+ a7df74b575289d0697214261e393bc390f428af9 36 ccc/source/Release
+ $EMPTYSHA1 dddd/binary-x/Packages
+ $EMPTYGZSHA1 dddd/binary-x/Packages.gz
+ fc2ab0a76469f8fc81632aa904ceb9c1125ac2c5 32 dddd/binary-x/Release
+ $EMPTYSHA1 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 dddd/source/Sources
+ $EMPTYGZSHA1 dddd/source/Sources.gz
+ 1d44f88f82a325658ee96dd7e7cee975ffa50e4d 37 dddd/source/Release
+SHA256:
+ $EMPTYSHA2 a/binary-x/Packages
+ $EMPTYGZSHA2 a/binary-x/Packages.gz
+ d5e5ba98f784efc26ac8f5ff1f293fab43f37878c92b3da0a7fce39c1da0b463 29 a/binary-x/Release
+ $EMPTYSHA2 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 a/source/Sources
+ $EMPTYGZSHA2 a/source/Sources.gz
+ edd9dad3b1239657da74dfbf45af401ab810b54236b12386189accc0fbc4befa 34 a/source/Release
+ $EMPTYSHA2 bb/binary-x/Packages
+ $EMPTYGZSHA2 bb/binary-x/Packages.gz
+ 2d578ea088ccb77f24a437c4657663e9f5a76939c8a23745f8df9f425cc4c137 30 bb/binary-x/Release
+ $EMPTYSHA2 bb/source/Sources
+ $EMPTYGZSHA2 bb/source/Sources.gz
+ 4653987e3d0be59da18afcc446e59a0118dd995a13e976162749017e95e6709a 35 bb/source/Release
+ $EMPTYSHA2 ccc/binary-x/Packages
+ $EMPTYGZSHA2 ccc/binary-x/Packages.gz
+ e46b90afc77272a351bdde96253f57cba5852317546467fc61ae47d7696500a6 31 ccc/binary-x/Release
+ $EMPTYSHA2 ccc/source/Sources
+ $EMPTYGZSHA2 ccc/source/Sources.gz
+ a6ef831ba0cc6044019e4d598c5f2483872cf047cb65949bb68c73c028864d76 36 ccc/source/Release
+ $EMPTYSHA2 dddd/binary-x/Packages
+ $EMPTYGZSHA2 dddd/binary-x/Packages.gz
+ 70a6c3a457abe60f107f63f0cdb29ab040a4494fefc55922fff0164c97c7a124 32 dddd/binary-x/Release
+ $EMPTYSHA2 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 dddd/source/Sources
+ $EMPTYGZSHA2 dddd/source/Sources.gz
+ 504549b725951e79fb2e43149bb0cf42619286284890666b8e9fe5fb0787f306 37 dddd/source/Release
+EOF
+normalizerelease dists/foo/updates/Release > results
+dodiff results.expected results
+# Now try with suite
+cat > conf/distributions <<EOF
+Codename: foo/updates
+Suite: bla/updates
+Components: a bb ccc dddd
+UDebComponents: a dddd
+Architectures: x source
+FakeComponentPrefix: updates
+EOF
+testrun - -b . export foo/updates 3<<EOF
+stderr
+stdout
+-v1*=Exporting foo/updates...
+-v6*= exporting 'foo/updates|a|x'...
+-v6*= replacing './dists/foo/updates/a/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'u|foo/updates|a|x'...
+-v6*= replacing './dists/foo/updates/a/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|a|source'...
+-v6*= replacing './dists/foo/updates/a/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|bb|x'...
+-v6*= replacing './dists/foo/updates/bb/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|bb|source'...
+-v6*= replacing './dists/foo/updates/bb/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|ccc|x'...
+-v6*= replacing './dists/foo/updates/ccc/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|ccc|source'...
+-v6*= replacing './dists/foo/updates/ccc/source/Sources' (gzipped)
+-v6*= exporting 'foo/updates|dddd|x'...
+-v6*= replacing './dists/foo/updates/dddd/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'u|foo/updates|dddd|x'...
+-v6*= replacing './dists/foo/updates/dddd/debian-installer/binary-x/Packages' (uncompressed,gzipped)
+-v6*= exporting 'foo/updates|dddd|source'...
+-v6*= replacing './dists/foo/updates/dddd/source/Sources' (gzipped)
+EOF
+cat > results.expected <<EOF
+Suite: bla
+Codename: foo
+Date: normalized
+Architectures: x
+Components: updates/a updates/bb updates/ccc updates/dddd
+MD5Sum:
+ $EMPTYMD5 a/binary-x/Packages
+ $EMPTYGZMD5 a/binary-x/Packages.gz
+ $(md5releaseline foo/updates a/binary-x/Release)
+ $EMPTYMD5 a/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 a/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 a/source/Sources
+ $EMPTYGZMD5 a/source/Sources.gz
+ $(md5releaseline foo/updates a/source/Release)
+ $EMPTYMD5 bb/binary-x/Packages
+ $EMPTYGZMD5 bb/binary-x/Packages.gz
+ $(md5releaseline foo/updates bb/binary-x/Release)
+ $EMPTYMD5 bb/source/Sources
+ $EMPTYGZMD5 bb/source/Sources.gz
+ $(md5releaseline foo/updates bb/source/Release)
+ $EMPTYMD5 ccc/binary-x/Packages
+ $EMPTYGZMD5 ccc/binary-x/Packages.gz
+ $(md5releaseline foo/updates ccc/binary-x/Release)
+ $EMPTYMD5 ccc/source/Sources
+ $EMPTYGZMD5 ccc/source/Sources.gz
+ $(md5releaseline foo/updates ccc/source/Release)
+ $EMPTYMD5 dddd/binary-x/Packages
+ $EMPTYGZMD5 dddd/binary-x/Packages.gz
+ $(md5releaseline foo/updates dddd/binary-x/Release)
+ $EMPTYMD5 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZMD5 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYMD5 dddd/source/Sources
+ $EMPTYGZMD5 dddd/source/Sources.gz
+ $(md5releaseline foo/updates dddd/source/Release)
+SHA1:
+ $EMPTYSHA1 a/binary-x/Packages
+ $EMPTYGZSHA1 a/binary-x/Packages.gz
+ $(sha1releaseline foo/updates a/binary-x/Release)
+ $EMPTYSHA1 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 a/source/Sources
+ $EMPTYGZSHA1 a/source/Sources.gz
+ $(sha1releaseline foo/updates a/source/Release)
+ $EMPTYSHA1 bb/binary-x/Packages
+ $EMPTYGZSHA1 bb/binary-x/Packages.gz
+ $(sha1releaseline foo/updates bb/binary-x/Release)
+ $EMPTYSHA1 bb/source/Sources
+ $EMPTYGZSHA1 bb/source/Sources.gz
+ $(sha1releaseline foo/updates bb/source/Release)
+ $EMPTYSHA1 ccc/binary-x/Packages
+ $EMPTYGZSHA1 ccc/binary-x/Packages.gz
+ $(sha1releaseline foo/updates ccc/binary-x/Release)
+ $EMPTYSHA1 ccc/source/Sources
+ $EMPTYGZSHA1 ccc/source/Sources.gz
+ $(sha1releaseline foo/updates ccc/source/Release)
+ $EMPTYSHA1 dddd/binary-x/Packages
+ $EMPTYGZSHA1 dddd/binary-x/Packages.gz
+ $(sha1releaseline foo/updates dddd/binary-x/Release)
+ $EMPTYSHA1 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA1 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA1 dddd/source/Sources
+ $EMPTYGZSHA1 dddd/source/Sources.gz
+ $(sha1releaseline foo/updates dddd/source/Release)
+SHA256:
+ $EMPTYSHA2 a/binary-x/Packages
+ $EMPTYGZSHA2 a/binary-x/Packages.gz
+ $(sha2releaseline foo/updates a/binary-x/Release)
+ $EMPTYSHA2 a/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 a/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 a/source/Sources
+ $EMPTYGZSHA2 a/source/Sources.gz
+ $(sha2releaseline foo/updates a/source/Release)
+ $EMPTYSHA2 bb/binary-x/Packages
+ $EMPTYGZSHA2 bb/binary-x/Packages.gz
+ $(sha2releaseline foo/updates bb/binary-x/Release)
+ $EMPTYSHA2 bb/source/Sources
+ $EMPTYGZSHA2 bb/source/Sources.gz
+ $(sha2releaseline foo/updates bb/source/Release)
+ $EMPTYSHA2 ccc/binary-x/Packages
+ $EMPTYGZSHA2 ccc/binary-x/Packages.gz
+ $(sha2releaseline foo/updates ccc/binary-x/Release)
+ $EMPTYSHA2 ccc/source/Sources
+ $EMPTYGZSHA2 ccc/source/Sources.gz
+ $(sha2releaseline foo/updates ccc/source/Release)
+ $EMPTYSHA2 dddd/binary-x/Packages
+ $EMPTYGZSHA2 dddd/binary-x/Packages.gz
+ $(sha2releaseline foo/updates dddd/binary-x/Release)
+ $EMPTYSHA2 dddd/debian-installer/binary-x/Packages
+ $EMPTYGZSHA2 dddd/debian-installer/binary-x/Packages.gz
+ $EMPTYSHA2 dddd/source/Sources
+ $EMPTYGZSHA2 dddd/source/Sources.gz
+ $(sha2releaseline foo/updates dddd/source/Release)
+EOF
+normalizerelease dists/foo/updates/Release > results
+dodiff results.expected results
+testrun - -b . createsymlinks 3<<EOF
+stderr
+-v0*=Creating symlinks with '/' in them is not yet supported:
+-v0*=Not creating 'bla/updates' -> 'foo/updates' because of '/'.
+stdout
+EOF
+cat >> conf/distributions <<EOF
+
+Codename: foo
+Suite: bla
+Architectures: ooooooooooooooooooooooooooooooooooooooooo source
+Components:
+ x a
+EOF
+testrun - -b . createsymlinks 3<<EOF
+stderr
+-v2*=Not creating 'bla/updates' -> 'foo/updates' because of the '/' in it.
+-v2*=Hopefully something else will link 'bla' -> 'foo' then this is not needed.
+stdout
+-v1*=Created ./dists/bla->foo
+EOF
+# check a .dsc with nothing in it:
+cat > test.dsc <<EOF
+
+EOF
+testrun - -b . includedsc foo test.dsc 3<<EOF
+return 255
+stderr
+*=Could only find spaces within 'test.dsc'!
+-v0*=There have been errors!
+stdout
+EOF
+cat > test.dsc <<EOF
+Format: 0.0
+Source: test
+Version: 0
+Maintainer: me <guess@who>
+Section: section
+Priority: priority
+Files:
+EOF
+testrun - -C a -b . includedsc foo test.dsc 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/a"
+-v2*=Created directory "./pool/a/t"
+-v2*=Created directory "./pool/a/t/test"
+$(ofa 'pool/a/t/test/test_0.dsc')
+$(opa 'test' unset 'foo' 'a' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/foo/x"
+-v2*=Created directory "./dists/foo/x/binary-ooooooooooooooooooooooooooooooooooooooooo"
+-v6*= looking for changes in 'foo|x|ooooooooooooooooooooooooooooooooooooooooo'...
+-v6*= creating './dists/foo/x/binary-ooooooooooooooooooooooooooooooooooooooooo/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/x/source"
+-v6*= looking for changes in 'foo|x|source'...
+-v6*= creating './dists/foo/x/source/Sources' (gzipped)
+-v2*=Created directory "./dists/foo/a"
+-v2*=Created directory "./dists/foo/a/binary-ooooooooooooooooooooooooooooooooooooooooo"
+-v6*= looking for changes in 'foo|a|ooooooooooooooooooooooooooooooooooooooooo'...
+-v6*= creating './dists/foo/a/binary-ooooooooooooooooooooooooooooooooooooooooo/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/foo/a/source"
+-v6*= looking for changes in 'foo|a|source'...
+-v6*= creating './dists/foo/a/source/Sources' (gzipped)
+EOF
+testrun - -b . copy foo/updates foo test test test test 3<<EOF
+stderr
+-v0*=Hint: 'test' was listed multiple times, ignoring all but first!
+stdout
+-v3*=Not looking into 'foo|x|ooooooooooooooooooooooooooooooooooooooooo' as no matching target in 'foo/updates'!
+-v3*=Not looking into 'foo|x|source' as no matching target in 'foo/updates'!
+-v3*=Not looking into 'foo|a|ooooooooooooooooooooooooooooooooooooooooo' as no matching target in 'foo/updates'!
+-v1*=Adding 'test' '0' to 'foo/updates|a|source'.
+$(opa 'test' unset 'foo/updates' 'a' 'source' 'dsc')
+-v*=Exporting indices...
+-v6*= looking for changes in 'foo/updates|a|x'...
+-v6*= looking for changes in 'u|foo/updates|a|x'...
+-v6*= looking for changes in 'foo/updates|a|source'...
+-v6*= replacing './dists/foo/updates/a/source/Sources' (gzipped)
+-v6*= looking for changes in 'foo/updates|bb|x'...
+-v6*= looking for changes in 'foo/updates|bb|source'...
+-v6*= looking for changes in 'foo/updates|ccc|x'...
+-v6*= looking for changes in 'foo/updates|ccc|source'...
+-v6*= looking for changes in 'foo/updates|dddd|x'...
+-v6*= looking for changes in 'u|foo/updates|dddd|x'...
+-v6*= looking for changes in 'foo/updates|dddd|source'...
+EOF
+rm -r -f db conf dists pool
+testsuccess
diff --git a/tests/template.test b/tests/template.test
new file mode 100644
index 0000000..e2de105
--- /dev/null
+++ b/tests/template.test
@@ -0,0 +1,4 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+testsuccess
diff --git a/tests/test.inc b/tests/test.inc
new file mode 100644
index 0000000..42d51b5
--- /dev/null
+++ b/tests/test.inc
@@ -0,0 +1,237 @@
+# Shell script snippets used in the test scripts...
+
+set -e -u
+
+export LC_ALL=C
+
+# testrun <rules> <reprepro-args...>
+# Run reprepro under the testtool output comparator.
+#   <rules> = "":  run without an expectation set (no -r, fd 3 unused)
+#   <rules> = "-": expectations are supplied by the caller on fd 3 (heredoc)
+#   otherwise:     expectations are read from the file "<rules>.rules"
+testrun() {
+rules=$1
+shift
+if test "x$rules" = "x" ; then
+	"$TESTTOOL" -C $TRACKINGTESTOPTIONS $TESTOPTIONS "$REPREPRO" $REPREPROOPTIONS "$@"
+elif test "x$rules" = "x-" ; then
+	"$TESTTOOL" -r -C $TRACKINGTESTOPTIONS $TESTOPTIONS "$REPREPRO" $REPREPROOPTIONS "$@" 3<"$rules".rules
+fi
+}
+# testout: like testrun, but additionally captures reprepro's output
+# into the file "results" (testtool -o) for later dodiff comparison.
+testout() {
+rules=$1
+shift
+if test "x$rules" = "x" ; then
+	"$TESTTOOL" -o results $TRACKINGTESTOPTIONS $TESTOPTIONS "$REPREPRO" $REPREPROOPTIONS "$@"
+elif test "x$rules" = "x-" ; then
+	"$TESTTOOL" -o results -r $TRACKINGTESTOPTIONS $TESTOPTIONS "$REPREPRO" $REPREPROOPTIONS "$@"
+else
+	"$TESTTOOL" -o results -r $TRACKINGTESTOPTIONS $TESTOPTIONS "$REPREPRO" $REPREPROOPTIONS "$@" 3<"$rules".rules
+fi
+}
+# dogrep: echo the grep command being run, then run it quietly.
+# Fails (via set -e in callers' context) when the pattern is absent.
+dogrep() {
+echo grep -q "$@"
+grep -q "$@"
+}
+# dongrep: inverse of dogrep — succeeds only when the pattern is absent.
+dongrep() {
+echo "!grep" -q "$@"
+! grep -q "$@"
+}
+# dodiff: echo and run a unified diff; non-empty diff fails the test.
+dodiff() {
+echo diff -u "$@"
+diff -u "$@"
+}
+# dodo: echo a command, then execute it (trace-style helper).
+dodo() {
+echo "$@"
+"$@"
+}
+
+if test -z "$SRCDIR" || ! test -d "$SRCDIR" ; then
+ echo "SRCDIR='$SRCDIR' not a valid directory!" >&2
+ exit 1
+fi
+if test -z "$TESTSDIR" || ! test -d "$TESTSDIR" ; then
+ echo "TESTSDIR='$TESTSDIR' not a valid directory!" >&2
+ exit 1
+fi
+if test -z "$WORKDIR" || ! test -d "$WORKDIR" ; then
+ echo "WORKDIR='$WORKDIR' not a valid directory!" >&2
+ exit 1
+fi
+
+# avoid architecture dependency of the test-suite:
+export DEB_HOST_ARCH="abacus"
+
+export PATH="$TESTSDIR:$PATH"
+if ! [ -x "$REPREPRO" ] ; then
+ echo "Could not find $REPREPRO!" >&2
+ exit 1
+fi
+
+# checknolog <logname>: assert that no log file of that name was written.
+checknolog() {
+	dodo test ! -f logs/"$1"
+}
+# checklog <logname>: compare logs/<logname> against expected lines read
+# from stdin. Timestamps from the current hour are rewritten to the
+# literal "DATESTR" and both sides are sorted, so line order and exact
+# minutes/seconds do not matter. Removes the log afterwards.
+# NOTE(review): the normalization assumes the test does not straddle an
+# hour boundary (see the warning at the top of test.sh).
+checklog() {
+	sort > results.log.expected
+	LOGDATE="$(date +'%Y-%m-%d %H:')"
+	echo normalizing "$1": DATESTR is "$LOGDATE??:??"
+	sed -e 's/^'"$LOGDATE"'[0-9][0-9]:[0-9][0-9] /DATESTR /g' logs/"$1" | sort > results.log
+	dodiff results.log.expected results.log
+	rm logs/"$1" results.log
+}
+# md5/sha1/sha256 <file>: print only the hex digest of <file>.
+md5() {
+md5sum "$1" | cut -d' ' -f1
+}
+sha1() {
+sha1sum "$1" | cut -d' ' -f1
+}
+sha256() {
+sha256sum "$1" | cut -d' ' -f1
+}
+# printindexpart <deb-file>: print the Packages-index stanza expected
+# for a .deb. Extracts the control file with dpkg-deb, then uses ed to
+# move Priority/Section above Description and to insert Filename, Size
+# and the three checksum fields before it, followed by a blank line.
+# The heredoc below is an ed script; its exact bytes matter.
+printindexpart() {
+	FILENAME="$1"
+	dpkg-deb -I "$FILENAME" control >"$FILENAME".control
+	ed -s "$FILENAME".control << EOF
+H
+/^Description:/ kd
+/^Priority/ m 'd-1
+/^Section/ m 'd-1
+'d i
+Filename: $FILENAME
+Size: $(stat -c "%s" "$FILENAME")
+SHA256: $(sha256 "$FILENAME")
+SHA1: $(sha1 "$FILENAME")
+MD5sum: $(md5 "$FILENAME")
+.
+$ a
+
+.
+w
+q
+EOF
+cat "$FILENAME".control
+rm "$FILENAME".control
+}
+# withoutchecksums: filter out "Checksums-*:" fields (and their
+# continuation lines) from control-style input.
+withoutchecksums() {
+awk 'BEGIN{inheader=0} /^Checksums-.*:/ || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ; print}' "$@"
+}
+# mdandsize <file>: "<md5> <size>" as used in Release checksum lines.
+mdandsize() {
+cat <<EOF
+$(md5sum "$1" | cut -d' ' -f1) $(stat -c "%s" "$1")
+EOF
+}
+# sha <file>: reprepro-internal ":1:<sha1>" checksum notation.
+sha() {
+echo -n ":1:"
+sha1sum "$1" | cut -d' ' -f1
+}
+sha1andsize() {
+cat <<EOF
+$(sha1sum "$1" | cut -d' ' -f1) $(stat -c "%s" "$1")
+EOF
+}
+# sha1and7size: like sha1andsize but with the size right-padded to
+# width 7, matching the column layout of some index files.
+sha1and7size() {
+cat <<EOF
+$(sha1sum "$1" | cut -d' ' -f1) $(stat -c "%7s" "$1")
+EOF
+}
+# sha2 <file>: reprepro-internal ":2:<sha256>" checksum notation.
+sha2() {
+echo -n ":2:"
+sha256sum "$1" | cut -d' ' -f1
+}
+sha2andsize() {
+cat <<EOF
+$(sha256sum "$1" | cut -d' ' -f1) $(stat -c "%s" "$1")
+EOF
+}
+sizeonly() {
+stat -c "%s" "$1"
+}
+sha2only() {
+sha256sum "$1" | cut -d' ' -f1
+}
+# fullchecksum <file>: ":1:<sha1> :2:<sha256> <md5> <size>" — the full
+# checksum string as stored in reprepro's databases.
+fullchecksum() {
+cat <<EOF
+$(sha "$1") $(sha2 "$1") $(md5sum "$1" | cut -d' ' -f1) $(stat -c "%s" "$1")
+EOF
+}
+# *releaseline <codename> <path>: produce one checksum line for
+# dists/<codename>/<path> as it appears in a Release file.
+md5releaseline() {
+	echo "$(mdandsize dists/"$1"/"$2") $2"
+}
+sha1releaseline() {
+	echo "$(sha1andsize dists/"$1"/"$2") $2"
+}
+sha2releaseline() {
+	echo "$(sha2andsize dists/"$1"/"$2") $2"
+}
+# normalizerelease <file>: blank out the volatile Date: field so Release
+# files can be compared with dodiff.
+normalizerelease() {
+	sed -e 's/^Date: .*/Date: normalized/' "$1"
+}
+
+# Well-known digests of the empty file, and of the 20-byte gzip /
+# 14-byte bzip2 containers produced for empty input; used when checking
+# Release files of empty indices.
+EMPTYMD5ONLY="d41d8cd98f00b204e9800998ecf8427e"
+EMPTYMD5="d41d8cd98f00b204e9800998ecf8427e 0"
+EMPTYGZMD5="7029066c27ac6f5ef18d660d5741979a 20"
+EMPTYBZ2MD5="4059d198768f9f8dc9372dc1c54bc3c3 14"
+EMPTYSHA1="da39a3ee5e6b4b0d3255bfef95601890afd80709 0"
+EMPTYGZSHA1="46c6643f07aa7f6bfe7118de926b86defc5087c4 20"
+EMPTYBZ2SHA1="64a543afbb5f4bf728636bdcbbe7a2ed0804adc2 14"
+EMPTYSHA2="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0"
+EMPTYGZSHA2="59869db34853933b239f1e2219cf7d431da006aa919635478511fabbfc8849d2 20"
+EMPTYBZ2SHA2="d3dda84eb03b9738d118eb2be78e246106900493c0ae07819ad60815134a8058 14"
+
+# Marker printed at the end of every passing test script; the runner
+# greps logs, so this line signals normal completion.
+testsuccess() {
+	echo "Test$TESTNAME completed successfully"
+}
+
+# The following helpers emit testtool expectation lines (the "-v..*="
+# / "-d1*=" patterns fed to testtool via heredocs), describing the
+# messages reprepro is expected to print for db operations.
+# odb: expectation for the initial creation of ./db.
+odb() {
+	printf -- '-v2*=Created directory "./db"'
+}
+
+# otd/ottd <name> <version> <codename>: tracking-db removal message
+# (otd matches debug output, ottd the tracking-specific channel).
+otd() {
+	local name="$1" version="$2" codename="$3"
+	printf -- "-d1*=db: '%s' '%s' removed from tracking.db(%s)." "$name" "$version" "$codename"
+}
+ottd() {
+	local name="$1" version="$2" codename="$3"
+	printf -- "-t1*=db: '%s' '%s' removed from tracking.db(%s)." "$name" "$version" "$codename"
+}
+# ota/otta <codename> <name>: tracking-db addition message.
+ota() {
+	printf -- "-d1*=db: '%s' added to tracking.db(%s)." "$2" "$1"
+}
+otta() {
+	printf -- "-t1*=db: '%s' added to tracking.db(%s)." "$2" "$1"
+}
+# ofa <poolpath>: checksums.db(pool) addition message.
+ofa() {
+	printf -- "-d1*=db: '%s' added to checksums.db(pool)." "$1"
+}
+# ofd <poolpath> [print-deleting] [pattern-flag]: pool file removal;
+# the optional second arg suppresses the "deleting and forgetting" line.
+ofd() {
+	if ${2:-true} ; then
+		printf -- "-v1${3-*}"'=deleting and forgetting %s\n' "$1"
+	fi
+	printf -- "-d1${3-*}=db: '%s' removed from checksums.db(pool)." "$1"
+}
+# opa <name> <version> <codename> <component> <arch> <type>: package
+# addition message. Note: <version> is accepted for call-site symmetry
+# but is not part of the printed message.
+opa() {
+	local name="$1" version="$2" codename="$3" component="$4" arch="$5" type="$6" u=""
+	if test "$type" = "udeb" ; then u='u|' ; fi
+	printf -- "-d1*=db: '%s' added to packages.db(%s%s|%s|%s)." \
+		"$name" "$u" "$codename" "$component" "$arch"
+}
+# opu <name> <oldversion> <version> <codename> <component> <arch> <type>:
+# package replacement = removal followed by addition. The version
+# arguments are accepted for symmetry only; they are not printed.
+opu() {
+	local name="$1" oldversion="$2" version="$3" codename="$4" component="$5" arch="$6" type="$7" u=""
+	if test "$type" = "udeb" ; then u='u|' ; fi
+	printf -- "-d1*=db: '%s' removed from packages.db(%s%s|%s|%s).\n" \
+		"$name" "$u" "$codename" "$component" "$arch"
+	printf -- "-d1*=db: '%s' added to packages.db(%s%s|%s|%s)." \
+		"$name" "$u" "$codename" "$component" "$arch"
+}
+# opd <name> <version> <codename> <component> <arch> <type>: package
+# removal (verbose line plus db line). <version> unused, as in opa.
+opd() {
+	local name="$1" version="$2" codename="$3" component="$4" arch="$5" type="$6" u=""
+	if test "$type" = "udeb" ; then u='u|' ; fi
+	printf -- "-v1*=removing '%s' from '%s%s|%s|%s'...\n" \
+		"$name" "$u" "$codename" "$component" "$arch"
+	printf -- "-d1*=db: '%s' removed from packages.db(%s%s|%s|%s)." \
+		"$name" "$u" "$codename" "$component" "$arch"
+}
+
+cat > empty.rules <<EOF
+stdout
+stderr
+returns 0
+EOF
+
diff --git a/tests/test.sh b/tests/test.sh
new file mode 100755
index 0000000..64851f6
--- /dev/null
+++ b/tests/test.sh
@@ -0,0 +1,271 @@
+#!/bin/dash
+
+# This needs installed:
+# apt, dpkg-dev, ed, python3-apt, xz, lzma, python3, dbX.Y-util
+# it will fail if run over a changing hour
+
+set -e -u
+
+export LC_ALL=C
+
+SRCDIR="$(readlink -e "$(dirname $0)/..")"
+WORKDIR="`pwd`/testdir"
+USE_VALGRIND=""
+VALGRIND_LEAK=summary
+VALGRIND_EXTRA_OPTIONS=""
+VALGRIND_SUP=""
+TESTOPTIONS=""
+VERBOSEDB="1"
+TESTSHELLOPTS=
+testtorun="all"
+verbosity=6
+deleteifmarked=true
+
+# Parse command-line options; parsing stops at the first non-option
+# argument (the positional testtool/reprepro paths handled below).
+while [ $# -gt 0 ] ; do
+	case "$1" in
+		--srcdir)
+			shift
+			SRCDIR="$(readlink -e "$1")"
+			shift
+			;;
+		--neverdelete)
+			deleteifmarked=false
+			shift
+			;;
+		--test)
+			shift
+			testtorun="$1"
+			shift
+			;;
+		--trace)
+			shift
+			TESTSHELLOPTS=-x
+			;;
+		--delete)
+			# remove a stale workdir now when automatic removal is off
+			if ! $deleteifmarked ; then
+				rm -r "$WORKDIR" || true
+			fi
+			shift
+			;;
+		--valgrind)
+			USE_VALGRIND=1
+			shift
+			;;
+		--valgrind-leak)
+			# NOTE(review): this arm was a duplicate '--valgrind)'
+			# pattern and therefore unreachable (case takes the first
+			# match), so VALGRIND_LEAK=full could never be selected.
+			# Renamed to --valgrind-leak — confirm the intended option
+			# name against upstream.
+			USE_VALGRIND=1
+			VALGRIND_LEAK=full
+			shift
+			;;
+		--valgrind-supp)
+			USE_VALGRIND=1
+			shift
+			VALGRIND_SUP="$1"
+			shift
+			;;
+		--valgrind-opts)
+			shift
+			VALGRIND_EXTRA_OPTIONS="${VALGRIND_EXTRA_OPTIONS} $1"
+			shift
+			;;
+		--verbosity)
+			shift
+			verbosity="$1"
+			shift
+			;;
+		--noverbosedb)
+			VERBOSEDB=""
+			shift
+			;;
+		--*)
+			echo "Unsupported option $1" >&2
+			exit 1
+			;;
+		*)
+			break
+			;;
+	esac
+done
+
+if [ "2" -lt "$#" ] ; then
+ echo "Syntax: test.sh [<testtool-binary>] [<reprepro-binary>]" >&2
+ exit 1
+fi
+echo "SRCDIR is '$SRCDIR'"
+if [ ! -d "$SRCDIR" ] || [ ! -d "$SRCDIR/tests" ] ; then
+ echo "Error: Could not find source directory (tried: '$SRCDIR')!" >&2
+ exit 1
+fi
+TESTSDIR="$SRCDIR/tests"
+if [ "1" -le "$#" ] ; then
+ TESTTOOL="$(readlink -e "$1")"
+else
+ TESTTOOL=testtool
+fi
+if [ "2" -le "$#" ] ; then
+ REPREPRO="$(readlink -e "$2")"
+else
+ REPREPRO="$SRCDIR/reprepro"
+fi
+RREDTOOL="$(dirname "$REPREPRO")/rredtool"
+
+# Choose default testtool options unless the caller preset TESTOPTIONS.
+# Without valgrind only the basic checks (-e -a) are used; with valgrind
+# a leak-check mode and a suppression file are added.
+if [ -z "$TESTOPTIONS" ] ; then
+	if [ -z "$USE_VALGRIND" ] ; then
+		TESTOPTIONS="-e -a"
+	elif [ -z "$VALGRIND_SUP" ] ; then
+		# leak-check=full is better than leak-check=summary,
+		# but sadly squeeze's valgrind counts leaks into the error
+		# number with full, and we want to ignore them for child
+		# processes...
+		TESTOPTIONS="-e -a --debug ${VALGRIND_EXTRA_OPTIONS} --leak-check=${VALGRIND_LEAK} --suppressions=$TESTSDIR/valgrind.supp"
+	else
+		TESTOPTIONS="-e -a --debug ${VALGRIND_EXTRA_OPTIONS} --leak-check=${VALGRIND_LEAK} --suppressions=$VALGRIND_SUP"
+	fi
+fi
+case "$verbosity" in
+ -1) VERBOSITY="-s" ;;
+ 0) VERBOSITY="" ;;
+ 1) VERBOSITY="-v" ;;
+ 2) VERBOSITY="-vv" ;;
+ 3) VERBOSITY="-vvv" ;;
+ 4) VERBOSITY="-vvvv" ;;
+ 5) VERBOSITY="-vvvvv" ;;
+ 6) VERBOSITY="-vvvvvv" ;;
+ *) echo "Unsupported verbosity $verbosity" >&2
+ exit 1
+ ;;
+esac
+TESTOPTIONS="-D v=$verbosity $TESTOPTIONS"
+REPREPROOPTIONS="$VERBOSITY"
+if test -n "$VERBOSEDB" ; then
+ TESTOPTIONS="-D x=0 -D d=1 $TESTOPTIONS"
+ REPREPROOPTIONS="--verbosedb $REPREPROOPTIONS"
+else
+ TESTOPTIONS="-D x=0 -D d=0 $TESTOPTIONS"
+fi
+TRACKINGTESTOPTIONS="-D t=0"
+
+if ! [ -x "$REPREPRO" ] ; then
+ echo "Could not find $REPREPRO!" >&2
+ exit 1
+fi
+TESTTOOLVERSION="`$TESTTOOL --version`"
+case $TESTTOOLVERSION in
+ "testtool version "*) ;;
+ *) echo "Failed to get version of testtool($TESTTOOL)"
+ exit 1
+ ;;
+esac
+
+if test -d "$WORKDIR" && test -f "$WORKDIR/ThisDirectoryWillBeDeleted" && $deleteifmarked ; then
+ rm -r "$WORKDIR" || exit 3
+fi
+
+# Warn about missing optional tools; tests depending on them may fail
+# or be incomplete, but the suite itself still runs.
+if ! which fakeroot >/dev/null 2>&1 ; then
+	echo "WARNING: fakeroot not installed, some tests might fail!"
+fi
+if ! which python3 >/dev/null 2>&1 ; then
+	echo "WARNING: python3 not installed, some tests might fail!"
+fi
+if ! which lzma >/dev/null 2>&1 ; then
+	echo "WARNING: lzma not installed, some tests might fail!"
+fi
+if ! which ed >/dev/null 2>&1 ; then
+	echo "WARNING: ed not installed, some tests might fail!"
+fi
+if ! which lunzip >/dev/null 2>&1 ; then
+	echo "WARNING: lunzip not installed, some tests might be incomplete!"
+else
+	# fixed message: it previously read "lunzip installed but lunzip
+	# not", but the tool checked here is lzip
+	if ! which lzip >/dev/null 2>&1 ; then
+		echo "WARNING: lunzip installed but lzip not, some tests might fail!"
+	fi
+fi
+if ! dpkg -s python3-apt | grep -q -s "Status: .* ok installed" ; then
+	echo "WARNING: python3-apt not installed, some tests might fail!"
+fi
+if ! dpkg -s dpkg-dev | grep -q -s "Status: .* ok installed" ; then
+	echo "WARNING: dpkg-dev not installed, most tests might fail!"
+fi
+
+mkdir "$WORKDIR" || exit 1
+echo "Remove this file to avoid silent removal" > "$WORKDIR"/ThisDirectoryWillBeDeleted
+cd "$WORKDIR"
+
+# dpkg-deb doesn't like too restrictive directories
+umask 022
+
+number_tests=0
+number_missing=0
+number_success=0
+number_skipped=0
+number_failed=0
+
+# runtest <name>: run $SRCDIR/tests/<name>.test in its own directory
+# (dir_<name>) under dash, capturing all output to log_<name>.
+# Updates the global counters: missing (script not found), tests,
+# failed, skipped (script printed a "SKIPPED:" line), success.
+# On success or skip the directory and log are removed; on failure
+# both are kept for inspection.
+runtest() {
+	if ! test -f "$SRCDIR/tests/$1.test" ; then
+		echo "Cannot find $SRCDIR/tests/$1.test!" >&2
+		number_missing="$(( $number_missing + 1 ))"
+		return
+	fi
+	number_tests="$(( $number_tests + 1 ))"
+	echo "Running test '$1'.."
+	TESTNAME=" $1"
+	mkdir "dir_$1"
+	rc=0
+	# run in a subshell so the cd and exported environment do not leak;
+	# WORKDIR is overridden so the script sees its own directory
+	( cd "dir_$1" || exit 1
+	export TESTNAME
+	export SRCDIR TESTSDIR
+	export TESTTOOL RREDTOOL REPREPRO
+	export TRACKINGTESTOPTIONS TESTOPTIONS REPREPROOPTIONS verbosity
+	WORKDIR="$WORKDIR/dir_$1" CALLEDFROMTESTSUITE=true dash $TESTSHELLOPTS "$SRCDIR/tests/$1.test"
+	) > "log_$1" 2>&1 || rc=$?
+	if test "$rc" -ne 0 ; then
+		number_failed="$(( $number_failed + 1 ))"
+		echo "test '$1' failed (see $WORKDIR/log_$1 for details)!" >&2
+	elif grep -q -s '^SKIPPED: ' "log_$1" ; then
+		number_skipped="$(( $number_skipped + 1 ))"
+		echo "test '$1' skipped:"
+		sed -n -e 's/^SKIPPED://p' "log_$1"
+		rm -r "dir_$1" "log_$1"
+	else
+		number_success="$(( $number_success + 1 ))"
+		rm -r "dir_$1" "log_$1"
+	fi
+}
+
+if test x"$testtorun" != x"all" ; then
+ runtest "$testtorun"
+else
+ runtest export
+ runtest buildinfo
+ runtest updatepullreject
+ runtest descriptions
+ runtest easyupdate
+ runtest srcfilterlist
+ runtest uploaders
+ runtest wrongarch
+ runtest flood
+ runtest exporthooks
+ runtest updatecorners
+ runtest packagediff
+ runtest includeextra
+ runtest atoms
+ runtest trackingcorruption
+ runtest layeredupdate
+ runtest layeredupdate2
+ runtest uncompress
+ runtest check
+ runtest flat
+ runtest subcomponents
+ runtest snapshotcopyrestore
+ runtest various1
+ runtest various2
+ runtest various3
+ runtest copy
+ runtest buildneeding
+ runtest morgue
+ runtest diffgeneration
+ runtest onlysmalldeletes
+ runtest override
+ runtest includeasc
+ runtest listcodenames
+fi
+echo "$number_tests tests, $number_success succeeded, $number_failed failed, $number_skipped skipped, $number_missing missing"
+# Exit non-zero when anything failed or was missing so callers and CI
+# can detect a broken run (previously this always exited 0, masking
+# failures). Also fixes the "succeded" typo in the summary line.
+if test "$number_failed" -ne 0 || test "$number_missing" -ne 0 ; then
+	exit 1
+fi
+exit 0
diff --git a/tests/trackingcorruption.test b/tests/trackingcorruption.test
new file mode 100644
index 0000000..6110e26
--- /dev/null
+++ b/tests/trackingcorruption.test
@@ -0,0 +1,79 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db # precondition: start from a clean directory without a database
+mkdir -p conf
+echo "export silent-never" > conf/options
+cat > conf/distributions <<EOF
+Codename: breakme
+Components: something
+Architectures: abacus coal source
+Tracking: all
+EOF
+
+DISTRI=breakme PACKAGE=aa EPOCH="" VERSION=1 REVISION=-1 SECTION="base" genpackage.sh -sa
+
+testrun - include breakme test.changes 3<<EOF
+stdout
+$(odb)
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/something"
+-v2*=Created directory "./pool/something/a"
+-v2*=Created directory "./pool/something/a/aa"
+$(ofa 'pool/something/a/aa/aa-addons_1-1_all.deb')
+$(ofa 'pool/something/a/aa/aa_1-1_abacus.deb')
+$(ofa 'pool/something/a/aa/aa_1-1.tar.gz')
+$(ofa 'pool/something/a/aa/aa_1-1.dsc')
+$(opa 'aa-addons' x 'breakme' 'something' 'abacus' 'deb')
+$(opa 'aa-addons' x 'breakme' 'something' 'coal' 'deb')
+$(opa 'aa' x 'breakme' 'something' 'abacus' 'deb')
+$(opa 'aa' x 'breakme' 'something' 'source' 'dsc')
+$(ota 'breakme' 'aa')
+EOF
+rm aa_* aa-addons* test.changes
+
+dodo mv db/tracking.db . # simulate a lost/corrupted tracking database by hiding it
+
+testrun - removesrc breakme aa 3<<EOF
+stderr
+*=Nothing about source package 'aa' found in the tracking data of 'breakme'!
+*=This either means nothing from this source in this version is there,
+*=or the tracking information might be out of date.
+EOF
+
+testrun - --keepunreferenced remove breakme aa aa-addons 3<<EOF
+stderr
+*=Could not find tracking data for aa_1-1 in breakme to remove old files from it.
+stdout
+$(opd 'aa' unset breakme something abacus deb)
+$(opd 'aa-addons' unset breakme something abacus deb)
+$(opd 'aa-addons' unset breakme something coal deb)
+$(opd 'aa' unset breakme something source dsc)
+EOF
+
+dodo mv tracking.db db/ # restore the now out-of-date tracking database
+
+testrun - --keepunreferenced removesrc breakme aa 3<<EOF
+stderr
+*=Warning: tracking data might be inconsistent:
+*=cannot find 'aa' in 'breakme|something|abacus', but 'pool/something/a/aa/aa_1-1_abacus.deb' should be there.
+*=cannot find 'aa' in 'breakme|something|source', but 'pool/something/a/aa/aa_1-1.dsc' should be there.
+*=There was an inconsistency in the tracking data of 'breakme':
+*='pool/something/a/aa/aa-addons_1-1_all.deb' has refcount > 0, but was nowhere found.
+*='pool/something/a/aa/aa_1-1_abacus.deb' has refcount > 0, but was nowhere found.
+*='pool/something/a/aa/aa_1-1.dsc' has refcount > 0, but was nowhere found.
+*='pool/something/a/aa/aa_1-1.tar.gz' has refcount > 0, but was nowhere found.
+stdout
+$(otd 'aa' '1-1' 'breakme')
+-v1*=4 files lost their last reference.
+-v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.)
+EOF
+
+testrun - retrack breakme 3<<EOF
+stderr
+stdout
+-v1*=Retracking breakme...
+EOF
+
+rm -r db conf pool # clean up so testsuccess can mark the directory removable
+testsuccess
diff --git a/tests/uncompress.test b/tests/uncompress.test
new file mode 100644
index 0000000..57ffabb
--- /dev/null
+++ b/tests/uncompress.test
@@ -0,0 +1,514 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+# First test if finding the binaries works properly...
+
+testrun - --lunzip=NONE --unxz=NONE __dumpuncompressors 3<<EOF
+stdout
+*=.gz: built-in + '/bin/gunzip'
+*=.bz2: built-in + '/bin/bunzip2'
+*=.lzma: built-in + '/usr/bin/unlzma'
+*=.xz: built-in
+*=.lz: not supported (install lzip or use --lunzip to tell where lunzip is).
+EOF
+
+testrun - --lunzip=NONE --gunzip=NONE --bunzip2=NONE --unlzma=NONE --unxz=NONE __dumpuncompressors 3<<EOF
+stdout
+*=.gz: built-in
+*=.bz2: built-in
+*=.lzma: built-in
+*=.xz: built-in
+*=.lz: not supported (install lzip or use --lunzip to tell where lunzip is).
+EOF
+
+testrun - --lunzip=NONE --gunzip=false --bunzip2=false --unlzma=false --unxz=NONE __dumpuncompressors 3<<EOF
+stdout
+*=.gz: built-in + '/bin/false'
+*=.bz2: built-in + '/bin/false'
+*=.lzma: built-in + '/bin/false'
+*=.xz: built-in
+*=.lz: not supported (install lzip or use --lunzip to tell where lunzip is).
+EOF
+
+touch fakeg fakeb fakel fakexz fakelz
+
+testrun - --lunzip=./fakelz --gunzip=./fakeg --bunzip2=./fakeb --unlzma=./fakel --unxz=./fakexz __dumpuncompressors 3<<EOF
+stdout
+*=.gz: built-in
+*=.bz2: built-in
+*=.lzma: built-in
+*=.xz: built-in
+*=.lz: not supported (install lzip or use --lunzip to tell where lunzip is).
+EOF
+
+chmod u+x fakeg fakeb fakel fakexz fakelz
+
+testrun - --lunzip=./fakelz --gunzip=./fakeg --bunzip2=./fakeb --unlzma=./fakel --unxz=./fakexz __dumpuncompressors 3<<EOF
+stdout
+*=.gz: built-in + './fakeg'
+*=.bz2: built-in + './fakeb'
+*=.lzma: built-in + './fakel'
+*=.xz: built-in + './fakexz'
+*=.lz: './fakelz'
+EOF
+
+rm fakeg fakeb fakel fakexz fakelz
+
+# Then test the builtin formats and the external one...
+
+echo "start" > testfile
+dd if=/dev/zero bs=1024 count=1024 >> testfile
+echo "" >> testfile
+echo "middle" >> testfile
+dd if=/dev/zero bs=1024 count=1024 >> testfile
+echo "" >> testfile
+echo "end" >> testfile
+
+echo "Ohm" > smallfile
+
+echo gzip -c testfile \> testfile.gz
+gzip -c testfile > testfile.gz
+echo bzip2 -c testfile \> testfile.bz2
+bzip2 -c testfile > testfile.bz2
+echo lzma -c testfile \> testfile.lzma
+lzma -c testfile > testfile.lzma
+echo xz -c testfile \> testfile.xz
+xz -c testfile > testfile.xz
+
+echo gzip -c smallfile \> smallfile.gz
+gzip -c smallfile > smallfile.gz
+echo bzip2 -c smallfile \> smallfile.bz2
+bzip2 -c smallfile > smallfile.bz2
+echo lzma -c smallfile \> smallfile.lzma
+lzma -c smallfile > smallfile.lzma
+echo xz -c smallfile \> smallfile.xz
+xz -c smallfile > smallfile.xz
+
+echo gzip -c \< /dev/null \> emptyfile.gz
+gzip -c < /dev/null > emptyfile.gz
+echo bzip2 -c \< /dev/null \> emptyfile.bz2
+bzip2 -c < /dev/null > emptyfile.bz2
+echo lzma -c \< /dev/null \> emptyfile.lzma
+lzma -c < /dev/null > emptyfile.lzma
+echo xz -c \< /dev/null \> emptyfile.xz
+xz -c < /dev/null > emptyfile.xz
+
+testrun - --lunzip /bin/cat __uncompress .lz notexists.lz notexists.lz.uncompressed 3<<EOF
+-v2*=Uncompress 'notexists.lz' into 'notexists.lz.uncompressed' using '/bin/cat'...
+*=Error 2 opening notexists.lz: No such file or directory
+-v0*=There have been errors!
+returns 254
+EOF
+
+if test -x /usr/bin/lzip ; then
+# uncompression message is different as this is no builtin.
+echo lzip -c testfile \> testfile.lz
+lzip -c testfile > testfile.lz
+echo lzip -c smallfile \> smallfile.lz
+lzip -c smallfile > smallfile.lz
+testrun - __uncompress .lz testfile.lz testfile.lz.uncompressed 3<<EOF
+-v2*=Uncompress 'testfile.lz' into 'testfile.lz.uncompressed' using '/usr/bin/lunzip'...
+EOF
+dodiff testfile testfile.lz.uncompressed
+rm *.uncompressed
+testrun - __uncompress .lz smallfile.lz smallfile.lz.uncompressed 3<<EOF
+-v2*=Uncompress 'smallfile.lz' into 'smallfile.lz.uncompressed' using '/usr/bin/lunzip'...
+EOF
+dodiff smallfile smallfile.lz.uncompressed
+rm *.uncompressed
+fi
+
+for ext in gz bz2 lzma xz ; do
+ testrun - __uncompress .${ext} testfile.${ext} testfile.${ext}.uncompressed 3<<EOF
+-v2*=Uncompress 'testfile.${ext}' into 'testfile.${ext}.uncompressed'...
+EOF
+ dodiff testfile testfile.${ext}.uncompressed
+ rm *.uncompressed
+
+ testrun - __uncompress .${ext} smallfile.${ext} smallfile.${ext}.uncompressed 3<<EOF
+-v2*=Uncompress 'smallfile.${ext}' into 'smallfile.${ext}.uncompressed'...
+EOF
+ dodiff smallfile smallfile.${ext}.uncompressed
+ rm *.uncompressed
+done
+
+# unlzma does not support concatenated files, so we do neither.
+for ext in gz bz2 xz ; do
+ cat testfile.${ext} emptyfile.${ext} > concatenatedtestfile.${ext}
+ testrun - __uncompress .${ext} concatenatedtestfile.${ext} concatenatedtestfile.${ext}.uncompressed 3<<EOF
+-v2*=Uncompress 'concatenatedtestfile.${ext}' into 'concatenatedtestfile.${ext}.uncompressed'...
+EOF
+ dodiff testfile concatenatedtestfile.${ext}.uncompressed
+ rm concatenated*
+
+ cat testfile testfile > concatenatedtestfile
+ cat testfile.${ext} testfile.${ext} > concatenatedtestfile.${ext}
+ testrun - __uncompress .${ext} concatenatedtestfile.${ext} concatenatedtestfile.${ext}.uncompressed 3<<EOF
+-v2*=Uncompress 'concatenatedtestfile.${ext}' into 'concatenatedtestfile.${ext}.uncompressed'...
+EOF
+ dodiff concatenatedtestfile concatenatedtestfile.${ext}.uncompressed
+ rm concatenated*
+done
+
+# Test for trailing garbage detection
+for ext in gz bz2 lzma ; do
+ cat testfile.${ext} smallfile > invalidtestfile.${ext}
+ testrun - __uncompress .${ext} invalidtestfile.${ext} invalidtestfile.${ext}.uncompressed 3<<EOF
+stderr
+-v2*=Uncompress 'invalidtestfile.${ext}' into 'invalidtestfile.${ext}.uncompressed'...
+*=Error reading from invalidtestfile.${ext}: Trailing garbage after compressed data!
+-v0*=There have been errors!
+returns 255
+EOF
+ dodo test ! -e invalidtestfile.${ext}.uncompressed
+ rm invalid*
+done
+
+# .xz does not see the trailing stuff, but an end of file while reading the header:
+cat testfile.xz smallfile > invalidtestfile.xz
+testrun - __uncompress .xz invalidtestfile.xz invalidtestfile.xz.uncompressed 3<<EOF
+stderr
+-v2*=Uncompress 'invalidtestfile.xz' into 'invalidtestfile.xz.uncompressed'...
+*=Error 10 decompressing lzma data
+*=Error reading from invalidtestfile.xz: Uncompression error!
+-v0*=There have been errors!
+returns 255
+EOF
+dodo test ! -e invalidtestfile.xz.uncompressed
+rm invalid*
+
+touch fake.lz
+testrun - --lunzip=false __uncompress .lz fake.lz fake.lz.uncompressed 3<<EOF
+-v2*=Uncompress 'fake.lz' into 'fake.lz.uncompressed' using '/bin/false'...
+*='/bin/false' < fake.lz > fake.lz.uncompressed exited with errorcode 1!
+-v0*=There have been errors!
+returns 255
+EOF
+dodo test ! -e fake.lz.uncompressed
+
+
+# Now check for compressed parts of an .a file:
+
+cat > control <<EOF
+Package: fake
+Version: fake
+Architecture: all
+EOF
+
+# looks like control.tar.lzma is not possible because the name is too
+# long for the old ar format dpkg-deb needs...
+echo tar -cf - ./control \| bzip2 \> control.tar.bz2
+tar -cf - ./control | bzip2 > control.tar.bz2
+echo tar -cf - testfile\* \| lzma \> data.tar.lzma
+tar -cf - testfile* | lzma > data.tar.lzma
+echo tar -cf - testfile\* \| bzip2 \> data.tar.bz2
+tar -cf - testfile* | bzip2 > data.tar.bz2
+echo tar -cf - testfile\* \| gzip \> data.tar.gz
+tar -cf - testfile* | gzip > data.tar.gz
+echo tar -cf - testfile\* \| xz \> data.tar.xz
+tar -cf - testfile* | xz > data.tar.xz
+echo 2.0 > debian-binary
+datatestlist="gz bz2 lzma xz"
+for ext in $datatestlist ; do # build an intact, a garbage-appended (_t) and a truncated (_w) .deb per compression
+ dodo ar qcfS fake_${ext}.deb debian-binary control.tar.bz2 data.tar.${ext}
+ # one .deb with trailing garbage at the end of the data tar:
+ echo "trailing garbage" >> data.tar.${ext}
+ dodo ar qcfS fake_${ext}_t.deb debian-binary control.tar.bz2 data.tar.${ext}
+ # and one .deb where the length is correct but the .ar header differs
+ cp fake_${ext}_t.deb fake_${ext}_w.deb
+ origlength=$(stat -c '%s' fake_${ext}.deb)
+ newlength=$(stat -c '%s' fake_${ext}_w.deb)
+ if test $((origlength + 18)) -eq $((newlength)) ; then
+ # new length is 17 + one padding, so original did not have padding:
+ truncate -s "$origlength" fake_${ext}_w.deb
+ else
+ # also remove the padding byte:
+ truncate -s "$((origlength - 1))" fake_${ext}_w.deb
+ fi
+done
+rm debian-binary control *.tar.*
+
+# TODO: there could be a problem here with .deb files that have data after the
+# ./control file in data.tar and using an external uncompressor.
+# But how to test this when there is no way to trigger it in the default built?
+
+testrun - __extractcontrol fake_gz.deb 3<<EOF
+stdout
+*=Package: fake
+*=Version: fake
+*=Architecture: all
+*=
+EOF
+for ext in $datatestlist ; do # good and damaged .debs must list files resp. fail as expected
+testrun - __extractfilelist fake_${ext}.deb 3<<EOF
+stdout
+*=/testfile
+*=/testfile.bz2
+*=/testfile.gz
+*=/testfile.lzma
+=/testfile.xz
+=/testfile.lz
+EOF
+ if test $ext = xz ; then # xz reports a decompression error instead of trailing garbage
+testrun - __extractfilelist fake_${ext}_t.deb 3<<EOF
+stderr
+*=Error 9 decompressing lzma data
+*=Error reading data.tar from fake_xz_t.deb: Uncompression error
+-v0*=There have been errors!
+returns 255
+EOF
+ else
+testrun - __extractfilelist fake_${ext}_t.deb 3<<EOF
+stderr
+*=Error reading data.tar from fake_${ext}_t.deb: Trailing garbage after compressed data
+-v0*=There have been errors!
+returns 255
+EOF
+ fi
+ if test $ext = xz ; then
+ : # xz has too large blocks to trigger this (tar is done before this is read)
+ else
+testrun - __extractfilelist fake_${ext}_w.deb 3<<EOF
+*=Error reading data.tar from fake_${ext}_w.deb: Compressed data of unexpected length
+-v0*=There have been errors!
+returns 255
+EOF
+ fi
+done
+
+rm fake_*.deb
+
+for compressor in lz lzma ; do
+case $compressor in
+ lz)
+ compressor_program=lzip
+ uncompressor=lunzip
+ ;;
+ lzma)
+ compressor_program=lzma
+ uncompressor=""
+ ;;
+esac
+export uncompressor
+if ! test -x /usr/bin/$compressor_program ; then
+ echo "SKIPPING $compressor because /usr/bin/$compressor_program is missing!"
+ continue
+fi
+
+# Now check extracting Section/Priority from an .dsc
+mkdir debian
+cat > debian/control <<EOF
+Package: fake
+Maintainer: Me
+Section: admin
+Priority: extra
+
+Package: abinary
+Architecture: all
+EOF
+echo generating fake dirs
+for n in $(seq 100000) ; do echo "/$n" ; done > debian/dirs
+dd if=/dev/zero of=debian/zzz bs=1024 count=4096
+tar -cf - debian | $compressor_program > fake_1-1.debian.tar.$compressor
+mkdir fake-1
+mkdir fake-1.orig
+cp -al debian fake-1/debian
+cp -al debian fake-1.orig/debian
+sed -e 's/1/2/' fake-1/debian/dirs > fake-1/debian.dirs.new
+mv fake-1/debian.dirs.new fake-1/debian/dirs
+diff -ruN fake-1.orig fake-1 | $compressor_program > fake_1-1.diff.$compressor
+rm -r debian
+
+# .debian.tar and .diff usually do not happen at the same time, but easier testing...
+cat > fake_1-1.dsc << EOF
+Format: 3.0
+Source: fake
+Binary: abinary
+Architecture: all
+Version: 17
+Maintainer: Me
+Files:
+ $(mdandsize fake_1-1.diff.${compressor}) fake_1-1.diff.${compressor}
+ $(mdandsize fake_1-1.debian.tar.${compressor}) fake_1-1.debian.tar.${compressor}
+ 00000000000000000000000000000000 0 fake_1.orig.tar.${compressor}
+EOF
+
+testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+stdout
+*=Section: admin
+*=Priority: extra
+EOF
+
+# It would be nice to damage the .lzma file here, but that has a problem:
+# A random damage to the file will usually lead to some garbage output
+# before lzma realizes the error.
+# Once reprepro sees the garbage (which will usually not be a valid diff)
+# it will decide it is a format it does not understand and abort further
+# reading, giving up.
+# This is a race condition with one of the following results:
+# reprepro is much faster: no error output (as unknown format is no error,
+# but only no success)
+# reprepro a bit faster: unlzma can still output an error, but is
+# terminated by reprepro before issuing an error code.
+# unlzma is faster: reprepro will see a child returning with error...
+#
+# Thus we can only fake a damaged file by replacing the uncompressor:
+
+if test -n "${uncompressor}" ; then
+testrun - --${uncompressor}=brokenuncompressor.sh __extractsourcesection fake_1-1.dsc 3<<EOF
+returns 255
+*=brokenuncompressor.sh: claiming broken archive
+*=Error reading from ./fake_1-1.diff.${compressor}: $TESTSDIR/brokenuncompressor.sh exited with code 1!
+-v0*=There have been errors!
+stdout
+EOF
+fi
+
+mv fake_1-1.debian.tar.${compressor} save.tar.${compressor}
+
+# a missing file is no error, but no success either...
+testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+stdout
+EOF
+
+cp save.tar.${compressor} fake_1.orig.tar.${compressor}
+# a missing file is no error, but no success either (and not reading further files)
+testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+stdout
+EOF
+
+dodo mkdir debian
+dodo touch debian/test
+echo tar -cf - debian \| ${compressor_program} \> fake_1-1.debian.tar.${compressor}
+tar -cf - debian | ${compressor_program} > fake_1-1.debian.tar.${compressor}
+rm -r debian
+
+testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+stdout
+*=Section: admin
+*=Priority: extra
+EOF
+
+if test -n "${uncompressor}" ; then
+touch breakon2nd
+testrun - --${uncompressor}=brokenuncompressor.sh __extractsourcesection fake_1-1.dsc 3<<EOF
+returns 255
+*=brokenuncompressor.sh: claiming broken archive
+*=Error reading from ./fake_1-1.debian.tar.${compressor}: $TESTSDIR/brokenuncompressor.sh exited with code 1!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - --${uncompressor}=brokenuncompressor.sh __extractsourcesection fake_1-1.dsc 3<<EOF
+returns 255
+*=brokenuncompressor.sh: claiming broken archive
+*=Error reading from ./fake_1-1.diff.${compressor}: $TESTSDIR/brokenuncompressor.sh exited with code 1!
+-v0*=There have been errors!
+stdout
+EOF
+fi
+
+
+# sadly different output depending on libarchive version....
+# dd if=/dev/zero of=fake_1-1.debian.tar.lzma bs=5 count=1
+#
+# testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+# returns 255
+# *=/usr/bin/unlzma: Read error
+# *=Error 84 trying to extract control information from ./fake_1-1.debian.tar.${compressor}:
+# *=Empty input file: Invalid or incomplete multibyte or wide character
+# -v0*=There have been errors!
+# stdout
+# EOF
+
+mv save.tar.${compressor} fake_1-1.debian.tar.${compressor}
+rm fake_1.orig.tar.${compressor}
+
+# now check only partial reading of the .diff
+# (i.e. diff containing a control):
+rm fake-1/debian/control
+cat > fake-1/debian/control <<EOF
+Package: fake
+Maintainer: MeToo
+Section: base
+Priority: required
+
+Package: abinary
+Architecture: all
+EOF
+cat > fake-1/debian/aaaaa <<EOF
+also test debian/control not being the first file...
+EOF
+diff -ruN fake-1.orig fake-1 | ${compressor_program} > fake_1-1.diff.${compressor}
+rm -r fake-1 fake-1.orig
+
+
+cat > fake_1-1.dsc << EOF
+Format: 3.0
+Source: fake
+Binary: abinary
+Architecture: all
+Version: 17
+Maintainer: Me
+Files:
+ $(mdandsize fake_1-1.diff.${compressor}) fake_1-1.diff.${compressor}
+ $(mdandsize fake_1-1.debian.tar.${compressor}) fake_1-1.debian.tar.${compressor}
+ 00000000000000000000000000000000 0 fake_1.orig.tar.${compressor}
+EOF
+
+testrun - __extractsourcesection fake_1-1.dsc 3<<EOF
+stdout
+*=Section: base
+*=Priority: required
+EOF
+
+if test -n "$uncompressor" ; then
+testrun - --${uncompressor}=false __extractsourcesection fake_1-1.dsc 3<<EOF
+returns 255
+*=Error reading from ./fake_1-1.diff.${compressor}: /bin/false exited with code 1!
+-v0*=There have been errors!
+stdout
+EOF
+fi
+
+done
+
+rm testfile* smallfile* emptyfile*
+
+cat > fake_1-2.diff <<EOF
+--- bla/Makefile
++++ bla/Makefile
+@@ -1000,1 +1000,1 @@
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+--- bla/debian/control
++++ bla/debian/control
+@@ -0,0 +1,10 @@
++Source: fake
++Section: sssss
++# new-fangled comment
++Priority: ppp
++Homepage: gopher://never-never-land/
++
+EOF
+dodo gzip fake_1-2.diff
+
+cat > fake_1-2.dsc << EOF
+Format: 3.0
+Source: fake
+Binary: abinary
+Architecture: all
+Version: 17
+Maintainer: Me
+Files:
+ $(mdandsize fake_1-2.diff.gz) fake_1-2.diff.gz
+ 00000000000000000000000000000000 0 fake_1.orig.tar.gz
+EOF
+
+testrun - __extractsourcesection fake_1-2.dsc 3<<EOF
+stdout
+*=Section: sssss
+*=Priority: ppp
+EOF
+
+rm fake*
+testsuccess
diff --git a/tests/updatecorners.test b/tests/updatecorners.test
new file mode 100644
index 0000000..ee6f905
--- /dev/null
+++ b/tests/updatecorners.test
@@ -0,0 +1,176 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+# test some corner cases in updating:
+# IgnoreInRelease, force, errors, resuming...
+
+mkdir -p conf test/dists/a/c/source test/test lists # fake remote repository under test/, local state beside it
+
+echo "test" > test/test/test.dsc # dummy source package files offered for download
+echo "fake-gz-file" > test/test/test.tar.gz
+
+cat >test/dists/a/c/source/Sources <<EOF
+Package: test
+Version: 7777
+Priority: extra
+Section: somewhere
+Maintainer: noone
+Directory: test
+Files:
+ $(mdandsize test/test/test.dsc) test.dsc
+ $(mdandsize test/test/test.tar.gz) test.tar.gz
+EOF
+
+sourcesmd=$(md5 test/dists/a/c/source/Sources)
+sourcessize=$(stat -c "%s" test/dists/a/c/source/Sources)
+cat > test/dists/a/InRelease <<EOF
+Codename: a
+MD5Sum:
+ $sourcesmd $sourcessize c/source/Sources
+EOF
+lzma test/dists/a/c/source/Sources
+
+cat >conf/distributions <<EOF
+Codename: t
+Architectures: source
+Components: c
+Update: u
+EOF
+
+cat >conf/updates <<EOF
+Name: u
+Method: copy:$WORKDIR/test
+VerifyRelease: blindtrust
+Suite: a
+EOF
+
+testrun - -b . update 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/test/dists/a/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/test/dists/a/InRelease'
+*=WARNING: No signature found in ./lists/u_a_InRelease, assuming it is unsigned!
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/c/source/Sources':
+='Failed to stat - stat (2 No such file or directory)'
+='Failed to stat - stat (2: No such file or directory)'
+-v0*=There have been errors!
+stdout
+$(odb)
+returns 255
+EOF
+
+cat >>conf/updates <<EOF
+DownloadListsAs: .lzma
+EOF
+
+testrun - -b . update 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/test/dists/a/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/test/dists/a/InRelease'
+*=WARNING: No signature found in ./lists/u_a_InRelease, assuming it is unsigned!
+*=Error: './lists/u_a_InRelease' only lists unrequested compressions of 'c/source/Sources'.
+*=Try changing your DownloadListsAs to request e.g. '.'.
+-v0*=There have been errors!
+returns 255
+EOF
+ed -s conf/updates <<EOF
+g/^DownloadListsAs:/s/.lzma/force.gz force.lzma/
+w
+q
+EOF
+
+testrun - -b . update 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/test/dists/a/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/test/dists/a/InRelease'
+*=WARNING: No signature found in ./lists/u_a_InRelease, assuming it is unsigned!
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/c/source/Sources.gz':
+='Failed to stat - stat (2 No such file or directory)'
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v1*=aptmethod got 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v2*=Uncompress './lists/u_a_c_Sources.lzma' into './lists/u_a_c_Sources' using '/usr/bin/unlzma'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 't|c|source'
+-v5*= reading './lists/u_a_c_Sources'
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/c"
+-v2*=Created directory "./pool/c/t"
+-v2*=Created directory "./pool/c/t/test"
+stderr
+-v6*=aptmethod start 'copy:${WORKDIR}/test/test/test.dsc'
+-v1*=aptmethod got 'copy:${WORKDIR}/test/test/test.dsc'
+-v6*=aptmethod start 'copy:${WORKDIR}/test/test/test.tar.gz'
+-v1*=aptmethod got 'copy:${WORKDIR}/test/test/test.tar.gz'
+stdout
+-v0*=Getting packages...
+$(ofa 'pool/c/t/test/test.dsc')
+$(ofa 'pool/c/t/test/test.tar.gz')
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'test' x 't' 'c' 'source' 'dsc')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/t"
+-v2*=Created directory "./dists/t/c"
+-v2*=Created directory "./dists/t/c/source"
+-v6*= looking for changes in 't|c|source'...
+-v6*= creating './dists/t/c/source/Sources' (gzipped)
+EOF
+
+# test what happens if some compression is forces (i.e. not listed
+# in the InRelease file), but the downloaded file is not correct:
+
+ed -s test/dists/a/InRelease <<EOF
+,s/^ [^ ]*/ 00000000000000000000000000000000/
+w
+q
+EOF
+
+testrun - -b . update 3<<EOF
+stderr
+-v6*=aptmethod start 'copy:$WORKDIR/test/dists/a/InRelease'
+-v1*=aptmethod got 'copy:$WORKDIR/test/dists/a/InRelease'
+*=WARNING: No signature found in ./lists/u_a_InRelease, assuming it is unsigned!
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/c/source/Sources.gz':
+='Failed to stat - stat (2 No such file or directory)'
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v1*=aptmethod got 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v2*=Uncompress './lists/u_a_c_Sources.lzma' into './lists/u_a_c_Sources' using '/usr/bin/unlzma'...
+*=Wrong checksum of uncompressed content of './lists/u_a_c_Sources.lzma':
+*=md5 expected: 00000000000000000000000000000000, got: $sourcesmd
+-v0*=There have been errors!
+returns 254
+EOF
+
+rm test/dists/a/InRelease
+
+testrun - -b . update 3<<EOF
+stderr
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/InRelease':
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/Release':
+='Failed to stat - stat (2 No such file or directory)'
+='Failed to stat - stat (2: No such file or directory)'
+-v0*=There have been errors!
+returns 255
+EOF
+
+echo "IgnoreRelease: Yes" >> conf/updates
+
+testrun - -b . update 3<<EOF
+stderr
+*=aptmethod error receiving 'copy:$WORKDIR/test/dists/a/c/source/Sources.gz':
+='Failed to stat - stat (2 No such file or directory)'
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v1*=aptmethod got 'copy:${WORKDIR}/test/dists/a/c/source/Sources.lzma'
+-v2*=Uncompress './lists/u_a_c_Sources.lzma' into './lists/u_a_c_Sources' using '/usr/bin/unlzma'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 't|c|source'
+-v5*= reading './lists/u_a_c_Sources'
+EOF
+
+rm -r conf db test lists pool dists
+testsuccess
diff --git a/tests/updatepullreject.test b/tests/updatepullreject.test
new file mode 100644
index 0000000..a2b4bbd
--- /dev/null
+++ b/tests/updatepullreject.test
@@ -0,0 +1,555 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -d db
+mkdir -p conf
+cat > conf/distributions <<EOF
+Codename: 1234
+Components: component
+Architectures: something source
+DebIndices: Packages .xz
+DscIndices: Sources .xz
+Update: test
+EOF
+cat > conf/updates <<EOF
+Name: test
+GetInRelease: no
+VerifyRelease: blindtrust
+Method: file:$WORKDIR/in
+FilterList: error filterlist
+FilterSrcList: error filtersrclist
+Suite: 4321
+EOF
+echo > conf/filterlist
+echo > conf/filtersrclist
+
+testrun - -b . export 1234 3<<EOF
+stderr
+stdout
+$(odb)
+-v1*=Exporting 1234...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/1234"
+-v2*=Created directory "./dists/1234/component"
+-v2*=Created directory "./dists/1234/component/binary-something"
+-v6*= exporting '1234|component|something'...
+-v6*= creating './dists/1234/component/binary-something/Packages' (xzed)
+-v2*=Created directory "./dists/1234/component/source"
+-v6*= exporting '1234|component|source'...
+-v6*= creating './dists/1234/component/source/Sources' (xzed)
+EOF
+
+mkdir lists
+mkdir -p in/dists/4321
+touch in/dists/4321/Release
+
+testrun - -b . update 1234 3<<EOF
+returns 255
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+*=Missing checksums in Release file './lists/test_4321_Release'!
+-v0*=There have been errors!
+stdout
+EOF
+
+echo "SHA256:" > in/dists/4321/Release
+
+testrun - -b . update 1234 3<<EOF
+returns 254
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+*=Could not find 'component/binary-something/Packages' within './lists/test_4321_Release'
+-v0*=There have been errors!
+stdout
+EOF
+
+mkdir -p in/dists/4321/component/source in/dists/4321/component/binary-something
+xz -c < /dev/null > in/dists/4321/component/source/Sources.xz
+xz -c < /dev/null > in/dists/4321/component/binary-something/Packages.xz
+cat > in/dists/4321/Release <<EOF
+SHA256:
+ $EMPTYSHA2 component/source/Sources
+ $(sha2andsize in/dists/4321/component/source/Sources.xz) component/source/Sources.xz
+ $(sha2andsize in/dists/4321/component/binary-something/Packages.xz) component/binary-something/Packages.xz
+EOF
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/source/Sources.xz' into './lists/test_4321_component_Sources' using '/usr/bin/unxz'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/binary-something/Packages.xz' into './lists/test_4321_component_something_Packages' using '/usr/bin/unxz'...
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+EOF
+
+# an extremely manually built package to ensure only some things are there:
+mkdir -p build/test-1/src
+echo a > build/test-1/src/sourcefile
+srcorigname=test_1.orig.tar.xz
+tar -cJf build/${srcorigname} -C build test-1
+srcorigsha=$(sha2andsize build/${srcorigname})
+mkdir -p build/test-1/debian
+cat > build/test-1/debian/control <<EOF
+Source: test
+Maintainer: <me@me>
+
+Package: name
+Architecture: all
+EOF
+touch build/test-1/debian/changelog
+srcdebname=test_1-1.debian.tar.xz
+tar -cJf build/${srcdebname} -C build/test-1 debian
+srcdebsha=$(sha2andsize build/${srcdebname})
+srcdscname=test_1-1.dsc
+cat > build/${srcdscname} <<EOF
+Source: test
+EOF
+srcdscsha=$(sha2andsize build/${srcdscname})
+mkdir -p build/name/opt/
+echo trash > build/name/opt/trash
+mkdir -p build/name/DEBIAN
+cat > build/name/DEBIAN/control <<EOF
+Package: name
+Architecture: all
+Version: 17-2
+Source: test (1-1)
+Maintainer: <me@me>
+Description: some
+ description
+EOF
+bindebname=name_17-2_all.deb
+dpkg-deb -Z xz -b build/name build/${bindebname}
+bindebsha=$(sha2only build/${bindebname})
+bindebsize=$(sizeonly build/${bindebname})
+
+rm in/dists/4321/component/binary-something/Packages.xz
+cat > in/dists/4321/component/binary-something/Packages <<EOF
+Package: name
+Version: 17-2
+Source: test (1-1)
+Maintainer: <me@me>
+Architecture: all
+Size: ${bindebsize}
+SHA256: ${bindebsha}
+Filename: ../build/${bindebname}
+Description: some
+ description
+EOF
+packagessha=$(sha2andsize in/dists/4321/component/binary-something/Packages)
+xz in/dists/4321/component/binary-something/Packages
+packagesxzsha=$(sha2andsize in/dists/4321/component/binary-something/Packages.xz)
+
+rm in/dists/4321/component/source/Sources.xz
+cat > in/dists/4321/component/source/Sources <<EOF
+Package: test
+Version: 1-1
+Maintainer: <me@me>
+Directory: ../build
+Checksums-Sha256:
+ ${srcdscsha} ${srcdscname}
+ ${srcdebsha} ${srcdebname}
+ ${srcorigsha} ${srcorigname}
+EOF
+sourcessha=$(sha2andsize in/dists/4321/component/source/Sources)
+xz in/dists/4321/component/source/Sources
+sourcesxzsha=$(sha2andsize in/dists/4321/component/source/Sources.xz)
+
+cat > in/dists/4321/Release <<EOF
+SHA256:
+ $sourcessha component/source/Sources
+ $sourcesxzsha component/source/Sources.xz
+ $packagessha component/binary-something/Packages
+ $packagesxzsha component/binary-something/Packages.xz
+EOF
+
+mkdir -p pool/component/t/test
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/source/Sources.xz' into './lists/test_4321_component_Sources' using '/usr/bin/unxz'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/binary-something/Packages.xz' into './lists/test_4321_component_something_Packages' using '/usr/bin/unxz'...
+*=Package name marked to be unexpected('error'): 'test'!
+*=Stop reading further chunks from './lists/test_4321_component_Sources' due to previous errors.
+*=There have been errors!
+returns 255
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+EOF
+
+echo "test =1-1" > conf/filtersrclist
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+*=Package name marked to be unexpected('error'): 'name'!
+*=Stop reading further chunks from './lists/test_4321_component_something_Packages' due to previous errors.
+*=There have been errors!
+returns 255
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+EOF
+
+echo "name =17-2" > conf/filterlist
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${srcdscname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${srcdscname}'
+-v2*=Linking file '$WORKDIR/in/../build/${srcdscname}' to './pool/component/t/test/${srcdscname}'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${srcdebname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${srcdebname}'
+-v2*=Linking file '$WORKDIR/in/../build/${srcdebname}' to './pool/component/t/test/${srcdebname}'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${srcorigname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${srcorigname}'
+-v2*=Linking file '$WORKDIR/in/../build/${srcorigname}' to './pool/component/t/test/${srcorigname}'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${bindebname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${bindebname}'
+-v2*=Linking file '$WORKDIR/in/../build/${bindebname}' to './pool/component/t/test/${bindebname}'...
+stdout
+$(ofa pool/component/t/test/${srcdscname})
+$(ofa pool/component/t/test/${srcdebname})
+$(ofa pool/component/t/test/${srcorigname})
+$(ofa pool/component/t/test/${bindebname})
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'test' unset '1234' 'component' 'source' 'dsc')
+$(opa 'name' x '1234' 'component' 'something' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|component|something'...
+-v6*= replacing './dists/1234/component/binary-something/Packages' (xzed)
+-v6*= looking for changes in '1234|component|source'...
+-v6*= replacing './dists/1234/component/source/Sources' (xzed)
+EOF
+
+rm -r build in lists
+echo "update-rules done, now pull-rules"
+
+cat >> conf/distributions <<EOF
+
+Codename: dest
+Components: component
+Architectures: something source
+DebIndices: Packages .xz
+DscIndices: Sources .xz
+Pull: test
+EOF
+cat > conf/pulls <<EOF
+Name: test
+From: 1234
+FilterList: error filterlist
+FilterSrcList: error filtersrclist
+EOF
+echo > conf/filterlist
+echo > conf/filtersrclist
+
+testrun - -b . export dest 3<<EOF
+stderr
+stdout
+-v1*=Exporting dest...
+-v2*=Created directory "./dists/dest"
+-v2*=Created directory "./dists/dest/component"
+-v2*=Created directory "./dists/dest/component/binary-something"
+-v6*= exporting 'dest|component|something'...
+-v6*= creating './dists/dest/component/binary-something/Packages' (xzed)
+-v2*=Created directory "./dists/dest/component/source"
+-v6*= exporting 'dest|component|source'...
+-v6*= creating './dists/dest/component/source/Sources' (xzed)
+EOF
+
+testrun - -b . pull dest 3<<EOF
+stderr
+*=Package name marked to be unexpected('error'): 'test'!
+*=There have been errors!
+returns 255
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+EOF
+
+echo "test =1-1" > conf/filtersrclist
+
+testrun - -b . pull dest 3<<EOF
+stderr
+*=Package name marked to be unexpected('error'): 'name'!
+*=There have been errors!
+returns 255
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+-v3*= pulling into 'dest|component|something'
+-v5*= looking what to get from '1234|component|something'
+EOF
+
+echo "name =17-2" > conf/filterlist
+
+testrun - -b . pull dest 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+-v3*= pulling into 'dest|component|something'
+-v5*= looking what to get from '1234|component|something'
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'test' unset 'dest' 'component' 'source' 'dsc')
+$(opa 'name' x 'dest' 'component' 'something' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'dest|component|something'...
+-v6*= replacing './dists/dest/component/binary-something/Packages' (xzed)
+-v6*= looking for changes in 'dest|component|source'...
+-v6*= replacing './dists/dest/component/source/Sources' (xzed)
+EOF
+
+echo "and now the same again with a new version"
+
+mkdir -p build/test-1/debian
+cat > build/test-1/debian/control <<EOF
+Source: test
+Maintainer: <me@me>
+
+Package: name
+Architecture: all
+EOF
+echo "2" > build/test-1/debian/changelog
+src2debname=test_1-2.debian.tar.xz
+tar -cJf build/${src2debname} -C build/test-1 debian
+src2debsha=$(sha2andsize build/${src2debname})
+src2dscname=test_1-2.dsc
+cat > build/${src2dscname} <<EOF
+Source: test
+Maintainer: <me@me>
+EOF
+src2dscsha=$(sha2andsize build/${src2dscname})
+mkdir -p build/name/opt/
+echo trash > build/name/opt/trash
+mkdir -p build/name/DEBIAN
+cat > build/name/DEBIAN/control <<EOF
+Package: name
+Architecture: all
+Version: 17-3
+Source: test (1-2)
+Maintainer: <me@me>
+Description: some
+ description
+EOF
+bin2debname=name_17-3_all.deb
+dpkg-deb -Z xz -b build/name build/${bin2debname}
+bin2debsha=$(sha2only build/${bin2debname})
+bin2debsize=$(sizeonly build/${bin2debname})
+
+mkdir -p in/dists/4321/component/binary-something in/dists/4321/component/source
+cat > in/dists/4321/component/binary-something/Packages <<EOF
+Package: name
+Version: 17-3
+Source: test (1-2)
+Maintainer: <me@me>
+Architecture: all
+Size: ${bin2debsize}
+SHA256: ${bin2debsha}
+Filename: ../build/${bin2debname}
+Description: some
+ description
+EOF
+packagessha=$(sha2andsize in/dists/4321/component/binary-something/Packages)
+xz in/dists/4321/component/binary-something/Packages
+packagesxzsha=$(sha2andsize in/dists/4321/component/binary-something/Packages.xz)
+
+cat > in/dists/4321/component/source/Sources <<EOF
+Package: test
+Version: 1-2
+Maintainer: <me@me>
+Directory: ../build
+Checksums-Sha256:
+ ${src2dscsha} ${src2dscname}
+ ${src2debsha} ${src2debname}
+ ${srcorigsha} ${srcorigname}
+EOF
+sourcessha=$(sha2andsize in/dists/4321/component/source/Sources)
+xz in/dists/4321/component/source/Sources
+sourcesxzsha=$(sha2andsize in/dists/4321/component/source/Sources.xz)
+
+cat > in/dists/4321/Release <<EOF
+SHA256:
+ $sourcessha component/source/Sources
+ $sourcesxzsha component/source/Sources.xz
+ $packagessha component/binary-something/Packages
+ $packagesxzsha component/binary-something/Packages.xz
+EOF
+
+echo "test warning" > conf/filtersrclist
+echo "name warning" > conf/filterlist
+dodo mkdir lists
+
+testrun - -b . update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/source/Sources.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/source/Sources.xz' into './lists/test_4321_component_Sources' using '/usr/bin/unxz'...
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/component/binary-something/Packages.xz'
+-v2*=Uncompress '$WORKDIR/in/dists/4321/component/binary-something/Packages.xz' into './lists/test_4321_component_something_Packages' using '/usr/bin/unxz'...
+*=Loudly rejecting 'name' '17-3' to enter '1234|component|something'!
+*=Loudly rejecting 'test' '1-2' to enter '1234|component|source'!
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+EOF
+
+dodo echo check if the fallback to filtersrclist works:
+sed -e 's/^FilterList/#&/' -i conf/updates
+rm conf/filterlist
+
+testrun - -b . --noskipold update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+*=Loudly rejecting 'name' '17-3' to enter '1234|component|something'!
+*=Loudly rejecting 'test' '1-2' to enter '1234|component|source'!
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+EOF
+
+echo "test =1-2" > conf/filtersrclist
+
+testrun - -b . --noskipold update 1234 3<<EOF
+stderr
+-v6=aptmethod start 'file:$WORKDIR/in/dists/4321/Release'
+-v1*=aptmethod got 'file:$WORKDIR/in/dists/4321/Release'
+-v2*=Copy file '$WORKDIR/in/dists/4321/Release' to './lists/test_4321_Release'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${src2dscname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${src2dscname}'
+-v2*=Linking file '$WORKDIR/in/../build/${src2dscname}' to './pool/component/t/test/${src2dscname}'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${src2debname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${src2debname}'
+-v2*=Linking file '$WORKDIR/in/../build/${src2debname}' to './pool/component/t/test/${src2debname}'...
+-v6=aptmethod start 'file:$WORKDIR/in/../build/${bin2debname}'
+-v1*=aptmethod got 'file:$WORKDIR/in/../build/${bin2debname}'
+-v2*=Linking file '$WORKDIR/in/../build/${bin2debname}' to './pool/component/t/test/${bin2debname}'...
+stdout
+$(ofa pool/component/t/test/${src2dscname})
+$(ofa pool/component/t/test/${src2debname})
+$(ofa pool/component/t/test/${bin2debname})
+-v0*=Calculating packages to get...
+-v3*= processing updates for '1234|component|source'
+-v5*= reading './lists/test_4321_component_Sources'
+-v3*= processing updates for '1234|component|something'
+-v5*= reading './lists/test_4321_component_something_Packages'
+-v0*=Getting packages...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'test' x x '1234' 'component' 'source' 'dsc')
+$(opu 'name' x x '1234' 'component' 'something' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in '1234|component|something'...
+-v6*= replacing './dists/1234/component/binary-something/Packages' (xzed)
+-v6*= looking for changes in '1234|component|source'...
+-v6*= replacing './dists/1234/component/source/Sources' (xzed)
+EOF
+
+rm -r lists build in
+
+echo "test warning" > conf/filtersrclist
+echo "name warning" > conf/filterlist
+
+testrun - -b . pull dest 3<<EOF
+*=Loudly rejecting 'name' '17-3' to enter 'dest|component|something'!
+*=Loudly rejecting 'test' '1-2' to enter 'dest|component|source'!
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+-v3*= pulling into 'dest|component|something'
+-v5*= looking what to get from '1234|component|something'
+-v0*=Installing (and possibly deleting) packages...
+EOF
+
+sed -e 's/^FilterList/#&/' -i conf/pulls
+rm conf/filterlist
+
+testrun - -b . pull dest 3<<EOF
+*=Loudly rejecting 'name' '17-3' to enter 'dest|component|something'!
+*=Loudly rejecting 'test' '1-2' to enter 'dest|component|source'!
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+-v3*= pulling into 'dest|component|something'
+-v5*= looking what to get from '1234|component|something'
+-v0*=Installing (and possibly deleting) packages...
+EOF
+
+echo "test =1-2" > conf/filtersrclist
+
+testrun - -b . pull dest 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'dest|component|source'
+-v5*= looking what to get from '1234|component|source'
+-v3*= pulling into 'dest|component|something'
+-v5*= looking what to get from '1234|component|something'
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'test' x x 'dest' 'component' 'source' 'dsc')
+$(opu 'name' x x 'dest' 'component' 'something' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'dest|component|something'...
+-v6*= replacing './dists/dest/component/binary-something/Packages' (xzed)
+-v6*= looking for changes in 'dest|component|source'...
+-v6*= replacing './dists/dest/component/source/Sources' (xzed)
+-v0*=Deleting files no longer referenced...
+$(ofd pool/component/t/test/${srcdscname})
+$(ofd pool/component/t/test/${srcdebname})
+$(ofd pool/component/t/test/${bindebname})
+EOF
+
+rm -r -f db conf dists pool
+testsuccess
diff --git a/tests/uploaders.test b/tests/uploaders.test
new file mode 100644
index 0000000..6260996
--- /dev/null
+++ b/tests/uploaders.test
@@ -0,0 +1,253 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p conf
+cat > conf/distributions <<EOF
+Codename: test1
+Components: main
+Architectures: source coal
+Uploaders: uploaders1
+
+Codename: test2
+Components: main
+Architectures: source coal
+Uploaders: uploaders2
+EOF
+
+checknonetakes() {
+testrun - -b . __checkuploaders test1 test2 < "$1" 3<<EOF
+stdout
+*='testpackage' would NOT have been accepted by any of the distributions selected.
+EOF
+}
+
+check1takes() {
+testrun - -b . __checkuploaders test1 test2 < "$1" 3<<EOF
+stdout
+*='testpackage' would have been accepted by 'test1'
+EOF
+}
+check2takes() {
+testrun - -b . __checkuploaders test1 test2 < "$1" 3<<EOF
+stdout
+*='testpackage' would have been accepted by 'test2'
+EOF
+}
+
+cat > descr1 <<EOF
+source testpackage
+architecture source
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descr1 3<<EOF
+*=Error opening './conf/uploaders1': No such file or directory
+-v0*=There have been errors!
+returns 254
+EOF
+
+cat > conf/uploaders1 <<EOF
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descr1 3<<EOF
+*=Error opening './conf/uploaders2': No such file or directory
+-v0*=There have been errors!
+returns 254
+EOF
+
+cat > conf/uploaders2 <<EOF
+include uploaders2
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descr1 3<<EOF
+*=./conf/uploaders2:1:0: Too deeply nested include directives (> 100). Built some recursion?
+*=included from './conf/uploaders2' line 1
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat > conf/uploaders2 <<EOF
+allow source 'testpackage' by unsigned
+EOF
+
+check2takes descr1
+
+cat > descrbad <<EOF
+unknowncommand
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descrbad 3<<EOF
+*=Unparseable line 'unknowncommand'
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat > descrbad <<EOF
+architecture source
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descrbad 3<<EOF
+*=No source name specified!
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat > descres <<EOF
+source testpackage
+architecture source
+signature e0000000000000000
+EOF
+cat > descrs <<EOF
+source testpackage
+architecture source
+signature 0000000000000000
+EOF
+
+checknonetakes descres
+checknonetakes descrs
+
+echo "now test2 accepts all valid signatures for testpackage"
+cat >> conf/uploaders2 <<EOF
+allow source 'testpackage' by any key
+EOF
+
+check2takes descrs
+checknonetakes descres
+
+cat >>conf/uploaders1 <<EOF
+group test
+EOF
+testrun - -b . __checkuploaders test1 test2 < descrbad 3<<EOF
+*=./conf/uploaders1:1:11: missing 'add', 'contains', 'unused' or 'empty' keyword.
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat >conf/uploaders1 <<EOF
+group test add
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descrbad 3<<EOF
+*=./conf/uploaders1:1:15: key id or fingerprint expected!
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat >conf/uploaders1 <<EOF
+group test add 00000000
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descres 3<<EOF
+-v0*=./conf/uploaders1:1: Warning: group 'test' gets members but is not used in any rule
+stdout
+*='testpackage' would NOT have been accepted by any of the distributions selected.
+EOF
+
+cat >>conf/uploaders1 <<EOF
+group test unused
+EOF
+
+checknonetakes descres
+
+cat >>conf/uploaders1 <<EOF
+allow * by group test
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descrbad 3<<EOF
+*=./conf/uploaders1:3: cannot use group 'test' marked as unused!
+*=./conf/uploaders1:2: here it was marked as unused.
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat >conf/uploaders1 <<EOF
+group test add 00000000
+group test unused
+allow * by group tset
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descres 3<<EOF
+-v0*=./conf/uploaders1:3: Warning: group 'tset' gets used but never gets any members
+stdout
+*='testpackage' would NOT have been accepted by any of the distributions selected.
+EOF
+
+cat >>conf/uploaders1 <<EOF
+group tset contains test
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descres 3<<EOF
+*=./conf/uploaders1:4: cannot use group 'test' marked as unused!
+*=./conf/uploaders1:2: here it got marked as unused.
+-v0*=There have been errors!
+returns 255
+EOF
+
+sed -e '/unused/d' -i conf/uploaders1
+
+check1takes descrs
+checknonetakes descres
+
+cat >>conf/uploaders1 <<EOF
+group test contains indirection
+group indirection contains test
+EOF
+
+testrun - -b . __checkuploaders test1 test2 < descres 3<<EOF
+*=./conf/uploaders1:5: cannot add group 'test' to group 'indirection' as the later is already member of the former!
+-v0*=There have been errors!
+returns 255
+EOF
+
+cat >conf/uploaders1 <<EOF
+group group add 76543210
+group foo add 00000000
+group bla contains group
+group blub contains foo
+group g5 contains foo
+group g5 unused
+group g6 contains foo
+group g6 unused
+group g7 contains foo
+group g7 unused
+group g8 contains foo
+group g8 unused
+group g9 contains foo
+group g9 unused
+group g10 contains foo
+group g10 unused
+group g11 contains foo
+group g11 unused
+group g12 contains foo
+group g12 unused
+group g13 contains foo
+group g13 unused
+group g14 contains foo
+group g14 unused
+group g15 contains foo
+group g15 unused
+group g16 contains foo
+group g16 unused
+group g17 contains foo
+group g17 unused
+group g18 contains foo
+group g18 unused
+group g19 contains foo
+group g19 unused
+group g20 contains foo
+group g20 unused
+group g21 contains foo
+group g21 unused
+allow * by group bla
+allow architectures contain 'coal' by group blub
+EOF
+
+check2takes descrs
+sed -e 's/0000000000000000/fedcba9876543210/g' descrs >> descr2
+sed -e 's/0000000000000000/fedcba9876542210/g' descrs >> descr3
+echo "architecture coal" >> descrs
+check1takes descrs
+check1takes descr2
+check2takes descr3
+
+rm -r conf descr*
+testsuccess
diff --git a/tests/valgrind.supp b/tests/valgrind.supp
new file mode 100644
index 0000000..d6659b1
--- /dev/null
+++ b/tests/valgrind.supp
@@ -0,0 +1,128 @@
+{
+ libz-not-initializing-buffer
+ Memcheck:Cond
+ fun:inflateReset2
+ fun:inflateInit2_
+ obj:/usr/lib*/libz.so.*
+}
+{
+ still-careless-libdb
+ Memcheck:Param
+ pwrite64(buf)
+ fun:pwrite
+ fun:__os_io
+ obj:/usr/lib/x86_64-linux-gnu/libdb-*.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+}
+
+{
+ careless-libdb
+ Memcheck:Param
+ pwrite64(buf)
+ fun:__pwrite*_nocancel
+ fun:__os_io
+ obj:/usr/lib*/libdb-*.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+}
+{
+ more-careless-libdb6
+ Memcheck:Cond
+ fun:__bam_stkrel
+ obj:/usr/lib*/libdb-6*.so
+ fun:__dbc_iput
+ fun:__db_put
+}
+{
+ more-careless-libdb5
+ Memcheck:Cond
+ fun:__bam_stkrel
+ obj:/usr/lib*/libdb-5*.so
+ fun:__dbc_iput
+ fun:__db_put
+}
+{
+ stupid-db4.6
+ Memcheck:Param
+ pwrite64(buf)
+ obj:/lib/ld-2.7.so
+ fun:__os_io
+ obj:/usr/lib/libdb-4.6.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db4.6-withlibc-dbg
+ Memcheck:Param
+ pwrite64(buf)
+ fun:pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.6.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db3
+ Memcheck:Param
+ pwrite64(buf)
+ fun:pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.3.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db3-withlibc-dbg
+ Memcheck:Param
+ pwrite64(buf)
+ fun:do_pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.3.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ libz-looking-far
+ Memcheck:Cond
+ obj:/usr/lib/libz.so.*
+ obj:/usr/lib/libz.so.*
+ fun:deflate
+}
+{
+ gpgme11-gpgme_data_release_and_get_mem_leak
+ Memcheck:Leak
+ fun:calloc
+ obj:*/libgpgme.so.*
+ fun:gpgme_data_new
+ fun:gpgme_data_new_from_filepart
+ fun:gpgme_data_new_from_file
+ fun:signature_readsignedchunk
+}
+{
+ gpgme11-gpgme_data_release_and_get_mem_leak2
+ Memcheck:Leak
+ fun:calloc
+ obj:*/libgpgme.so.*
+ fun:gpgme_data_new
+ fun:signature_readsignedchunk
+}
+{
+ liblzma-thinks-random-jumps-are-fun
+ Memcheck:Cond
+ obj:/lib/x86_64-linux-gnu/liblzma.so.5*
+ obj:/lib/x86_64-linux-gnu/liblzma.so.5*
+ obj:/lib/x86_64-linux-gnu/liblzma.so.5*
+ obj:/lib/x86_64-linux-gnu/liblzma.so.5*
+ obj:/lib/x86_64-linux-gnu/liblzma.so.5*
+ fun:lzma_stream_encoder
+ fun:lzma_easy_encoder
+}
diff --git a/tests/various1.test b/tests/various1.test
new file mode 100644
index 0000000..96623d2
--- /dev/null
+++ b/tests/various1.test
@@ -0,0 +1,1430 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir -p conf
+cat > conf/distributions <<EOF
+
+#
+
+Codename: test
+Architectures:
+# This is an comment
+ a
+Components:
+ c
+
+#
+#
+
+EOF
+touch conf/updates
+dodo test ! -d db
+mkdir logs
+testrun - -b . checkupdate test 3<<EOF
+stderr
+*=Nothing to do, because none of the selected distributions has an Update: field.
+stdout
+$(odb)
+-v2=Created directory "./lists"
+EOF
+rm -r -f lists
+rm -r -f db conf
+dodo test ! -d d/ab
+mkdir -p conf
+cat > conf/options <<CONFEND
+outhook $SRCDIR/docs/outstore.py
+export changed
+CONFEND
+export REPREPRO_OUT_DB=db/out
+cat > conf/distributions <<CONFEND
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+ --bla
+CONFEND
+testrun - -b . export 3<<EOF
+return 255
+stdout
+stderr
+*=Unknown Log notifier option in ./conf/distributions, line 5, column 2: '--bla'
+-v0*=There have been errors!
+EOF
+cat > conf/distributions <<CONFEND
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+ -A
+CONFEND
+testrun - -b . export 3<<EOF
+return 255
+*=Log notifier option -A misses an argument in ./conf/distributions, line 5, column 4
+-v0*=There have been errors!
+EOF
+cat > conf/distributions <<CONFEND
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+ -A=abacus
+CONFEND
+testrun - -b . export 3<<EOF
+return 255
+*=Error parsing config file ./conf/distributions, line 5, column 11:
+*=Unexpected end of line: name of notifier script missing!
+-v0*=There have been errors!
+EOF
+cat > conf/distributions <<CONFEND
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+ -A=abacus --architecture=coal
+CONFEND
+testrun - -b . export 3<<EOF
+return 255
+*=Repeated notifier option --architecture in ./conf/distributions, line 5, column 12!
+-v0*=There have been errors!
+EOF
+cat > conf/distributions <<CONFEND
+Codename: getmoreatoms
+Architectures: funny coal
+Components: dog
+
+Codename: A
+Architectures: abacus calculator
+Components: dog cat
+Log: logfile
+# -A=nonexistant -C=nocomponent --type=none --withcontrol noscript.sh
+
+Codename: B
+Architectures: abacus source
+Components: dog cat
+Contents: compatsymlink percomponent
+Log: logfile
+CONFEND
+testrun - -b . export B A 3<<EOF
+stdout
+$(odb)
+-v1*=Exporting B...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/B"
+-v2*=Created directory "./dists/B/dog"
+-v2*=Created directory "./dists/B/dog/binary-abacus"
+-v6*= exporting 'B|dog|abacus'...
+-v6*= creating './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/B/dog/source"
+-v6*= exporting 'B|dog|source'...
+-v6*= creating './dists/B/dog/source/Sources' (gzipped)
+-v2*=Created directory "./dists/B/cat"
+-v2*=Created directory "./dists/B/cat/binary-abacus"
+-v6*= exporting 'B|cat|abacus'...
+-v2*=Created directory "./dists/B/cat/source"
+-v6*= creating './dists/B/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= exporting 'B|cat|source'...
+-v6*= creating './dists/B/cat/source/Sources' (gzipped)
+-v1*= generating dog/Contents-abacus...
+-v1*= generating cat/Contents-abacus...
+-v1*=Exporting A...
+-v2*=Created directory "./dists/A"
+-v2*=Created directory "./dists/A/dog"
+-v2*=Created directory "./dists/A/dog/binary-abacus"
+-v6*= exporting 'A|dog|abacus'...
+-v6*= creating './dists/A/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/dog/binary-calculator"
+-v6*= exporting 'A|dog|calculator'...
+-v6*= creating './dists/A/dog/binary-calculator/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/cat"
+-v2*=Created directory "./dists/A/cat/binary-abacus"
+-v6*= exporting 'A|cat|abacus'...
+-v6*= creating './dists/A/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/A/cat/binary-calculator"
+-v6*= exporting 'A|cat|calculator'...
+-v6*= creating './dists/A/cat/binary-calculator/Packages' (uncompressed,gzipped)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+dodo test -f db/checksums.db
+find dists -type f | LC_ALL=C sort -f > results
+cat > results.expected <<END
+dists/A/cat/binary-abacus/Packages
+dists/A/cat/binary-abacus/Packages.gz
+dists/A/cat/binary-abacus/Release
+dists/A/cat/binary-calculator/Packages
+dists/A/cat/binary-calculator/Packages.gz
+dists/A/cat/binary-calculator/Release
+dists/A/dog/binary-abacus/Packages
+dists/A/dog/binary-abacus/Packages.gz
+dists/A/dog/binary-abacus/Release
+dists/A/dog/binary-calculator/Packages
+dists/A/dog/binary-calculator/Packages.gz
+dists/A/dog/binary-calculator/Release
+dists/A/Release
+dists/B/cat/binary-abacus/Packages
+dists/B/cat/binary-abacus/Packages.gz
+dists/B/cat/binary-abacus/Release
+dists/B/cat/Contents-abacus.gz
+dists/B/cat/source/Release
+dists/B/cat/source/Sources.gz
+dists/B/dog/binary-abacus/Packages
+dists/B/dog/binary-abacus/Packages.gz
+dists/B/dog/binary-abacus/Release
+dists/B/dog/Contents-abacus.gz
+dists/B/dog/source/Release
+dists/B/dog/source/Sources.gz
+dists/B/Release
+END
+dodiff results.expected results
+find dists -type l -printf "%p -> %l" | LC_ALL=C sort -f > results
+cat > results.expected <<END
+dists/B/Contents-abacus.gz -> dog/Contents-abacus.gz
+END
+
+sed -e "s/^Contents: compatsymlink/Contents: allcomponents/" -i conf/distributions
+
+dodiff results.expected results
+testrun - -b . processincoming default 3<<EOF
+returns 254
+stderr
+*=Error opening config file './conf/incoming': No such file or directory(2)
+-v0*=There have been errors!
+stdout
+EOF
+touch conf/incoming
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=No definition for 'default' found in './conf/incoming'!
+-v0*=There have been errors!
+stdout
+EOF
+cat > conf/incoming <<EOF
+Name: bla
+Tempdir: bla
+Incomingdir: bla
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=No definition for 'default' found in './conf/incoming'!
+-v0*=There have been errors!
+stdout
+EOF
+cat > conf/incoming <<EOF
+Name: bla
+Tempdir: bla
+Incomingdir: bla
+
+# a comment
+#
+
+Name: default
+
+Name: blub
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=Error parsing config file ./conf/incoming, line 9:
+*=Required field 'TempDir' not found in
+*=incoming rule starting in line 8 and ending in line 8.
+-v0*=There have been errors!
+EOF
+cat > conf/incoming <<EOF
+Name: bla
+Tempdir: bla
+Incomingdir: bla
+
+# a comment
+#
+
+Name: default
+TempDir: temp
+
+Name: blub
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=Error parsing config file ./conf/incoming, line 10:
+*=Required field 'IncomingDir' not found in
+*=incoming rule starting in line 8 and ending in line 9.
+-v0*=There have been errors!
+EOF
+cat > conf/incoming <<EOF
+# commentary
+Name: bla
+Tempdir: bla
+Incomingdir: bla
+Permit: unused_files bla older_version
+Cleanup: unused_files bla on_deny
+
+# a comment
+#
+
+Name: default
+TempDir: temp
+
+Name: blub
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=Warning: ignored error parsing config file ./conf/incoming, line 5, column 22:
+*=Unknown flag in Permit header. (but not within the rule we are interested in.)
+*=Warning: ignored error parsing config file ./conf/incoming, line 6, column 23:
+*=Unknown flag in Cleanup header. (but not within the rule we are interested in.)
+*=Error parsing config file ./conf/incoming, line 13:
+*=Required field 'IncomingDir' not found in
+*=incoming rule starting in line 11 and ending in line 12.
+-v0*=There have been errors!
+EOF
+cat > conf/incoming <<EOF
+Name: bla
+TempDir: bla
+IncomingDir: bla
+
+Name: default
+TempDir: temp
+IncomingDir: i
+
+Name: blub
+TempDir: blub
+IncomingDir: blub
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=There is neither an 'Allow' nor a 'Default' definition in rule 'default'
+*=(starting at line 5, ending at line 8 of ./conf/incoming)!
+*=Aborting as nothing would be let in.
+-v0*=There have been errors!
+EOF
+cat > conf/incoming <<EOF
+Name: bla
+TempDir: bla
+IncomingDir: blub
+
+Name: default
+TempDir: temp
+IncomingDir: i
+Allow: A B
+
+Name: blub
+TempDir: bla
+IncomingDir: blub
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 254
+stderr
+*=Cannot scan './i': No such file or directory
+-v0*=There have been errors!
+stdout
+-v2*=Created directory "./temp"
+EOF
+mkdir i
+testrun "" -b . processincoming default
+(cd i ; PACKAGE=bird EPOCH="" VERSION=1 REVISION="" SECTION="tasty" genpackage.sh)
+echo returned: $?
+DSCMD5S="$(mdandsize i/bird_1.dsc)"
+TARMD5S="$(mdandsize i/bird_1.tar.gz)"
+DSCSHA1S="$(sha1andsize i/bird_1.dsc)"
+TARSHA1S="$(sha1andsize i/bird_1.tar.gz)"
+DSCSHA2S="$(sha2andsize i/bird_1.dsc)"
+TARSHA2S="$(sha2andsize i/bird_1.tar.gz)"
+testrun - -b . processincoming default 3<<EOF
+returns 243
+stderr
+*=No distribution found for 'test.changes'!
+-v0*=There have been errors!
+stdout
+EOF
+sed -i -e 's/test1/A/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='test.changes' lists architecture 'source' not found in distribution 'A'!
+-v0*=There have been errors!
+stdout
+EOF
+sed -i -e 's/Distribution: A/Distribution: B/' i/test.changes
+cp -a i i2
+checknolog logfile
+testrun - -b . processincoming default 3<<EOF
+stdout
+-v9*=Adding reference to 'pool/dog/b/bird/bird_1.dsc' by 'B|dog|source'
+-v9*=Adding reference to 'pool/dog/b/bird/bird_1.tar.gz' by 'B|dog|source'
+-v9*=Adding reference to 'pool/dog/b/bird/bird_1_abacus.deb' by 'B|dog|abacus'
+-v9*=Adding reference to 'pool/dog/b/bird/bird-addons_1_all.deb' by 'B|dog|abacus'
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/dog"
+-v2*=Created directory "./pool/dog/b"
+-v2*=Created directory "./pool/dog/b/bird"
+$(ofa 'pool/dog/b/bird/bird_1.dsc')
+$(ofa 'pool/dog/b/bird/bird_1.tar.gz')
+$(ofa 'pool/dog/b/bird/bird_1_abacus.deb')
+$(ofa 'pool/dog/b/bird/bird-addons_1_all.deb')
+$(opa 'bird' x 'B' 'dog' 'source' 'dsc')
+$(opa 'bird' x 'B' 'dog' 'abacus' 'deb')
+$(opa 'bird-addons' x 'B' 'dog' 'abacus' 'deb')
+-v3*=deleting './i/bird_1.dsc'...
+-v3*=deleting './i/bird_1.tar.gz'...
+-v3*=deleting './i/bird_1_abacus.deb'...
+-v3*=deleting './i/bird-addons_1_all.deb'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v1*= generating Contents-abacus...
+-v1*= generating dog/Contents-abacus...
+-v4*=Reading filelist for pool/dog/b/bird/bird_1_abacus.deb
+-d1*=db: 'pool/dog/b/bird/bird_1_abacus.deb' added to contents.cache.db(compressedfilelists).
+-v4*=Reading filelist for pool/dog/b/bird/bird-addons_1_all.deb
+-d1*=db: 'pool/dog/b/bird/bird-addons_1_all.deb' added to contents.cache.db(compressedfilelists).
+EOF
+checklog logfile <<EOF
+DATESTR add B dsc dog source bird 1
+DATESTR add B deb dog abacus bird 1
+DATESTR add B deb dog abacus bird-addons 1
+EOF
+find temp -type f > results
+dodiff /dev/null results
+find i -type f > results
+dodiff /dev/null results
+cat > results.expected <<EOF
+x tasty/bird,tasty/bird-addons
+a/1 tasty/bird,tasty/bird-addons
+dir/another tasty/bird,tasty/bird-addons
+dir/file tasty/bird,tasty/bird-addons
+dir/subdir/file tasty/bird,tasty/bird-addons
+EOF
+gunzip -c dists/B/Contents-abacus.gz > results
+dodiff results.expected results
+cat > results.expected <<EOF
+EOF
+gunzip -c dists/B/cat/Contents-abacus.gz > results
+dodiff results.expected results
+cat > results.expected <<EOF
+x tasty/bird,tasty/bird-addons
+a/1 tasty/bird,tasty/bird-addons
+dir/another tasty/bird,tasty/bird-addons
+dir/file tasty/bird,tasty/bird-addons
+dir/subdir/file tasty/bird,tasty/bird-addons
+EOF
+gunzip -c dists/B/dog/Contents-abacus.gz > results
+dodiff results.expected results
+printindexpart pool/dog/b/bird/bird_1_abacus.deb > results.expected
+printindexpart pool/dog/b/bird/bird-addons_1_all.deb >> results.expected
+dodiff results.expected dists/B/dog/binary-abacus/Packages
+withoutchecksums pool/dog/b/bird/bird_1.dsc | sed -e 's/ \+$//' > results.expected
+ed -s results.expected <<EOF
+H
+/^Source:/ m 0
+s/^Source: /Package: /
+/^Files:/ kf
+'f i
+Priority: superfluous
+Section: tasty
+Directory: pool/dog/b/bird
+.
+'f a
+ $DSCMD5S bird_1.dsc
+.
+$ a
+Checksums-Sha1:
+ $DSCSHA1S bird_1.dsc
+ $TARSHA1S bird_1.tar.gz
+Checksums-Sha256:
+ $DSCSHA2S bird_1.dsc
+ $TARSHA2S bird_1.tar.gz
+
+.
+w
+q
+EOF
+gunzip -c dists/B/dog/source/Sources.gz | sed -e 's/ \+$//' > results
+dodiff results.expected results
+
+echo "DebOverride: debo" >> conf/distributions
+echo "DscOverride: dsco" >> conf/distributions
+echo "bird Section cat/tasty" > conf/debo
+echo "bird Priority hungry" >> conf/debo
+echo "bird Task lunch" >> conf/debo
+echo "bird-addons Section cat/ugly" >> conf/debo
+echo "bird Section cat/nest" > conf/dsco
+echo "bird Priority hurry" >> conf/dsco
+echo "bird Homepage gopher://tree" >> conf/dsco
+
+mv i2/* i/
+rmdir i2
+testrun - -b . processincoming default 3<<EOF
+stdout
+-v2*=Created directory "./pool/cat"
+-v2*=Created directory "./pool/cat/b"
+-v2*=Created directory "./pool/cat/b/bird"
+$(ofa 'pool/cat/b/bird/bird_1.dsc')
+$(ofa 'pool/cat/b/bird/bird_1.tar.gz')
+$(ofa 'pool/cat/b/bird/bird_1_abacus.deb')
+$(ofa 'pool/cat/b/bird/bird-addons_1_all.deb')
+$(opa 'bird' x 'B' 'cat' 'source' 'dsc')
+$(opa 'bird' x 'B' 'cat' 'abacus' 'deb')
+$(opa 'bird-addons' x 'B' 'cat' 'abacus' 'deb')
+-v3*=deleting './i/bird_1.dsc'...
+-v3*=deleting './i/bird_1.tar.gz'...
+-v3*=deleting './i/bird_1_abacus.deb'...
+-v3*=deleting './i/bird-addons_1_all.deb'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= replacing './dists/B/cat/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/cat/source/Sources' (gzipped)
+-v1*= generating cat/Contents-abacus...
+-v1*= generating Contents-abacus...
+-v4*=Reading filelist for pool/cat/b/bird/bird_1_abacus.deb
+-d1*=db: 'pool/cat/b/bird/bird_1_abacus.deb' added to contents.cache.db(compressedfilelists).
+-v4*=Reading filelist for pool/cat/b/bird/bird-addons_1_all.deb
+-d1*=db: 'pool/cat/b/bird/bird-addons_1_all.deb' added to contents.cache.db(compressedfilelists).
+EOF
+checklog logfile <<EOF
+DATESTR add B dsc cat source bird 1
+DATESTR add B deb cat abacus bird 1
+DATESTR add B deb cat abacus bird-addons 1
+EOF
+find temp -type f > results
+dodiff /dev/null results
+find i -type f > results
+dodiff /dev/null results
+cat > results.expected <<EOF
+x tasty/bird,tasty/bird-addons,cat/tasty/bird,cat/ugly/bird-addons
+a/1 tasty/bird,tasty/bird-addons,cat/tasty/bird,cat/ugly/bird-addons
+dir/another tasty/bird,tasty/bird-addons,cat/tasty/bird,cat/ugly/bird-addons
+dir/file tasty/bird,tasty/bird-addons,cat/tasty/bird,cat/ugly/bird-addons
+dir/subdir/file tasty/bird,tasty/bird-addons,cat/tasty/bird,cat/ugly/bird-addons
+EOF
+gunzip -c dists/B/Contents-abacus.gz > results
+dodiff results.expected results
+cat > results.expected <<EOF
+x cat/tasty/bird,cat/ugly/bird-addons
+a/1 cat/tasty/bird,cat/ugly/bird-addons
+dir/another cat/tasty/bird,cat/ugly/bird-addons
+dir/file cat/tasty/bird,cat/ugly/bird-addons
+dir/subdir/file cat/tasty/bird,cat/ugly/bird-addons
+EOF
+gunzip -c dists/B/cat/Contents-abacus.gz > results
+dodiff results.expected results
+cat > results.expected <<EOF
+x tasty/bird,tasty/bird-addons
+a/1 tasty/bird,tasty/bird-addons
+dir/another tasty/bird,tasty/bird-addons
+dir/file tasty/bird,tasty/bird-addons
+dir/subdir/file tasty/bird,tasty/bird-addons
+EOF
+gunzip -c dists/B/dog/Contents-abacus.gz > results
+dodiff results.expected results
+printindexpart pool/cat/b/bird/bird_1_abacus.deb > results.expected
+printindexpart pool/cat/b/bird/bird-addons_1_all.deb >> results.expected
+ed -s results.expected <<EOF
+H
+/^Priority: / s/^Priority: superfluous$/Priority: hungry/
+i
+Task: lunch
+.
+/^Section: / s/^Section: tasty$/Section: cat\/tasty/
+/^Section: tasty/ s/^Section: tasty$/Section: cat\/ugly/
+w
+q
+EOF
+dodiff results.expected dists/B/cat/binary-abacus/Packages
+withoutchecksums pool/cat/b/bird/bird_1.dsc | sed -e 's/ \+$//' > results.expected
+ed -s results.expected <<EOF
+H
+/^Source:/ m 0
+s/^Source: /Package: /
+/^Files:/ kf
+'f i
+Homepage: gopher://tree
+Priority: hurry
+Section: cat/nest
+Directory: pool/cat/b/bird
+.
+'f a
+ $DSCMD5S bird_1.dsc
+.
+$ a
+Checksums-Sha1:
+ $DSCSHA1S bird_1.dsc
+ $TARSHA1S bird_1.tar.gz
+Checksums-Sha256:
+ $DSCSHA2S bird_1.dsc
+ $TARSHA2S bird_1.tar.gz
+
+.
+w
+q
+EOF
+BIRDDSCMD5S="$DSCMD5S"
+BIRDTARMD5S="$TARMD5S"
+BIRDDSCSHA1S="$DSCSHA1S"
+BIRDTARSHA1S="$TARSHA1S"
+BIRDDSCSHA2S="$DSCSHA2S"
+BIRDTARSHA2S="$TARSHA2S"
+gunzip -c dists/B/cat/source/Sources.gz | sed -e 's/ \+$//' > results
+dodiff results.expected results
+
+# now missing: checking what all can go wrong in a .changes or .dsc file...
+mkdir pkg
+mkdir pkg/a
+touch pkg/a/b
+mkdir pkg/DEBIAN
+cat > pkg/DEBIAN/control <<EOF
+Package: indebname
+Version: 1:0versionindeb~1
+Source: sourceindeb (0sourceversionindeb)
+EOF
+dpkg-deb --nocheck -b pkg i/debfilename_debfileversion~2_coal.deb
+DEBMD5="$(md5sum i/debfilename_debfileversion~2_coal.deb | cut -d' ' -f1)"
+DEBSIZE="$(stat -c '%s' i/debfilename_debfileversion~2_coal.deb)"
+DEBMD5S="$DEBMD5 $DEBSIZE"
+cat > i/test.changes <<EOF
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Unexpected empty file 'test.changes'!
+-v0*=There have been errors!
+EOF
+echo > i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Could only find spaces within 'test.changes'!
+-v0*=There have been errors!
+EOF
+cat > i/test.changes <<EOF
+-chunk: 1
+
+
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+-v0=Data seems not to be signed trying to use directly....
+*=Strange content of 'test.changes': First non-space character is '-',
+*=but it does not begin with '-----BEGIN'.
+-v0*=There have been errors!
+EOF
+#*=First non-space character is a '-' but there is no empty line in
+#*='test.changes'.
+#*=Unable to extract any data from it!
+cat > i/test.changes <<EOF
+-chunk: 1
+
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+-v0=Data seems not to be signed trying to use directly....
+*=Strange content of 'test.changes': First non-space character is '-',
+*=but it does not begin with '-----BEGIN'.
+-v0*=There have been errors!
+EOF
+#*=First non-space character is a '-' but there is no empty line in
+#*='test.changes'.
+#*=Unable to extract any data from it!
+cat > i/test.changes <<EOF
+chunk: 1
+
+chunk: 2
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+-v0*=There have been errors!
+*=Error parsing 'test.changes': Seems not to be signed but has spurious empty line.
+EOF
+cat > i/test.changes <<EOF
+-----BEGIN FAKE GPG SIGNED MAIL
+type: funny
+
+This is some content
+-----BEGIN FAKE SIGNATURE
+Hahaha!
+-----END FAKE SIGNATURE
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+-v0=Data seems not to be signed trying to use directly....
+-v0=Cannot check signatures from 'test.changes' as compiled without support for libgpgme!
+-v0=Extracting the content manually without looking at the signature...
+*=In 'test.changes': Missing 'Source' field!
+-v0*=There have been errors!
+EOF
+cat > i/test.changes <<EOF
+-----BEGIN FAKE GPG SIGNED MAIL
+type: funny
+
+This is some content
+
+-----BEGIN FAKE SIGNATURE
+Hahaha!
+-----END FAKE SIGNATURE
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+-v0=Data seems not to be signed trying to use directly....
+-v0=Cannot check signatures from 'test.changes' as compiled without support for libgpgme!
+-v0=Extracting the content manually without looking at the signature...
+*=In 'test.changes': Missing 'Source' field!
+-v0*=There have been errors!
+EOF
+cat > i/test.changes <<EOF
+Format: 1.8
+Dummyfield: test
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Missing 'Source' field!
+-v0*=There have been errors!
+EOF
+echo "Source: sourceinchanges" > i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Missing 'Architecture' field!
+-v0*=There have been errors!
+EOF
+echo "Architecture: funny" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Missing 'Version' field!
+-v0*=There have been errors!
+EOF
+echo "Binary: binaryinchanges" >> i/test.changes
+echo "Version: 999:0versioninchanges-0~" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Missing 'Distribution' field!
+-v0*=There have been errors!
+EOF
+echo "Distribution: A" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Missing 'Files' field!
+-v0*=There have been errors!
+EOF
+echo "Files:" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=In 'test.changes': Empty 'Files' section!
+-v0*=There have been errors!
+EOF
+# as it does not look for the file, but scanned the directory
+# and looked for it, there is no problem here, though it might
+# look like one
+echo " ffff 666 - - ../ööü_v_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+*=In 'test.changes': file '../ööü_v_all.deb' not found in the incoming dir!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+printf ' ffff 666 - - \300\257.\300\257_v_funny.deb\n' >> i/test.changes
+touch "$(printf 'i/\300\257.\300\257_v_funny.deb')"
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='test.changes' lists architecture 'funny' not found in distribution 'A'!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+printf ' ffff 666 - - \300\257.\300\257_v_all.deb\n' >> i/test.changes
+mv "$(printf 'i/\300\257.\300\257_v_funny.deb')" "$(printf 'i/\300\257.\300\257_v_all.deb')"
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='all' is not listed in the Architecture header of 'test.changes' but file 'À¯.À¯_v_all.deb' looks like it!
+-v0*=There have been errors!
+EOF
+sed -i -e 's/funny/all/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Invalid filename 'À¯.À¯_v_all.deb' listed in 'test.changes': contains 8-bit characters
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " ffff 1 - - debfilename_debfileversion~2_coal.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='coal' is not listed in the Architecture header of 'test.changes' but file 'debfilename_debfileversion~2_coal.deb' looks like it!
+-v0*=There have been errors!
+EOF
+mv i/debfilename_debfileversion~2_coal.deb i/debfilename_debfileversion~2_all.deb
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " md5sum size - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+# TODO: this error message has to be improved:
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Malformed md5 hash in 'md5sum size - - debfilename_debfileversion~2_all.deb'!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " ffff 666 - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 254
+stderr
+*=ERROR: File 'debfilename_debfileversion~2_all.deb' does not match expectations:
+*=md5 expected: ffff, got: $DEBMD5
+*=size expected: 666, got: $DEBSIZE
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+# TODO: these will hopefully change to not divulge the place of the temp dir some day...
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No Maintainer field in ./temp/debfilename_debfileversion~2_all.deb's control file!
+-v0*=There have been errors!
+EOF
+echo "Maintainer: noone <me@nowhere>" >> pkg/DEBIAN/control
+dpkg-deb --nocheck -b pkg i/debfilename_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No Description field in ./temp/debfilename_debfileversion~2_all.deb's control file!
+-v0*=There have been errors!
+EOF
+echo ...
+echo "Description: test-package" >> pkg/DEBIAN/control
+echo " a package to test reprepro" >> pkg/DEBIAN/control
+dpkg-deb --nocheck -b pkg i/debfilename_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No Architecture field in ./temp/debfilename_debfileversion~2_all.deb's control file!
+-v0*=There have been errors!
+EOF
+echo "Architecture: coal" >> pkg/DEBIAN/control
+dpkg-deb -b pkg i/debfilename_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Name part of filename ('debfilename') and name within the file ('indebname') do not match for 'debfilename_debfileversion~2_all.deb' in 'test.changes'!
+-v0*=There have been errors!
+EOF
+mv i/debfilename_debfileversion~2_all.deb i/indebname_debfileversion~2_all.deb
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - indebname_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Architecture 'coal' of 'indebname_debfileversion~2_all.deb' does not match 'all' specified in 'test.changes'!
+-v0*=There have been errors!
+EOF
+sed -i -e "s/^Architecture: coal/Architecture: all/" pkg/DEBIAN/control
+dpkg-deb -b pkg i/indebname_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/indebname_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/indebname_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - indebname_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Source header 'sourceinchanges' of 'test.changes' and source name 'sourceindeb' within the file 'indebname_debfileversion~2_all.deb' do not match!
+-v0*=There have been errors!
+EOF
+sed -i -e 's/sourceinchanges/sourceindeb/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Source version '999:0versioninchanges-0~' of 'test.changes' and source version '0sourceversionindeb' within the file 'indebname_debfileversion~2_all.deb' do not match!
+-v0*=There have been errors!
+EOF
+sed -i -e 's/999:0versioninchanges-0~/0sourceversionindeb/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Name 'indebname' of binary 'indebname_debfileversion~2_all.deb' is not listed in Binaries header of 'test.changes'!
+*=(use Permit: unlisted_binaries in conf/incoming to ignore this error)
+-v0*=There have been errors!
+EOF
+sed -i -e 's/binaryinchanges/indebname/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No section found for 'indebname' ('indebname_debfileversion~2_all.deb' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+echo "Section: sectiontest" >> pkg/DEBIAN/control
+dpkg-deb -b pkg i/indebname_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/indebname_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/indebname_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S - - indebname_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No section found for 'indebname' ('indebname_debfileversion~2_all.deb' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S test - indebname_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No priority found for 'indebname' ('indebname_debfileversion~2_all.deb' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+echo "Priority: survival" >> pkg/DEBIAN/control
+dpkg-deb -b pkg i/indebname_debfileversion~2_all.deb
+DEBMD5S="$(md5sum i/indebname_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/indebname_debfileversion~2_all.deb)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S test - indebname_debfileversion~2_all.deb" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No priority found for 'indebname' ('indebname_debfileversion~2_all.deb' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DEBMD5S section priority indebname_debfileversion~2_all.deb" >> i/test.changes
+checknolog logfile
+testrun - -b . processincoming default 3<<EOF
+returns 0
+stderr
+stdout
+-v2*=Created directory "./pool/dog/s"
+-v2*=Created directory "./pool/dog/s/sourceindeb"
+$(ofa 'pool/dog/s/sourceindeb/indebname_0versionindeb~1_all.deb')
+$(opa 'indebname' x 'A' 'dog' 'abacus' 'deb')
+$(opa 'indebname' x 'A' 'dog' 'calculator' 'deb')
+-v3*=deleting './i/indebname_debfileversion~2_all.deb'...
+-v3*=deleting './i/test.changes'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'A|cat|abacus'...
+-v6*= replacing './dists/A/dog/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'A|cat|calculator'...
+-v6*= replacing './dists/A/dog/binary-calculator/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'A|dog|abacus'...
+-v6*= looking for changes in 'A|dog|calculator'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog logfile <<EOF
+DATESTR add A deb dog abacus indebname 1:0versionindeb~1
+DATESTR add A deb dog calculator indebname 1:0versionindeb~1
+EOF
+find pool/dog/s -type f > results
+echo "pool/dog/s/sourceindeb/indebname_0versionindeb~1_all.deb" > results.expected
+dodiff results.expected results
+
+touch i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+cat > i/test.changes <<EOF
+Format: 1.8
+Source: sourceinchanges
+Binary: nothing
+Architecture: all
+Version: 1:0versioninchanges
+Distribution: A
+Files:
+ ffff 666 - - dscfilename_fileversion~.dsc
+EOF
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='source' is not listed in the Architecture header of 'test.changes' but file 'dscfilename_fileversion~.dsc' looks like it!
+-v0*=There have been errors!
+EOF
+sed -i -e 's/^Architecture: all$/Architecture: source/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*='test.changes' lists architecture 'source' not found in distribution 'A'!
+-v0*=There have been errors!
+EOF
+sed -i -e 's/^Distribution: A$/Distribution: B/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 254
+stderr
+*=ERROR: File 'dscfilename_fileversion~.dsc' does not match expectations:
+*=md5 expected: ffff, got: $EMPTYMD5ONLY
+*=size expected: 666, got: 0
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Unexpected empty file 'dscfilename_fileversion~.dsc'!
+-v0*=There have been errors!
+EOF
+#*=Could only find spaces within './temp/dscfilename_fileversion~.dsc'!
+echo "Format: " > i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Missing 'Source' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+echo "Source: nameindsc" > i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Cannot find 'Format' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+echo "Format: 1.0" >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Cannot find 'Maintainer' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+echo "Maintainer: guess who <me@nowhere>" >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Missing 'Version' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+echo "Standards-Version: 0" >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Missing 'Version' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+echo "Version: 0versionindsc" >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Missing 'Files' field in 'dscfilename_fileversion~.dsc'!
+-v0*=There have been errors!
+EOF
+echo "Files: " >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Name part of filename ('dscfilename') and name within the file ('nameindsc') do not match for 'dscfilename_fileversion~.dsc' in 'test.changes'!
+-v0*=There have been errors!
+EOF
+sed -i 's/^Source: nameindsc$/Source: dscfilename/g' i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Source header 'sourceinchanges' of 'test.changes' and name 'dscfilename' within the file 'dscfilename_fileversion~.dsc' do not match!
+-v0*=There have been errors!
+EOF
+sed -i 's/^Source: sourceinchanges$/Source: dscfilename/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Source version '1:0versioninchanges' of 'test.changes' and version '0versionindsc' within the file 'dscfilename_fileversion~.dsc' do not match!
+-v0*=There have been errors!
+EOF
+sed -i 's/^Version: 1:0versioninchanges$/Version: 0versionindsc/' i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No section found for 'dscfilename' ('dscfilename_fileversion~.dsc' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy - dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No priority found for 'dscfilename' ('dscfilename_fileversion~.dsc' in 'test.changes')!
+-v0*=There have been errors!
+EOF
+printf "g/^Format:/d\nw\nq\n" | ed -s i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy can't-live-without dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Cannot find 'Format' field in dscfilename_fileversion~.dsc!
+-v0*=There have been errors!
+EOF
+printf "1i\nFormat: 1.0\n.\nw\nq\n" | ed -s i/dscfilename_fileversion~.dsc
+DSCMD5S="$(mdandsize i/dscfilename_fileversion~.dsc )"
+OLDDSCFILENAMEMD5S="$DSCMD5S"
+OLDDSCFILENAMESHA1S="$(sha1andsize i/dscfilename_fileversion~.dsc)"
+OLDDSCFILENAMESHA2S="$(sha2andsize i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy can't-live-without dscfilename_fileversion~.dsc" >> i/test.changes
+checknolog logfile
+testrun - -b . processincoming default 3<<EOF
+returns 0
+stderr
+stdout
+-v2*=Created directory "./pool/dog/d"
+-v2*=Created directory "./pool/dog/d/dscfilename"
+$(ofa 'pool/dog/d/dscfilename/dscfilename_0versionindsc.dsc')
+$(opa 'dscfilename' x 'B' 'dog' 'source' 'dsc')
+-v3*=deleting './i/dscfilename_fileversion~.dsc'...
+-v3*=deleting './i/test.changes'...
+-v0=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog logfile <<EOF
+DATESTR add B dsc dog source dscfilename 0versionindsc
+EOF
+# TODO: check Sources.gz
+cat >i/strangefile <<EOF
+just a line to make it non-empty
+EOF
+cat >i/dscfilename_fileversion~.dsc <<EOF
+Format: 1.0
+Source: dscfilename
+Maintainer: guess who <me@nowhere>
+Standards-Version: 0
+Version: 1:newversion~
+Files:
+ md5sumindsc sizeindsc strangefile
+EOF
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+cat >i/test.changes <<EOF
+Source: dscfilename
+Binary: nothing
+Architecture: source
+Version: 1:newversion~
+Distribution: B
+Files:
+ $DSCMD5S dummy can't-live-without dscfilename_fileversion~.dsc
+EOF
+# this is a stupid error message, needs to get some context
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Error parsing md5 checksum line ' md5sumindsc sizeindsc strangefile' within 'dscfilename_fileversion~.dsc'
+-v0*=There have been errors!
+EOF
+sed -i "s/ md5sumindsc / dddddddddddddddddddddddddddddddd /" i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy unneeded dscfilename_fileversion~.dsc" >> i/test.changes
+# this is a stupid error message, needs to get some context
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=Error parsing md5 checksum line ' dddddddddddddddddddddddddddddddd sizeindsc strangefile' within 'dscfilename_fileversion~.dsc'
+-v0*=There have been errors!
+EOF
+sed -i "s/ sizeindsc / 666 /" i/dscfilename_fileversion~.dsc
+DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)"
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy unneeded dscfilename_fileversion~.dsc" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=file 'strangefile' is needed for 'dscfilename_fileversion~.dsc', not yet registered in the pool and not found in 'test.changes'
+-v0*=There have been errors!
+EOF
+echo " 11111111111111111111111111111111 666 - - strangefile" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+*=No underscore found in file name in '11111111111111111111111111111111 666 - - strangefile'!
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " 11111111111111111111111111111111 666 - - strangefile_xyz" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 249
+stderr
+=Unknown file type: '11111111111111111111111111111111 666 - - strangefile_xyz', assuming source format...
+*=In 'test.changes': file 'strangefile_xyz' not found in the incoming dir!
+-v0*=There have been errors!
+EOF
+mv i/strangefile i/strangefile_xyz
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+=Unknown file type: '11111111111111111111111111111111 666 - - strangefile_xyz', assuming source format...
+*=file 'strangefile' is needed for 'dscfilename_fileversion~.dsc', not yet registered in the pool and not found in 'test.changes'
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " dddddddddddddddddddddddddddddddd 666 - - strangefile_xyz" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 254
+stderr
+=Unknown file type: 'dddddddddddddddddddddddddddddddd 666 - - strangefile_xyz', assuming source format...
+*=ERROR: File 'strangefile_xyz' does not match expectations:
+*=md5 expected: dddddddddddddddddddddddddddddddd, got: 31a1096ff883d52f0c1f39e652d6336f
+*=size expected: 666, got: 33
+-v0*=There have been errors!
+EOF
+printf '$d\nw\nq\n' | ed -s i/dscfilename_fileversion~.dsc
+echo " 31a1096ff883d52f0c1f39e652d6336f 33 strangefile_xyz" >> i/dscfilename_fileversion~.dsc
+DSCMD5S="$(mdandsize i/dscfilename_fileversion~.dsc)"
+DSCSHA1S="$(sha1andsize i/dscfilename_fileversion~.dsc)"
+DSCSHA2S="$(sha2andsize i/dscfilename_fileversion~.dsc)"
+DSCFILENAMEMD5S="$DSCMD5S"
+DSCFILENAMESHA1S="$DSCSHA1S"
+DSCFILENAMESHA2S="$DSCSHA2S"
+printf '$-1,$d\nw\nq\n' | ed -s i/test.changes
+echo " $DSCMD5S dummy unneeded dscfilename_fileversion~.dsc" >> i/test.changes
+echo " 33a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz" >> i/test.changes
+testrun - -b . processincoming default 3<<EOF
+returns 255
+stderr
+=Unknown file type: '33a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz', assuming source format...
+*=file 'strangefile_xyz' has conflicting checksums listed in 'test.changes' and 'dscfilename_fileversion~.dsc'!
+-v0*=There have been errors!
+EOF
+find pool -type f | LC_ALL=C sort -f > results
+cat > results.expected <<EOF
+pool/cat/b/bird/bird-addons_1_all.deb
+pool/cat/b/bird/bird_1.dsc
+pool/cat/b/bird/bird_1.tar.gz
+pool/cat/b/bird/bird_1_abacus.deb
+pool/dog/b/bird/bird-addons_1_all.deb
+pool/dog/b/bird/bird_1.dsc
+pool/dog/b/bird/bird_1.tar.gz
+pool/dog/b/bird/bird_1_abacus.deb
+pool/dog/d/dscfilename/dscfilename_0versionindsc.dsc
+pool/dog/s/sourceindeb/indebname_0versionindeb~1_all.deb
+EOF
+dodiff results.expected results
+find dists -type f | LC_ALL=C sort -f > results
+cat > results.expected <<EOF
+dists/A/cat/binary-abacus/Packages
+dists/A/cat/binary-abacus/Packages.gz
+dists/A/cat/binary-abacus/Release
+dists/A/cat/binary-calculator/Packages
+dists/A/cat/binary-calculator/Packages.gz
+dists/A/cat/binary-calculator/Release
+dists/A/dog/binary-abacus/Packages
+dists/A/dog/binary-abacus/Packages.gz
+dists/A/dog/binary-abacus/Release
+dists/A/dog/binary-calculator/Packages
+dists/A/dog/binary-calculator/Packages.gz
+dists/A/dog/binary-calculator/Release
+dists/A/Release
+dists/B/cat/binary-abacus/Packages
+dists/B/cat/binary-abacus/Packages.gz
+dists/B/cat/binary-abacus/Release
+dists/B/cat/Contents-abacus.gz
+dists/B/cat/source/Release
+dists/B/cat/source/Sources.gz
+dists/B/Contents-abacus.gz
+dists/B/dog/binary-abacus/Packages
+dists/B/dog/binary-abacus/Packages.gz
+dists/B/dog/binary-abacus/Release
+dists/B/dog/Contents-abacus.gz
+dists/B/dog/source/Release
+dists/B/dog/source/Sources.gz
+dists/B/Release
+EOF
+dodiff results.expected results
+gunzip -c dists/B/dog/source/Sources.gz | sed -e 's/ \+$//' > results
+withoutchecksums pool/dog/b/bird/bird_1.dsc | sed -e 's/ \+$//' >bird.preprocessed
+ed -s bird.preprocessed <<EOF
+H
+/^Source:/ m 0
+s/^Source: /Package: /
+/^Files:/ kf
+'f i
+Priority: superfluous
+Section: tasty
+Directory: pool/dog/b/bird
+.
+'f a
+ $BIRDDSCMD5S bird_1.dsc
+.
+$ a
+Checksums-Sha1:
+ $BIRDDSCSHA1S bird_1.dsc
+ $BIRDTARSHA1S bird_1.tar.gz
+Checksums-Sha256:
+ $BIRDDSCSHA2S bird_1.dsc
+ $BIRDTARSHA2S bird_1.tar.gz
+
+.
+w
+q
+EOF
+cat bird.preprocessed - > results.expected <<EOF
+Package: dscfilename
+Format: 1.0
+Maintainer: guess who <me@nowhere>
+Standards-Version: 0
+Version: 0versionindsc
+Priority: can't-live-without
+Section: dummy
+Directory: pool/dog/d/dscfilename
+Files:
+ $OLDDSCFILENAMEMD5S dscfilename_0versionindsc.dsc
+Checksums-Sha1:
+ $OLDDSCFILENAMESHA1S dscfilename_0versionindsc.dsc
+Checksums-Sha256:
+ $OLDDSCFILENAMESHA2S dscfilename_0versionindsc.dsc
+
+EOF
+dodiff results.expected results
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+printf '$d\nw\nq\n' | ed -s i/test.changes
+echo " 31a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz" >> i/test.changes
+checknolog logfile
+testrun - -b . processincoming default 3<<EOF
+returns 0
+stderr
+=Unknown file type: '31a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz', assuming source format...
+stdout
+$(ofa 'pool/dog/d/dscfilename/dscfilename_newversion~.dsc')
+$(ofa 'pool/dog/d/dscfilename/strangefile_xyz')
+$(opu 'dscfilename' x x 'B' 'dog' 'source' 'dsc')
+-v3*=deleting './i/dscfilename_fileversion~.dsc'...
+-v3*=deleting './i/test.changes'...
+-v3*=deleting './i/strangefile_xyz'...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'B|dog|abacus'...
+-v6*= looking for changes in 'B|dog|source'...
+-v6*= replacing './dists/B/dog/source/Sources' (gzipped)
+-v6*= looking for changes in 'B|cat|abacus'...
+-v6*= looking for changes in 'B|cat|source'...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/dog/d/dscfilename/dscfilename_0versionindsc.dsc')
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog logfile <<EOF
+DATESTR replace B dsc dog source dscfilename 1:newversion~ 0versionindsc
+EOF
+
+find pool -type f | LC_ALL=C sort -f > results
+cat > results.expected <<EOF
+pool/cat/b/bird/bird-addons_1_all.deb
+pool/cat/b/bird/bird_1.dsc
+pool/cat/b/bird/bird_1.tar.gz
+pool/cat/b/bird/bird_1_abacus.deb
+pool/dog/b/bird/bird-addons_1_all.deb
+pool/dog/b/bird/bird_1.dsc
+pool/dog/b/bird/bird_1.tar.gz
+pool/dog/b/bird/bird_1_abacus.deb
+pool/dog/d/dscfilename/dscfilename_newversion~.dsc
+pool/dog/d/dscfilename/strangefile_xyz
+pool/dog/s/sourceindeb/indebname_0versionindeb~1_all.deb
+EOF
+dodiff results.expected results
+find dists -type f | LC_ALL=C sort -f > results
+cat > results.expected <<EOF
+dists/A/cat/binary-abacus/Packages
+dists/A/cat/binary-abacus/Packages.gz
+dists/A/cat/binary-abacus/Release
+dists/A/cat/binary-calculator/Packages
+dists/A/cat/binary-calculator/Packages.gz
+dists/A/cat/binary-calculator/Release
+dists/A/dog/binary-abacus/Packages
+dists/A/dog/binary-abacus/Packages.gz
+dists/A/dog/binary-abacus/Release
+dists/A/dog/binary-calculator/Packages
+dists/A/dog/binary-calculator/Packages.gz
+dists/A/dog/binary-calculator/Release
+dists/A/Release
+dists/B/cat/binary-abacus/Packages
+dists/B/cat/binary-abacus/Packages.gz
+dists/B/cat/binary-abacus/Release
+dists/B/cat/Contents-abacus.gz
+dists/B/cat/source/Release
+dists/B/cat/source/Sources.gz
+dists/B/Contents-abacus.gz
+dists/B/dog/binary-abacus/Packages
+dists/B/dog/binary-abacus/Packages.gz
+dists/B/dog/binary-abacus/Release
+dists/B/dog/Contents-abacus.gz
+dists/B/dog/source/Release
+dists/B/dog/source/Sources.gz
+dists/B/Release
+EOF
+dodiff results.expected results
+gunzip -c dists/B/dog/source/Sources.gz | sed -e 's/ \+$//' > results
+cat bird.preprocessed - > results.expected <<EOF
+Package: dscfilename
+Format: 1.0
+Maintainer: guess who <me@nowhere>
+Standards-Version: 0
+Version: 1:newversion~
+Priority: unneeded
+Section: dummy
+Directory: pool/dog/d/dscfilename
+Files:
+ $DSCFILENAMEMD5S dscfilename_newversion~.dsc
+ 31a1096ff883d52f0c1f39e652d6336f 33 strangefile_xyz
+Checksums-Sha1:
+ $DSCFILENAMESHA1S dscfilename_newversion~.dsc
+ 4453da6ca46859b207c5b55af6213ff8369cd383 33 strangefile_xyz
+Checksums-Sha256:
+ $DSCFILENAMESHA2S dscfilename_newversion~.dsc
+ c40fcf711220c0ce210159d43b22f1f59274819bf3575e11cc0057ed1988a575 33 strangefile_xyz
+
+EOF
+dodiff results.expected results
+
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+rm -r conf db pool dists i pkg logs temp
+rm results.expected results.log.expected results bird.preprocessed
+testsuccess
diff --git a/tests/various2.test b/tests/various2.test
new file mode 100644
index 0000000..c67184e
--- /dev/null
+++ b/tests/various2.test
@@ -0,0 +1,2462 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+echo "Running various other old test..."
+mkdir -p conf logs
+cat > conf/options <<CONFEND
+outhook $SRCDIR/docs/outstore.py
+export changed
+CONFEND
+export REPREPRO_OUT_DB=db.out
+cat > conf/distributions <<CONFEND
+Codename: test1
+Architectures: abacus source
+Components: stupid ugly
+Update: Test2toTest1
+DebIndices: Packages Release . .gz .bz2
+UDebIndices: Packages .gz .bz2
+DscIndices: Sources Release .gz .bz2
+Tracking: keep includechanges includebyhand
+Log: log1
+
+Codename: test2
+Architectures: abacus coal source
+Components: stupid ugly
+Origin: Brain
+Label: Only a test
+Suite: broken
+Version: 9999999.02
+DebIndices: Packages Release . .gz $SRCDIR/docs/bzip.example testhook
+UDebIndices: Packages .gz
+DscIndices: Sources Release . .gz $SRCDIR/docs/bzip.example testhook
+Description: test with all fields set
+DebOverride: binoverride
+DscOverride: srcoverride
+Log: log2
+CONFEND
+
+cat > conf/testhook <<'EOF'
+#!/bin/sh
+echo "testhook got $#: '$1' '$2' '$3' '$4'"
+if test -f "$1/$3.deprecated" ; then
+ echo "$3.deprecated.tobedeleted" >&3
+fi
+echo "super-compressed" > "$1/$3.super.new"
+echo "$3.super.new" >&3
+EOF
+chmod a+x conf/testhook
+
+mkdir -p "dists/test2/stupid/binary-abacus"
+touch "dists/test2/stupid/binary-abacus/Packages.deprecated"
+cat > logs/fake.outlog << EOF
+BEGIN-DISTRIBUTION test2 dists/test2
+DISTFILE dists/test2 stupid/binary-abacus/Packages.deprecated dists/test2/stupid/binary-abacus/Packages.deprecated
+END-DISTRIBUTION test2 dists/test2
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py logs/fake.outlog
+rm logs/fake.outlog
+
+set -v
+checknolog logfile
+testrun - -b . export test1 test2 3<<EOF
+stdout
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'new'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'new'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'new'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'new'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'new'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'new'
+$(odb)
+-v1*=Exporting test2...
+-v6*= exporting 'test2|stupid|abacus'...
+-v6*= creating './dists/test2/stupid/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v11*=Exporthook successfully returned!
+-v2*=Created directory "./dists/test2/stupid/binary-coal"
+-v6*= exporting 'test2|stupid|coal'...
+-v6*= creating './dists/test2/stupid/binary-coal/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v2*=Created directory "./dists/test2/stupid/source"
+-v6*= exporting 'test2|stupid|source'...
+-v6*= creating './dists/test2/stupid/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+-v2*=Created directory "./dists/test2/ugly"
+-v2*=Created directory "./dists/test2/ugly/binary-abacus"
+-v6*= exporting 'test2|ugly|abacus'...
+-v6*= creating './dists/test2/ugly/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v2*=Created directory "./dists/test2/ugly/binary-coal"
+-v6*= exporting 'test2|ugly|coal'...
+-v6*= creating './dists/test2/ugly/binary-coal/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v2*=Created directory "./dists/test2/ugly/source"
+-v6*= exporting 'test2|ugly|source'...
+-v6*= creating './dists/test2/ugly/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+-v1*=Exporting test1...
+-v2*=Created directory "./dists/test1"
+-v2*=Created directory "./dists/test1/stupid"
+-v2*=Created directory "./dists/test1/stupid/binary-abacus"
+-v6*= exporting 'test1|stupid|abacus'...
+-v6*= creating './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v2*=Created directory "./dists/test1/stupid/source"
+-v6*= exporting 'test1|stupid|source'...
+-v6*= creating './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v2*=Created directory "./dists/test1/ugly"
+-v2*=Created directory "./dists/test1/ugly/binary-abacus"
+-v6*= exporting 'test1|ugly|abacus'...
+-v6*= creating './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v2*=Created directory "./dists/test1/ugly/source"
+-v6*= exporting 'test1|ugly|source'...
+-v6*= creating './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+dodo test '!' -e "dists/test2/stupid/binary-abacus/Packages.deprecated"
+test -f dists/test1/Release
+test -f dists/test2/Release
+
+cat > dists/test1/stupid/binary-abacus/Release.expected <<END
+Component: stupid
+Architecture: abacus
+END
+dodiff dists/test1/stupid/binary-abacus/Release.expected dists/test1/stupid/binary-abacus/Release
+rm dists/test1/stupid/binary-abacus/Release.expected
+cat > dists/test1/ugly/binary-abacus/Release.expected <<END
+Component: ugly
+Architecture: abacus
+END
+dodiff dists/test1/ugly/binary-abacus/Release.expected dists/test1/ugly/binary-abacus/Release
+rm dists/test1/ugly/binary-abacus/Release.expected
+
+echo "super-compressed" > "fakesuper"
+FAKESUPERMD5="$(mdandsize fakesuper)"
+FAKESUPERSHA1="$(sha1andsize fakesuper)"
+FAKESUPERSHA2="$(sha2andsize fakesuper)"
+
+cat > Release.test1.expected <<END
+Codename: test1
+Date: normalized
+Architectures: abacus
+Components: stupid ugly
+MD5Sum:
+ $EMPTYMD5 stupid/binary-abacus/Packages
+ $EMPTYGZMD5 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2MD5 stupid/binary-abacus/Packages.bz2
+ $(mdandsize dists/test1/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYMD5 stupid/source/Sources
+ $EMPTYGZMD5 stupid/source/Sources.gz
+ $EMPTYBZ2MD5 stupid/source/Sources.bz2
+ e38c7da133734e1fd68a7e344b94fe96 39 stupid/source/Release
+ $EMPTYMD5 ugly/binary-abacus/Packages
+ $EMPTYGZMD5 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2MD5 ugly/binary-abacus/Packages.bz2
+ $(mdandsize dists/test1/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYMD5 ugly/source/Sources
+ $EMPTYGZMD5 ugly/source/Sources.gz
+ $EMPTYBZ2MD5 ugly/source/Sources.bz2
+ ed4ee9aa5d080f67926816133872fd02 37 ugly/source/Release
+SHA1:
+ $(sha1andsize dists/test1/stupid/binary-abacus/Packages) stupid/binary-abacus/Packages
+ $EMPTYGZSHA1 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA1 stupid/binary-abacus/Packages.bz2
+ $(sha1andsize dists/test1/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYSHA1 stupid/source/Sources
+ $EMPTYGZSHA1 stupid/source/Sources.gz
+ $EMPTYBZ2SHA1 stupid/source/Sources.bz2
+ ff71705a4cadaec55de5a6ebbfcd726caf2e2606 39 stupid/source/Release
+ $EMPTYSHA1 ugly/binary-abacus/Packages
+ $EMPTYGZSHA1 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA1 ugly/binary-abacus/Packages.bz2
+ $(sha1andsize dists/test1/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYSHA1 ugly/source/Sources
+ $EMPTYGZSHA1 ugly/source/Sources.gz
+ $EMPTYBZ2SHA1 ugly/source/Sources.bz2
+ b297876e9d6ee3ee6083160003755047ede22a96 37 ugly/source/Release
+SHA256:
+ $(sha2andsize dists/test1/stupid/binary-abacus/Packages) stupid/binary-abacus/Packages
+ $EMPTYGZSHA2 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA2 stupid/binary-abacus/Packages.bz2
+ $(sha2andsize dists/test1/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYSHA2 stupid/source/Sources
+ $EMPTYGZSHA2 stupid/source/Sources.gz
+ $EMPTYBZ2SHA2 stupid/source/Sources.bz2
+ b88352d8e0227a133e2236c3a8961581562ee285980fc20bb79626d0d208aa51 39 stupid/source/Release
+ $EMPTYSHA2 ugly/binary-abacus/Packages
+ $EMPTYGZSHA2 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA2 ugly/binary-abacus/Packages.bz2
+ $(sha2andsize dists/test1/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYSHA2 ugly/source/Sources
+ $EMPTYGZSHA2 ugly/source/Sources.gz
+ $EMPTYBZ2SHA2 ugly/source/Sources.bz2
+ edb5450a3f98a140b938c8266b8b998ba8f426c80ac733fe46423665d5770d9f 37 ugly/source/Release
+END
+cat > dists/test2/stupid/binary-abacus/Release.expected <<END
+Archive: broken
+Version: 9999999.02
+Component: stupid
+Origin: Brain
+Label: Only a test
+Architecture: abacus
+Description: test with all fields set
+END
+dodiff dists/test2/stupid/binary-abacus/Release.expected dists/test2/stupid/binary-abacus/Release
+rm dists/test2/stupid/binary-abacus/Release.expected
+cat > dists/test2/ugly/binary-abacus/Release.expected <<END
+Archive: broken
+Version: 9999999.02
+Component: ugly
+Origin: Brain
+Label: Only a test
+Architecture: abacus
+Description: test with all fields set
+END
+dodiff dists/test2/ugly/binary-abacus/Release.expected dists/test2/ugly/binary-abacus/Release
+rm dists/test2/ugly/binary-abacus/Release.expected
+cat > Release.test2.expected <<END
+Origin: Brain
+Label: Only a test
+Suite: broken
+Codename: test2
+Version: 9999999.02
+Date: normalized
+Architectures: abacus coal
+Components: stupid ugly
+Description: test with all fields set
+MD5Sum:
+ $EMPTYMD5 stupid/binary-abacus/Packages
+ $EMPTYGZMD5 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2MD5 stupid/binary-abacus/Packages.bz2
+ $FAKESUPERMD5 stupid/binary-abacus/Packages.super
+ $(mdandsize dists/test2/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYMD5 stupid/binary-coal/Packages
+ $EMPTYGZMD5 stupid/binary-coal/Packages.gz
+ $EMPTYBZ2MD5 stupid/binary-coal/Packages.bz2
+ $FAKESUPERMD5 stupid/binary-coal/Packages.super
+ 10ae2f283e1abdd3facfac6ed664035d 144 stupid/binary-coal/Release
+ $EMPTYMD5 stupid/source/Sources
+ $EMPTYGZMD5 stupid/source/Sources.gz
+ $EMPTYBZ2MD5 stupid/source/Sources.bz2
+ $FAKESUPERMD5 stupid/source/Sources.super
+ b923b3eb1141e41f0b8bb74297ac8a36 146 stupid/source/Release
+ $EMPTYMD5 ugly/binary-abacus/Packages
+ $EMPTYGZMD5 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2MD5 ugly/binary-abacus/Packages.bz2
+ $FAKESUPERMD5 ugly/binary-abacus/Packages.super
+ $(mdandsize dists/test2/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYMD5 ugly/binary-coal/Packages
+ $EMPTYGZMD5 ugly/binary-coal/Packages.gz
+ $EMPTYBZ2MD5 ugly/binary-coal/Packages.bz2
+ $FAKESUPERMD5 ugly/binary-coal/Packages.super
+ 7a05de3b706d08ed06779d0ec2e234e9 142 ugly/binary-coal/Release
+ $EMPTYMD5 ugly/source/Sources
+ $EMPTYGZMD5 ugly/source/Sources.gz
+ $EMPTYBZ2MD5 ugly/source/Sources.bz2
+ $FAKESUPERMD5 ugly/source/Sources.super
+ e73a8a85315766763a41ad4dc6744bf5 144 ugly/source/Release
+SHA1:
+ $EMPTYSHA1 stupid/binary-abacus/Packages
+ $EMPTYGZSHA1 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA1 stupid/binary-abacus/Packages.bz2
+ $FAKESUPERSHA1 stupid/binary-abacus/Packages.super
+ $(sha1andsize dists/test2/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYSHA1 stupid/binary-coal/Packages
+ $EMPTYGZSHA1 stupid/binary-coal/Packages.gz
+ $EMPTYBZ2SHA1 stupid/binary-coal/Packages.bz2
+ $FAKESUPERSHA1 stupid/binary-coal/Packages.super
+ $(sha1andsize dists/test2/stupid/binary-coal/Release) stupid/binary-coal/Release
+ $EMPTYSHA1 stupid/source/Sources
+ $EMPTYGZSHA1 stupid/source/Sources.gz
+ $EMPTYBZ2SHA1 stupid/source/Sources.bz2
+ $FAKESUPERSHA1 stupid/source/Sources.super
+ $(sha1andsize dists/test2/stupid/source/Release) stupid/source/Release
+ $EMPTYSHA1 ugly/binary-abacus/Packages
+ $EMPTYGZSHA1 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA1 ugly/binary-abacus/Packages.bz2
+ $FAKESUPERSHA1 ugly/binary-abacus/Packages.super
+ $(sha1andsize dists/test2/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYSHA1 ugly/binary-coal/Packages
+ $EMPTYGZSHA1 ugly/binary-coal/Packages.gz
+ $EMPTYBZ2SHA1 ugly/binary-coal/Packages.bz2
+ $FAKESUPERSHA1 ugly/binary-coal/Packages.super
+ $(sha1andsize dists/test2/ugly/binary-coal/Release) ugly/binary-coal/Release
+ $EMPTYSHA1 ugly/source/Sources
+ $EMPTYGZSHA1 ugly/source/Sources.gz
+ $EMPTYBZ2SHA1 ugly/source/Sources.bz2
+ $FAKESUPERSHA1 ugly/source/Sources.super
+ $(sha1andsize dists/test2/ugly/source/Release) ugly/source/Release
+SHA256:
+ $EMPTYSHA2 stupid/binary-abacus/Packages
+ $EMPTYGZSHA2 stupid/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA2 stupid/binary-abacus/Packages.bz2
+ $FAKESUPERSHA2 stupid/binary-abacus/Packages.super
+ $(sha2andsize dists/test2/stupid/binary-abacus/Release) stupid/binary-abacus/Release
+ $EMPTYSHA2 stupid/binary-coal/Packages
+ $EMPTYGZSHA2 stupid/binary-coal/Packages.gz
+ $EMPTYBZ2SHA2 stupid/binary-coal/Packages.bz2
+ $FAKESUPERSHA2 stupid/binary-coal/Packages.super
+ $(sha2andsize dists/test2/stupid/binary-coal/Release) stupid/binary-coal/Release
+ $EMPTYSHA2 stupid/source/Sources
+ $EMPTYGZSHA2 stupid/source/Sources.gz
+ $EMPTYBZ2SHA2 stupid/source/Sources.bz2
+ $FAKESUPERSHA2 stupid/source/Sources.super
+ $(sha2andsize dists/test2/stupid/source/Release) stupid/source/Release
+ $EMPTYSHA2 ugly/binary-abacus/Packages
+ $EMPTYGZSHA2 ugly/binary-abacus/Packages.gz
+ $EMPTYBZ2SHA2 ugly/binary-abacus/Packages.bz2
+ $FAKESUPERSHA2 ugly/binary-abacus/Packages.super
+ $(sha2andsize dists/test2/ugly/binary-abacus/Release) ugly/binary-abacus/Release
+ $EMPTYSHA2 ugly/binary-coal/Packages
+ $EMPTYGZSHA2 ugly/binary-coal/Packages.gz
+ $EMPTYBZ2SHA2 ugly/binary-coal/Packages.bz2
+ $FAKESUPERSHA2 ugly/binary-coal/Packages.super
+ $(sha2andsize dists/test2/ugly/binary-coal/Release) ugly/binary-coal/Release
+ $EMPTYSHA2 ugly/source/Sources
+ $EMPTYGZSHA2 ugly/source/Sources.gz
+ $EMPTYBZ2SHA2 ugly/source/Sources.bz2
+ $FAKESUPERSHA2 ugly/source/Sources.super
+ $(sha2andsize dists/test2/ugly/source/Release) ugly/source/Release
+END
+normalizerelease dists/test1/Release > dists/test1/Release.normalized
+normalizerelease dists/test2/Release > dists/test2/Release.normalized
+dodiff Release.test1.expected dists/test1/Release.normalized
+dodiff Release.test2.expected dists/test2/Release.normalized
+rm dists/*/Release.normalized
+
+PACKAGE=simple EPOCH="" VERSION=1 REVISION="" SECTION="stupid/base" genpackage.sh
+checknolog log1
+testrun - -b . include test1 test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/stupid"
+-v2*=Created directory "./pool/stupid/s"
+-v2*=Created directory "./pool/stupid/s/simple"
+$(ofa 'pool/stupid/s/simple/simple-addons_1_all.deb')
+$(ofa 'pool/stupid/s/simple/simple_1_abacus.deb')
+$(ofa 'pool/stupid/s/simple/simple_1.tar.gz')
+$(ofa 'pool/stupid/s/simple/simple_1.dsc')
+$(ofa 'pool/stupid/s/simple/simple_1_source+all+abacus.changes')
+$(opa 'simple-addons' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'simple' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'simple' unset 'test1' 'stupid' 'source' 'dsc')
+$(ota 'test1' 'simple')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+echo returned: $?
+checklog log1 << EOF
+DATESTR add test1 deb stupid abacus simple-addons 1
+DATESTR add test1 deb stupid abacus simple 1
+DATESTR add test1 dsc stupid source simple 1
+EOF
+
+PACKAGE=bloat+-0a9z.app EPOCH=99: VERSION=0.9-A:Z+a:z REVISION=-0+aA.9zZ SECTION="ugly/base" genpackage.sh
+testrun - -b . include test1 test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool/ugly"
+-v2*=Created directory "./pool/ugly/b"
+-v2*=Created directory "./pool/ugly/b/bloat+-0a9z.app"
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+$(opa 'bloat+-0a9z.app-addons' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app' unset 'test1' 'ugly' 'source' 'dsc')
+$(ota 'test1' 'bloat+-0a9z.app')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+echo returned: $?
+checklog log1 <<EOF
+DATESTR add test1 deb ugly abacus bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 deb ugly abacus bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 dsc ugly source bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+
+testrun - -b . -Tdsc remove test1 simple 3<<EOF
+stdout
+$(opd 'simple' unset test1 stupid source dsc)
+=[tracking_get test1 simple 1]
+=[tracking_get found test1 simple 1]
+=[tracking_save test1 simple 1]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 dsc stupid source simple 1
+EOF
+testrun - -b . -Tdeb remove test1 bloat+-0a9z.app 3<<EOF
+stdout
+$(opd 'bloat+-0a9z.app' unset test1 ugly abacus deb)
+=[tracking_get test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_get found test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_save test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 deb ugly abacus bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -A source remove test1 bloat+-0a9z.app 3<<EOF
+stdout
+$(opd 'bloat+-0a9z.app' unset test1 ugly source dsc)
+=[tracking_get test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_get found test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_save test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 dsc ugly source bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -A abacus remove test1 simple 3<<EOF
+stdout
+$(opd 'simple' unset test1 stupid abacus deb)
+=[tracking_get test1 simple 1]
+=[tracking_get found test1 simple 1]
+=[tracking_save test1 simple 1]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 deb stupid abacus simple 1
+EOF
+testrun - -b . -C ugly remove test1 bloat+-0a9z.app-addons 3<<EOF
+stdout
+$(opd 'bloat+-0a9z.app-addons' unset test1 ugly abacus deb)
+=[tracking_get test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_get found test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+=[tracking_save test1 bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 deb ugly abacus bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -C stupid remove test1 simple-addons 3<<EOF
+stdout
+$(opd 'simple-addons' unset test1 stupid abacus deb)
+=[tracking_get test1 simple 1]
+=[tracking_get found test1 simple 1]
+=[tracking_save test1 simple 1]
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 deb stupid abacus simple-addons 1
+EOF
+CURDATE="`TZ=GMT LC_ALL=C date +'%a, %d %b %Y %H:%M:%S UTC'`"
+normalizerelease dists/test1/Release > Release.test1.normalized
+dodiff Release.test1.expected Release.test1.normalized
+rm Release.test1.normalized
+
+cat > conf/srcoverride <<END
+simple Section ugly/games
+simple Priority optional
+simple Maintainer simple.source.maintainer
+bloat+-0a9z.app Section stupid/X11
+bloat+-0a9z.app Priority optional
+bloat+-0a9z.app X-addition totally-unsupported
+bloat+-0a9z.app Maintainer bloat.source.maintainer
+END
+cat > conf/binoverride <<END
+simple Maintainer simple.maintainer
+simple Section ugly/base
+simple Priority optional
+simple-addons Section ugly/addons
+simple-addons Priority optional
+simple-addons Maintainer simple.add.maintainer
+bloat+-0a9z.app Maintainer bloat.maintainer
+bloat+-0a9z.app Section stupid/base
+bloat+-0a9z.app Priority optional
+bloat+-0a9z.app-addons Section stupid/addons
+bloat+-0a9z.app-addons Maintainer bloat.add.maintainer
+bloat+-0a9z.app-addons Priority optional
+END
+
+testrun - -b . -Tdsc -A source includedsc test2 simple_1.dsc 3<<EOF
+stderr
+-v1=simple_1.dsc: component guessed as 'ugly'
+stdout
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'change'
+-v2*=Created directory "./pool/ugly/s"
+-v2*=Created directory "./pool/ugly/s/simple"
+$(ofa 'pool/ugly/s/simple/simple_1.dsc')
+$(ofa 'pool/ugly/s/simple/simple_1.tar.gz')
+$(opa 'simple' unset 'test2' 'ugly' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+-v6*= replacing './dists/test2/ugly/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 dsc ugly source simple 1
+EOF
+testrun - -b . -Tdsc -A source includedsc test2 bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc 3<<EOF
+stderr
+-v1=bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc: component guessed as 'stupid'
+stdout
+-v2*=Created directory "./pool/stupid/b"
+-v2*=Created directory "./pool/stupid/b/bloat+-0a9z.app"
+$(ofa 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofa 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(opa 'bloat+-0a9z.app' unset 'test2' 'stupid' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= replacing './dists/test2/stupid/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'change'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 dsc stupid source bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -Tdeb -A abacus includedeb test2 simple_1_abacus.deb 3<<EOF
+stderr
+-v1=simple_1_abacus.deb: component guessed as 'ugly'
+stdout
+$(ofa 'pool/ugly/s/simple/simple_1_abacus.deb')
+$(opa 'simple' x 'test2' 'ugly' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= replacing './dists/test2/ugly/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'change'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 deb ugly abacus simple 1
+EOF
+testrun - -b . -Tdeb -A coal includedeb test2 simple-addons_1_all.deb 3<<EOF
+stderr
+-v1=simple-addons_1_all.deb: component guessed as 'ugly'
+stdout
+$(ofa 'pool/ugly/s/simple/simple-addons_1_all.deb')
+$(opa 'simple-addons' x 'test2' 'ugly' 'coal' 'deb')
+-v0=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= replacing './dists/test2/ugly/binary-coal/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'change'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 deb ugly coal simple-addons 1
+EOF
+testrun - -b . -Tdeb -A abacus includedeb test2 bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb 3<<EOF
+stderr
+-v1=bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb: component guessed as 'stupid'
+stdout
+$(ofa 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(opa 'bloat+-0a9z.app' x 'test2' 'stupid' 'abacus' 'deb')
+-v0=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v6*= replacing './dists/test2/stupid/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'change'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 deb stupid abacus bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -Tdeb -A coal includedeb test2 bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb 3<<EOF
+stderr
+-v1=bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb: component guessed as 'stupid'
+stdout
+$(ofa 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(opa 'bloat+-0a9z.app-addons' x 'test2' 'stupid' 'coal' 'deb')
+-v0=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v11*=Exporthook successfully returned!
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= replacing './dists/test2/stupid/binary-coal/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'change'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 deb stupid coal bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+find dists/test2/ \( -name "Packages.gz" -o -name "Sources.gz" \) -print0 | xargs -0 zgrep '^\(Package\|Maintainer\|Section\|Priority\): ' | sort > results
+cat >results.expected <<END
+dists/test2/stupid/binary-abacus/Packages.gz:Maintainer: bloat.maintainer
+dists/test2/stupid/binary-abacus/Packages.gz:Package: bloat+-0a9z.app
+dists/test2/stupid/binary-abacus/Packages.gz:Priority: optional
+dists/test2/stupid/binary-abacus/Packages.gz:Section: stupid/base
+dists/test2/stupid/binary-coal/Packages.gz:Maintainer: bloat.add.maintainer
+dists/test2/stupid/binary-coal/Packages.gz:Package: bloat+-0a9z.app-addons
+dists/test2/stupid/binary-coal/Packages.gz:Priority: optional
+dists/test2/stupid/binary-coal/Packages.gz:Section: stupid/addons
+dists/test2/stupid/source/Sources.gz:Maintainer: bloat.source.maintainer
+dists/test2/stupid/source/Sources.gz:Package: bloat+-0a9z.app
+dists/test2/stupid/source/Sources.gz:Priority: optional
+dists/test2/stupid/source/Sources.gz:Section: stupid/X11
+dists/test2/ugly/binary-abacus/Packages.gz:Maintainer: simple.maintainer
+dists/test2/ugly/binary-abacus/Packages.gz:Package: simple
+dists/test2/ugly/binary-abacus/Packages.gz:Priority: optional
+dists/test2/ugly/binary-abacus/Packages.gz:Section: ugly/base
+dists/test2/ugly/binary-coal/Packages.gz:Maintainer: simple.add.maintainer
+dists/test2/ugly/binary-coal/Packages.gz:Package: simple-addons
+dists/test2/ugly/binary-coal/Packages.gz:Priority: optional
+dists/test2/ugly/binary-coal/Packages.gz:Section: ugly/addons
+dists/test2/ugly/source/Sources.gz:Maintainer: simple.source.maintainer
+dists/test2/ugly/source/Sources.gz:Package: simple
+dists/test2/ugly/source/Sources.gz:Priority: optional
+dists/test2/ugly/source/Sources.gz:Section: ugly/games
+END
+dodiff results.expected results
+rm results
+testout "" -b . listfilter test2 'Source(==simple)|(!Source,Package(==simple))'
+ls -la results
+cat > results.expected << END
+test2|ugly|abacus: simple 1
+test2|ugly|coal: simple-addons 1
+test2|ugly|source: simple 1
+END
+dodiff results.expected results
+testout "" -b . listfilter test2 'Source(==bloat+-0a9z.app)|(!Source,Package(==bloat+-0a9z.app))'
+cat > results.expected << END
+test2|stupid|abacus: bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+test2|stupid|coal: bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+test2|stupid|source: bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+END
+dodiff results.expected results
+
+cat >conf/updates <<END
+Name: Test2toTest1
+Method: copy:$WORKDIR
+VerifyRelease: blindtrust
+Suite: test2
+Architectures: coal>abacus abacus source
+FilterFormula: Priority(==optional),Package(>=alpha),Package(<=zeta)
+FilterList: error list
+ListHook: /bin/cp
+END
+
+cat >conf/list <<END
+simple-addons install
+bloat+-0a9z.app install
+simple install
+bloat+-0a9z.app-addons install
+END
+
+cp dists/test2/Release Release.test2.safe
+ed -s dists/test2/Release <<EOF
+g/stupid.source.Sources/s/^ ................................ / ffffffffffffffffffffffffffffffff /
+w
+q
+EOF
+
+testrun - -b . update test1 3<<EOF
+returns 254
+stderr
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/ugly/source/Sources.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/ugly/source/Sources.bz2'
+-v2*=Uncompress './lists/Test2toTest1_test2_ugly_Sources.bz2' into './lists/Test2toTest1_test2_ugly_Sources' using '/bin/bunzip2'...
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/ugly/binary-abacus/Packages.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/ugly/binary-abacus/Packages.bz2'
+-v2*=Uncompress './lists/Test2toTest1_test2_ugly_abacus_Packages.bz2' into './lists/Test2toTest1_test2_ugly_abacus_Packages' using '/bin/bunzip2'...
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/ugly/binary-coal/Packages.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/ugly/binary-coal/Packages.bz2'
+-v2*=Uncompress './lists/Test2toTest1_test2_ugly_coal_Packages.bz2' into './lists/Test2toTest1_test2_ugly_coal_Packages' using '/bin/bunzip2'...
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+*=Wrong checksum during receive of 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2':
+*=md5 expected: ffffffffffffffffffffffffffffffff, got: $(md5 dists/test2/stupid/source/Sources.bz2)
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/binary-abacus/Packages.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/binary-abacus/Packages.bz2'
+-v2*=Uncompress './lists/Test2toTest1_test2_stupid_abacus_Packages.bz2' into './lists/Test2toTest1_test2_stupid_abacus_Packages' using '/bin/bunzip2'...
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/binary-coal/Packages.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/binary-coal/Packages.bz2'
+-v2*=Uncompress './lists/Test2toTest1_test2_stupid_coal_Packages.bz2' into './lists/Test2toTest1_test2_stupid_coal_Packages' using '/bin/bunzip2'...
+-v0*=There have been errors!
+stdout
+-v2*=Created directory "./lists"
+EOF
+cp Release.test2.safe dists/test2/Release
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+ed -s dists/test2/Release <<EOF
+g/stupid.source.Sources/s/^ ........................................ / 1111111111111111111111111111111111111111 /
+w
+q
+EOF
+
+testrun - -b . update test1 3<<EOF
+returns 254
+stderr
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+*=Wrong checksum during receive of 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2':
+*=sha1 expected: 1111111111111111111111111111111111111111, got: $(sha1 dists/test2/stupid/source/Sources.bz2)
+-v0*=There have been errors!
+stdout
+EOF
+cp Release.test2.safe dists/test2/Release
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+ed -s dists/test2/Release <<EOF
+g/stupid.source.Sources/s/^ ................................................................ / 9999999999999999999999999999999999999999999999999999999999999999 /
+w
+q
+EOF
+
+testrun - -b . update test1 3<<EOF
+returns 254
+stderr
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+*=Wrong checksum during receive of 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2':
+*=sha256 expected: 9999999999999999999999999999999999999999999999999999999999999999, got: $(sha256 dists/test2/stupid/source/Sources.bz2)
+-v0*=There have been errors!
+stdout
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+cp conf/updates conf/updates.safe
+cat >> conf/updates <<EOF
+IgnoreHashes: sha2
+EOF
+
+testrun - -b . update test1 3<<EOF
+returns 248
+stderr
+*=Error parsing config file ./conf/updates, line 9, column 15:
+*=Unknown flag in IgnoreHashes header.(allowed values: md5, sha1 and sha256)
+*=To ignore unknown fields use --ignore=unknownfield
+-v0*=There have been errors!
+stdout
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+cp conf/updates.safe conf/updates
+cat >> conf/updates <<EOF
+IgnoreHashes: sha1
+EOF
+
+testrun - -b . update test1 3<<EOF
+returns 254
+stderr
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2'
+*=Wrong checksum during receive of 'copy:$WORKDIR/dists/test2/stupid/source/Sources.bz2':
+*=sha256 expected: 9999999999999999999999999999999999999999999999999999999999999999, got: $(sha256 dists/test2/stupid/source/Sources.bz2)
+-v0*=There have been errors!
+stdout
+EOF
+
+cp conf/updates.safe conf/updates
+cat >> conf/updates <<EOF
+IgnoreHashes: sha256
+EOF
+
+testrun - -b . update test1 3<<EOF
+stderr
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+-v2*=Uncompress './lists/Test2toTest1_test2_stupid_Sources.bz2' into './lists/Test2toTest1_test2_stupid_Sources'...
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_Sources' './lists/_test1_ugly_source_Test2toTest1_Test2toTest1_test2_ugly_Sources'
+-v6*=Listhook successfully returned!
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_abacus_Packages' './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_abacus_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_coal_Packages' './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_coal_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_Sources' './lists/_test1_stupid_source_Test2toTest1_Test2toTest1_test2_stupid_Sources'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_abacus_Packages' './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_abacus_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_coal_Packages' './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_coal_Packages'
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test1|ugly|source'
+-v5*= reading './lists/_test1_ugly_source_Test2toTest1_Test2toTest1_test2_ugly_Sources'
+-v3*= processing updates for 'test1|ugly|abacus'
+-v5*= reading './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_abacus_Packages'
+-v5*= reading './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_coal_Packages'
+-v3*= processing updates for 'test1|stupid|source'
+-v5*= reading './lists/_test1_stupid_source_Test2toTest1_Test2toTest1_test2_stupid_Sources'
+-v3*= processing updates for 'test1|stupid|abacus'
+-v5*= reading './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_abacus_Packages'
+-v5*= reading './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_coal_Packages'
+-v0*=Getting packages...
+-v1=Freeing some memory...
+-v1*=Shutting down aptmethods...
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'simple' unset 'test1' 'ugly' 'source' 'dsc')
+$(opa 'simple' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'simple-addons' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app' unset 'test1' 'stupid' 'source' 'dsc')
+$(opa 'bloat+-0a9z.app' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app-addons' x 'test1' 'stupid' 'abacus' 'deb')
+-v1*=Retracking test1...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+mv Release.test2.safe dists/test2/Release
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+mv conf/updates.safe conf/updates
+
+checklog log1 <<EOF
+DATESTR add test1 dsc ugly source simple 1
+DATESTR add test1 deb ugly abacus simple 1
+DATESTR add test1 deb ugly abacus simple-addons 1
+DATESTR add test1 dsc stupid source bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 deb stupid abacus bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 deb stupid abacus bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+checknolog log1
+checknolog log2
+testrun - -b . update test1 3<<EOF
+=WARNING: Single-Instance not yet supported!
+=aptmethod error receiving 'copy:$WORKDIR/dists/test2/InRelease':
+='Failed to stat - stat (2: No such file or directory)'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/test2/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/test2/Release'
+stdout
+-v0*=Nothing to do found. (Use --noskipold to force processing)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 < /dev/null
+checknolog log2
+testrun - --nolistsdownload --noskipold -b . update test1 3<<EOF
+=WARNING: Single-Instance not yet supported!
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_Sources' './lists/_test1_ugly_source_Test2toTest1_Test2toTest1_test2_ugly_Sources'
+-v6*=Listhook successfully returned!
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_abacus_Packages' './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_abacus_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_ugly_coal_Packages' './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_coal_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_Sources' './lists/_test1_stupid_source_Test2toTest1_Test2toTest1_test2_stupid_Sources'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_abacus_Packages' './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_abacus_Packages'
+-v6*=Called /bin/cp './lists/Test2toTest1_test2_stupid_coal_Packages' './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_coal_Packages'
+stdout
+-v0*=Calculating packages to get...
+-v3*= processing updates for 'test1|ugly|source'
+-v5*= reading './lists/_test1_ugly_source_Test2toTest1_Test2toTest1_test2_ugly_Sources'
+-v3*= processing updates for 'test1|ugly|abacus'
+-v5*= reading './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_abacus_Packages'
+-v5*= reading './lists/_test1_ugly_abacus_Test2toTest1_Test2toTest1_test2_ugly_coal_Packages'
+-v3*= processing updates for 'test1|stupid|source'
+-v5*= reading './lists/_test1_stupid_source_Test2toTest1_Test2toTest1_test2_stupid_Sources'
+-v3*= processing updates for 'test1|stupid|abacus'
+-v5*= reading './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_abacus_Packages'
+-v5*= reading './lists/_test1_stupid_abacus_Test2toTest1_Test2toTest1_test2_stupid_coal_Packages'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 < /dev/null
+checknolog log2
+
+find dists/test2/ \( -name "Packages.gz" -o -name "Sources.gz" \) -print0 | xargs -0 zgrep '^Package: ' | sed -e 's/test2/test1/' -e "s/coal/abacus/" | sort > test2
+find dists/test1/ \( -name "Packages.gz" -o -name "Sources.gz" \) -print0 | xargs -0 zgrep '^Package: ' | sort > test1
+dodiff test2 test1
+
+testrun - -b . check test1 test2 3<<EOF
+stdout
+-v1*=Checking test2...
+-x1*=Checking packages in 'test2|stupid|abacus'...
+-x1*=Checking packages in 'test2|stupid|coal'...
+-x1*=Checking packages in 'test2|stupid|source'...
+-x1*=Checking packages in 'test2|ugly|abacus'...
+-x1*=Checking packages in 'test2|ugly|coal'...
+-x1*=Checking packages in 'test2|ugly|source'...
+-v1*=Checking test1...
+-x1*=Checking packages in 'test1|stupid|abacus'...
+-x1*=Checking packages in 'test1|stupid|source'...
+-x1*=Checking packages in 'test1|ugly|abacus'...
+-x1*=Checking packages in 'test1|ugly|source'...
+EOF
+testrun "" -b . checkpool
+testrun - -b . rereference test1 test2 3<<EOF
+stdout
+-v1*=Referencing test2...
+-v2=Rereferencing test2|stupid|abacus...
+-v2=Rereferencing test2|stupid|coal...
+-v2=Rereferencing test2|stupid|source...
+-v2=Rereferencing test2|ugly|abacus...
+-v2=Rereferencing test2|ugly|coal...
+-v2=Rereferencing test2|ugly|source...
+-v3*=Unlocking dependencies of test2|stupid|abacus...
+-v3*=Referencing test2|stupid|abacus...
+-v3*=Unlocking dependencies of test2|stupid|coal...
+-v3*=Referencing test2|stupid|coal...
+-v3*=Unlocking dependencies of test2|stupid|source...
+-v3*=Referencing test2|stupid|source...
+-v3*=Unlocking dependencies of test2|ugly|abacus...
+-v3*=Referencing test2|ugly|abacus...
+-v3*=Unlocking dependencies of test2|ugly|coal...
+-v3*=Referencing test2|ugly|coal...
+-v3*=Unlocking dependencies of test2|ugly|source...
+-v3*=Referencing test2|ugly|source...
+-v1*=Referencing test1...
+-v2=Rereferencing test1|stupid|abacus...
+-v2=Rereferencing test1|stupid|source...
+-v2=Rereferencing test1|ugly|abacus...
+-v2=Rereferencing test1|ugly|source...
+-v3*=Unlocking dependencies of test1|stupid|abacus...
+-v3*=Referencing test1|stupid|abacus...
+-v3*=Unlocking dependencies of test1|stupid|source...
+-v3*=Referencing test1|stupid|source...
+-v3*=Unlocking dependencies of test1|ugly|abacus...
+-v3*=Referencing test1|ugly|abacus...
+-v3*=Unlocking dependencies of test1|ugly|source...
+-v3*=Referencing test1|ugly|source...
+EOF
+testrun - -b . check test1 test2 3<<EOF
+stdout
+-v1*=Checking test1...
+-x1*=Checking packages in 'test2|stupid|abacus'...
+-x1*=Checking packages in 'test2|stupid|coal'...
+-x1*=Checking packages in 'test2|stupid|source'...
+-x1*=Checking packages in 'test2|ugly|abacus'...
+-x1*=Checking packages in 'test2|ugly|coal'...
+-x1*=Checking packages in 'test2|ugly|source'...
+-v1*=Checking test2...
+-x1*=Checking packages in 'test1|stupid|abacus'...
+-x1*=Checking packages in 'test1|stupid|source'...
+-x1*=Checking packages in 'test1|ugly|abacus'...
+-x1*=Checking packages in 'test1|ugly|source'...
+EOF
+
+testout "" -b . dumptracks
+cat >results.expected <<END
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:0.9-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb b 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb a 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1.dsc s 0
+ pool/stupid/s/simple/simple_1.tar.gz s 0
+ pool/stupid/s/simple/simple_1_abacus.deb b 0
+ pool/stupid/s/simple/simple-addons_1_all.deb a 0
+ pool/stupid/s/simple/simple_1_source+all+abacus.changes c 0
+ pool/ugly/s/simple/simple_1_abacus.deb b 1
+ pool/ugly/s/simple/simple-addons_1_all.deb a 1
+ pool/ugly/s/simple/simple_1.dsc s 1
+ pool/ugly/s/simple/simple_1.tar.gz s 1
+
+END
+dodiff results.expected results
+
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testrun - -b . removealltracks test2 test1 3<<EOF
+stdout
+stderr
+*=Error: Requested removing of all tracks of distribution 'test1',
+*=which still has tracking enabled. Use --delete to delete anyway.
+-v0*=There have been errors!
+returns 255
+EOF
+testrun - --delete -b . removealltracks test2 test1 3<<EOF
+stdout
+-v0*=Deleting all tracks for test2...
+-v0*=Deleting all tracks for test1...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/stupid/s/simple/simple-addons_1_all.deb')
+$(ofd 'pool/stupid/s/simple/simple_1.dsc')
+$(ofd 'pool/stupid/s/simple/simple_1.tar.gz')
+$(ofd 'pool/stupid/s/simple/simple_1_abacus.deb')
+$(ofd 'pool/stupid/s/simple/simple_1_source+all+abacus.changes')
+-v2*=removed now empty directory ./pool/stupid/s/simple
+-v2*=removed now empty directory ./pool/stupid/s
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+-v2*=removed now empty directory ./pool/ugly/b/bloat+-0a9z.app
+-v2*=removed now empty directory ./pool/ugly/b
+EOF
+echo returned: $?
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testrun - -b . include test1 test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool/ugly/b"
+-v2*=Created directory "./pool/ugly/b/bloat+-0a9z.app"
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+$(opa 'bloat+-0a9z.app-addons' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app' x 'test1' 'ugly' 'abacus' 'deb')
+$(opa 'bloat+-0a9z.app' unset 'test1' 'ugly' 'source' 'dsc')
+$(ota 'test1' 'bloat+-0a9z.app')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR add test1 deb ugly abacus bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 deb ugly abacus bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR add test1 dsc ugly source bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+echo returned: $?
+OUTPUT=test2.changes PACKAGE=bloat+-0a9z.app EPOCH=99: VERSION=9.0-A:Z+a:z REVISION=-0+aA.9zZ SECTION="ugly/extra" genpackage.sh
+testrun - -b . include test1 test2.changes 3<<EOF
+stdout
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofa 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+$(opu 'bloat+-0a9z.app-addons' x x 'test1' 'ugly' 'abacus' 'deb')
+$(opu 'bloat+-0a9z.app' x x 'test1' 'ugly' 'abacus' 'deb')
+$(opu 'bloat+-0a9z.app' x x 'test1' 'ugly' 'source' 'dsc')
+$(ota 'test1' 'bloat+-0a9z.app')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+echo returned: $?
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR replace test1 deb ugly abacus bloat+-0a9z.app-addons 99:9.0-A:Z+a:z-0+aA.9zZ 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR replace test1 deb ugly abacus bloat+-0a9z.app 99:9.0-A:Z+a:z-0+aA.9zZ 99:0.9-A:Z+a:z-0+aA.9zZ
+DATESTR replace test1 dsc ugly source bloat+-0a9z.app 99:9.0-A:Z+a:z-0+aA.9zZ 99:0.9-A:Z+a:z-0+aA.9zZ
+EOF
+testrun - -b . -S sectiontest -P prioritytest includedeb test1 simple_1_abacus.deb 3<<EOF
+stderr
+-v1*=simple_1_abacus.deb: component guessed as 'stupid'
+stdout
+-v2*=Created directory "./pool/stupid/s"
+-v2*=Created directory "./pool/stupid/s/simple"
+$(ofa 'pool/stupid/s/simple/simple_1_abacus.deb')
+$(opa 'simple' x 'test1' 'stupid' 'abacus' 'deb')
+$(ota 'test1' 'simple')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+echo returned: $?
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+dodo zgrep '^Section: sectiontest' dists/test1/stupid/binary-abacus/Packages.gz
+dodo zgrep '^Priority: prioritytest' dists/test1/stupid/binary-abacus/Packages.gz
+checklog log1 <<EOF
+DATESTR add test1 deb stupid abacus simple 1
+EOF
+testrun - -b . -S sectiontest -P prioritytest includedsc test1 simple_1.dsc 3<<EOF
+stderr
+-v1*=simple_1.dsc: component guessed as 'stupid'
+stdout
+$(ofa 'pool/stupid/s/simple/simple_1.dsc')
+$(ofa 'pool/stupid/s/simple/simple_1.tar.gz')
+$(opa 'simple' unset 'test1' 'stupid' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+echo returned: $?
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+dodo zgrep '^Section: sectiontest' dists/test1/stupid/source/Sources.gz
+dodo zgrep '^Priority: prioritytest' dists/test1/stupid/source/Sources.gz
+checklog log1 <<EOF
+DATESTR add test1 dsc stupid source simple 1
+EOF
+
+testout "" -b . dumptracks
+cat >results.expected <<END
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:0.9-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb b 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb a 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:9.0-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1_abacus.deb b 1
+ pool/stupid/s/simple/simple_1.dsc s 1
+ pool/stupid/s/simple/simple_1.tar.gz s 1
+
+END
+dodiff results.expected results
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+echo "now testing .orig.tar.gz handling"
+tar -czf test_1.orig.tar.gz test.changes
+PACKAGE=test EPOCH="" VERSION=1 REVISION="-2" SECTION="stupid/base" genpackage.sh -sd
+testrun - -b . include test1 test.changes 3<<EOF
+returns 249
+stderr
+*=Unable to find pool/stupid/t/test/test_1.orig.tar.gz needed by test_1-2.dsc!
+*=Perhaps you forgot to give dpkg-buildpackage the -sa option,
+*= or you could try --ignore=missingfile to guess possible files to use.
+-v0*=There have been errors!
+stdout
+-v2*=Created directory "./pool/stupid/t"
+-v2*=Created directory "./pool/stupid/t/test"
+$(ofa 'pool/stupid/t/test/test-addons_1-2_all.deb')
+$(ofa 'pool/stupid/t/test/test_1-2_abacus.deb')
+$(ofa 'pool/stupid/t/test/test_1-2.diff.gz')
+$(ofa 'pool/stupid/t/test/test_1-2.dsc')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/t/test/test-addons_1-2_all.deb')
+$(ofd 'pool/stupid/t/test/test_1-2_abacus.deb')
+$(ofd 'pool/stupid/t/test/test_1-2.diff.gz')
+$(ofd 'pool/stupid/t/test/test_1-2.dsc')
+-v2*=removed now empty directory ./pool/stupid/t/test
+-v2*=removed now empty directory ./pool/stupid/t
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checknolog log1
+checknolog log2
+testrun - -b . --ignore=missingfile include test1 test.changes 3<<EOF
+stderr
+*=Unable to find pool/stupid/t/test/test_1.orig.tar.gz!
+*=Perhaps you forgot to give dpkg-buildpackage the -sa option.
+*=--ignore=missingfile was given, searching for file...
+stdout
+-v2*=Created directory "./pool/stupid/t"
+-v2*=Created directory "./pool/stupid/t/test"
+$(ofa 'pool/stupid/t/test/test-addons_1-2_all.deb')
+$(ofa 'pool/stupid/t/test/test_1-2_abacus.deb')
+$(ofa 'pool/stupid/t/test/test_1-2.diff.gz')
+$(ofa 'pool/stupid/t/test/test_1-2.dsc')
+$(ofa 'pool/stupid/t/test/test_1.orig.tar.gz')
+$(ofa 'pool/stupid/t/test/test_1-2_source+all+abacus.changes')
+$(opa 'test-addons' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'test' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'test' unset 'test1' 'stupid' 'source' 'dsc')
+$(ota 'test1' 'test')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR add test1 deb stupid abacus test-addons 1-2
+DATESTR add test1 deb stupid abacus test 1-2
+DATESTR add test1 dsc stupid source test 1-2
+EOF
+dodo zgrep test_1-2.dsc dists/test1/stupid/source/Sources.gz
+
+tar -czf testb_2.orig.tar.gz test.changes
+PACKAGE=testb EPOCH="1:" VERSION=2 REVISION="-2" SECTION="stupid/base" genpackage.sh -sa
+testrun - -b . include test1 test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool/stupid/t/testb"
+$(ofa 'pool/stupid/t/testb/testb-addons_2-2_all.deb')
+$(ofa 'pool/stupid/t/testb/testb_2-2_abacus.deb')
+$(ofa 'pool/stupid/t/testb/testb_2-2.diff.gz')
+$(ofa 'pool/stupid/t/testb/testb_2-2.dsc')
+$(ofa 'pool/stupid/t/testb/testb_2.orig.tar.gz')
+$(ofa 'pool/stupid/t/testb/testb_1:2-2_source+all+abacus.changes')
+$(opa 'testb-addons' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'testb' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa 'testb' unset 'test1' 'stupid' 'source' 'dsc')
+$(ota 'test1' 'testb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR add test1 deb stupid abacus testb-addons 1:2-2
+DATESTR add test1 deb stupid abacus testb 1:2-2
+DATESTR add test1 dsc stupid source testb 1:2-2
+EOF
+dodo zgrep testb_2-2.dsc dists/test1/stupid/source/Sources.gz
+rm test2.changes
+PACKAGE=testb EPOCH="1:" VERSION=2 REVISION="-3" SECTION="stupid/base" OUTPUT="test2.changes" genpackage.sh -sd
+testrun - -b . include test1 test2.changes 3<<EOF
+stdout
+$(ofa 'pool/stupid/t/testb/testb-addons_2-3_all.deb')
+$(ofa 'pool/stupid/t/testb/testb_2-3_abacus.deb')
+$(ofa 'pool/stupid/t/testb/testb_2-3.diff.gz')
+$(ofa 'pool/stupid/t/testb/testb_2-3.dsc')
+$(ofa 'pool/stupid/t/testb/testb_1:2-3_source+all+abacus.changes')
+$(opu 'testb-addons' x x 'test1' 'stupid' 'abacus' 'deb')
+$(opu 'testb' x x 'test1' 'stupid' 'abacus' 'deb')
+$(opu 'testb' x x 'test1' 'stupid' 'source' 'dsc')
+$(ota 'test1' 'testb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR replace test1 deb stupid abacus testb-addons 1:2-3 1:2-2
+DATESTR replace test1 deb stupid abacus testb 1:2-3 1:2-2
+DATESTR replace test1 dsc stupid source testb 1:2-3 1:2-2
+EOF
+dodo zgrep testb_2-3.dsc dists/test1/stupid/source/Sources.gz
+
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+echo "now testing some error messages:"
+PACKAGE=4test EPOCH="1:" VERSION=0b.1 REVISION="-1" SECTION="stupid/base" genpackage.sh
+testrun - -b . include test1 test.changes 3<<EOF
+stdout
+-v2*=Created directory "./pool/stupid/4"
+-v2*=Created directory "./pool/stupid/4/4test"
+$(ofa 'pool/stupid/4/4test/4test-addons_0b.1-1_all.deb')
+$(ofa 'pool/stupid/4/4test/4test_0b.1-1_abacus.deb')
+$(ofa 'pool/stupid/4/4test/4test_0b.1-1.tar.gz')
+$(ofa 'pool/stupid/4/4test/4test_0b.1-1.dsc')
+$(ofa 'pool/stupid/4/4test/4test_1:0b.1-1_source+all+abacus.changes')
+$(opa '4test-addons' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa '4test' x 'test1' 'stupid' 'abacus' 'deb')
+$(opa '4test' unset 'test1' 'stupid' 'source' 'dsc')
+$(ota 'test1' '4test')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= looking for changes in 'test1|ugly|source'...
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR add test1 deb stupid abacus 4test-addons 1:0b.1-1
+DATESTR add test1 deb stupid abacus 4test 1:0b.1-1
+DATESTR add test1 dsc stupid source 4test 1:0b.1-1
+EOF
+
+cat >includeerror.rules <<EOF
+returns 255
+stderr
+-v0*=There have been errors!
+*=Error: Too few arguments for command 'include'!
+*=Syntax: reprepro [--delete] include <distribution> <.changes-file>
+EOF
+testrun includeerror -b . include unknown 3<<EOF
+testrun includeerror -b . include unknown test.changes test2.changes
+testrun - -b . include unknown test.changes 3<<EOF
+stderr
+-v0*=There have been errors!
+*=No distribution definition of 'unknown' found in './conf/distributions'!
+returns 249
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+testout "" -b . dumptracks
+# TODO: check those if they are really expected...
+cat > results.expected <<EOF
+Distribution: test1
+Source: 4test
+Version: 1:0b.1-1
+Files:
+ pool/stupid/4/4test/4test_0b.1-1.dsc s 1
+ pool/stupid/4/4test/4test_0b.1-1.tar.gz s 1
+ pool/stupid/4/4test/4test_0b.1-1_abacus.deb b 1
+ pool/stupid/4/4test/4test-addons_0b.1-1_all.deb a 1
+ pool/stupid/4/4test/4test_1:0b.1-1_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:0.9-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz s 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb b 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb a 0
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:9.0-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1_abacus.deb b 1
+ pool/stupid/s/simple/simple_1.dsc s 1
+ pool/stupid/s/simple/simple_1.tar.gz s 1
+
+Distribution: test1
+Source: test
+Version: 1-2
+Files:
+ pool/stupid/t/test/test_1-2.dsc s 1
+ pool/stupid/t/test/test_1.orig.tar.gz s 1
+ pool/stupid/t/test/test_1-2.diff.gz s 1
+ pool/stupid/t/test/test_1-2_abacus.deb b 1
+ pool/stupid/t/test/test-addons_1-2_all.deb a 1
+ pool/stupid/t/test/test_1-2_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: testb
+Version: 1:2-2
+Files:
+ pool/stupid/t/testb/testb_2-2.dsc s 0
+ pool/stupid/t/testb/testb_2.orig.tar.gz s 0
+ pool/stupid/t/testb/testb_2-2.diff.gz s 0
+ pool/stupid/t/testb/testb_2-2_abacus.deb b 0
+ pool/stupid/t/testb/testb-addons_2-2_all.deb a 0
+ pool/stupid/t/testb/testb_1:2-2_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: testb
+Version: 1:2-3
+Files:
+ pool/stupid/t/testb/testb_2-3.dsc s 1
+ pool/stupid/t/testb/testb_2.orig.tar.gz s 1
+ pool/stupid/t/testb/testb_2-3.diff.gz s 1
+ pool/stupid/t/testb/testb_2-3_abacus.deb b 1
+ pool/stupid/t/testb/testb-addons_2-3_all.deb a 1
+ pool/stupid/t/testb/testb_1:2-3_source+all+abacus.changes c 0
+
+EOF
+dodiff results.expected results
+testrun - -b . tidytracks 3<<EOF
+stdout
+-v0*=Looking for old tracks in test1...
+EOF
+testout "" -b . dumptracks
+dodiff results.expected results
+sed -i -e 's/^Tracking: keep/Tracking: all/' conf/distributions
+testrun - -b . tidytracks 3<<EOF
+stdout
+-v0*=Looking for old tracks in test1...
+$(otd 'testb' '1:2-2' 'test1')
+$(otd 'bloat+-0a9z.app' '99:0.9-A:Z+a:z-0+aA.9zZ' 'test1')
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:0.9-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+$(ofd 'pool/stupid/t/testb/testb-addons_2-2_all.deb')
+$(ofd 'pool/stupid/t/testb/testb_2-2_abacus.deb')
+$(ofd 'pool/stupid/t/testb/testb_2-2.dsc')
+$(ofd 'pool/stupid/t/testb/testb_2-2.diff.gz')
+$(ofd 'pool/stupid/t/testb/testb_1:2-2_source+all+abacus.changes')
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+cp db/tracking.db db/saved2tracking.db
+cp db/references.db db/saved2references.db
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testout "" -b . dumptracks
+cat > results.expected <<EOF
+Distribution: test1
+Source: 4test
+Version: 1:0b.1-1
+Files:
+ pool/stupid/4/4test/4test_0b.1-1.dsc s 1
+ pool/stupid/4/4test/4test_0b.1-1.tar.gz s 1
+ pool/stupid/4/4test/4test_0b.1-1_abacus.deb b 1
+ pool/stupid/4/4test/4test-addons_0b.1-1_all.deb a 1
+ pool/stupid/4/4test/4test_1:0b.1-1_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:9.0-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1_abacus.deb b 1
+ pool/stupid/s/simple/simple_1.dsc s 1
+ pool/stupid/s/simple/simple_1.tar.gz s 1
+
+Distribution: test1
+Source: test
+Version: 1-2
+Files:
+ pool/stupid/t/test/test_1-2.dsc s 1
+ pool/stupid/t/test/test_1.orig.tar.gz s 1
+ pool/stupid/t/test/test_1-2.diff.gz s 1
+ pool/stupid/t/test/test_1-2_abacus.deb b 1
+ pool/stupid/t/test/test-addons_1-2_all.deb a 1
+ pool/stupid/t/test/test_1-2_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: testb
+Version: 1:2-3
+Files:
+ pool/stupid/t/testb/testb_2-3.dsc s 1
+ pool/stupid/t/testb/testb_2.orig.tar.gz s 1
+ pool/stupid/t/testb/testb_2-3.diff.gz s 1
+ pool/stupid/t/testb/testb_2-3_abacus.deb b 1
+ pool/stupid/t/testb/testb-addons_2-3_all.deb a 1
+ pool/stupid/t/testb/testb_1:2-3_source+all+abacus.changes c 0
+
+EOF
+dodiff results.expected results
+sed -i -e 's/^Tracking: all/Tracking: minimal/' conf/distributions
+testrun - -b . tidytracks 3<<EOF
+stdout
+-v0*=Looking for old tracks in test1...
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testout "" -b . dumptracks
+dodiff results.expected results
+sed -i -e 's/^Tracking: minimal includechanges/Tracking: minimal/' conf/distributions
+testrun - -b . tidytracks 3<<EOF
+stdout
+-v0*=Looking for old tracks in test1...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/stupid/4/4test/4test_1:0b.1-1_source+all+abacus.changes')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes')
+$(ofd 'pool/stupid/t/test/test_1-2_source+all+abacus.changes')
+$(ofd 'pool/stupid/t/testb/testb_1:2-3_source+all+abacus.changes')
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testout "" -b . dumptracks
+cat > results.expected <<EOF
+Distribution: test1
+Source: 4test
+Version: 1:0b.1-1
+Files:
+ pool/stupid/4/4test/4test_0b.1-1.dsc s 1
+ pool/stupid/4/4test/4test_0b.1-1.tar.gz s 1
+ pool/stupid/4/4test/4test_0b.1-1_abacus.deb b 1
+ pool/stupid/4/4test/4test-addons_0b.1-1_all.deb a 1
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:9.0-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb a 1
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1_abacus.deb b 1
+ pool/stupid/s/simple/simple_1.dsc s 1
+ pool/stupid/s/simple/simple_1.tar.gz s 1
+
+Distribution: test1
+Source: test
+Version: 1-2
+Files:
+ pool/stupid/t/test/test_1-2.dsc s 1
+ pool/stupid/t/test/test_1.orig.tar.gz s 1
+ pool/stupid/t/test/test_1-2.diff.gz s 1
+ pool/stupid/t/test/test_1-2_abacus.deb b 1
+ pool/stupid/t/test/test-addons_1-2_all.deb a 1
+
+Distribution: test1
+Source: testb
+Version: 1:2-3
+Files:
+ pool/stupid/t/testb/testb_2-3.dsc s 1
+ pool/stupid/t/testb/testb_2.orig.tar.gz s 1
+ pool/stupid/t/testb/testb_2-3.diff.gz s 1
+ pool/stupid/t/testb/testb_2-3_abacus.deb b 1
+ pool/stupid/t/testb/testb-addons_2-3_all.deb a 1
+
+EOF
+dodiff results.expected results
+testrun - -b . tidytracks 3<<EOF
+stdout
+-v0*=Looking for old tracks in test1...
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+# Earlier update rules made this tracking data outdated.
+# so copy it, so it can be replayed so that also outdated data
+# is tested to be handled correctly.
+mv db/tracking.db db/savedtracking.db
+mv db/references.db db/savedreferences.db
+# Try this with .changes files still listed
+mv db/saved2tracking.db db/tracking.db
+mv db/saved2references.db db/references.db
+sed -i -e 's/^Tracking: minimal/Tracking: minimal includechanges/' conf/distributions
+testrun - -b . retrack 3<<EOF
+stdout
+-v1*=Retracking test1...
+$(ota 'test1' 'bloat+-0a9z.app')
+-x1*= Tracking test1|stupid|abacus...
+-x1*= Tracking test1|stupid|source...
+-x1*= Tracking test1|ugly|abacus...
+-x1*= Tracking test1|ugly|source...
+EOF
+testout "" -b . dumptracks
+cat > results.expected <<EOF
+Distribution: test1
+Source: 4test
+Version: 1:0b.1-1
+Files:
+ pool/stupid/4/4test/4test_0b.1-1.dsc s 1
+ pool/stupid/4/4test/4test_0b.1-1.tar.gz s 1
+ pool/stupid/4/4test/4test_0b.1-1_abacus.deb b 1
+ pool/stupid/4/4test/4test-addons_0b.1-1_all.deb a 1
+ pool/stupid/4/4test/4test_1:0b.1-1_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:0.9-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+
+Distribution: test1
+Source: bloat+-0a9z.app
+Version: 99:9.0-A:Z+a:z-0+aA.9zZ
+Files:
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz s 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb b 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb a 1
+ pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_99:9.0-A:Z+a:z-0+aA.9zZ_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: simple
+Version: 1
+Files:
+ pool/stupid/s/simple/simple_1_abacus.deb b 1
+ pool/stupid/s/simple/simple_1.dsc s 1
+ pool/stupid/s/simple/simple_1.tar.gz s 1
+ pool/ugly/s/simple/simple_1_abacus.deb b 1
+ pool/ugly/s/simple/simple-addons_1_all.deb a 1
+ pool/ugly/s/simple/simple_1.dsc s 1
+ pool/ugly/s/simple/simple_1.tar.gz s 1
+
+Distribution: test1
+Source: test
+Version: 1-2
+Files:
+ pool/stupid/t/test/test_1-2.dsc s 1
+ pool/stupid/t/test/test_1.orig.tar.gz s 1
+ pool/stupid/t/test/test_1-2.diff.gz s 1
+ pool/stupid/t/test/test_1-2_abacus.deb b 1
+ pool/stupid/t/test/test-addons_1-2_all.deb a 1
+ pool/stupid/t/test/test_1-2_source+all+abacus.changes c 0
+
+Distribution: test1
+Source: testb
+Version: 1:2-3
+Files:
+ pool/stupid/t/testb/testb_2-3.dsc s 1
+ pool/stupid/t/testb/testb_2.orig.tar.gz s 1
+ pool/stupid/t/testb/testb_2-3.diff.gz s 1
+ pool/stupid/t/testb/testb_2-3_abacus.deb b 1
+ pool/stupid/t/testb/testb-addons_2-3_all.deb a 1
+ pool/stupid/t/testb/testb_1:2-3_source+all+abacus.changes c 0
+
+EOF
+dodiff results.expected results
+
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testout "" -b . dumpreferences
+sort results > results.expected
+testrun - -b . rereference 3<<EOF
+stdout
+-v1*=Referencing test1...
+-v3*=Unlocking dependencies of test1|stupid|abacus...
+=Rereferencing test1|stupid|abacus...
+-v3*=Referencing test1|stupid|abacus...
+-v3*=Unlocking dependencies of test1|stupid|source...
+=Rereferencing test1|stupid|source...
+-v3*=Referencing test1|stupid|source...
+-v3*=Unlocking dependencies of test1|ugly|abacus...
+=Rereferencing test1|ugly|abacus...
+-v3*=Referencing test1|ugly|abacus...
+-v3*=Unlocking dependencies of test1|ugly|source...
+=Rereferencing test1|ugly|source...
+-v3*=Referencing test1|ugly|source...
+-v1*=Referencing test2...
+-v3*=Unlocking dependencies of test2|stupid|abacus...
+=Rereferencing test2|stupid|abacus...
+-v3*=Referencing test2|stupid|abacus...
+-v3*=Unlocking dependencies of test2|stupid|coal...
+=Rereferencing test2|stupid|coal...
+-v3*=Referencing test2|stupid|coal...
+-v3*=Unlocking dependencies of test2|stupid|source...
+=Rereferencing test2|stupid|source...
+-v3*=Referencing test2|stupid|source...
+-v3*=Unlocking dependencies of test2|ugly|abacus...
+=Rereferencing test2|ugly|abacus...
+-v3*=Referencing test2|ugly|abacus...
+-v3*=Unlocking dependencies of test2|ugly|coal...
+=Rereferencing test2|ugly|coal...
+-v3*=Referencing test2|ugly|coal...
+-v3*=Unlocking dependencies of test2|ugly|source...
+=Rereferencing test2|ugly|source...
+-v3*=Referencing test2|ugly|source...
+EOF
+testout "" -b . dumpreferences
+sort results > results.sorted
+dodiff results.expected results.sorted
+rm db/references.db
+testrun - -b . rereference 3<<EOF
+stdout
+-v1*=Referencing test1...
+-v3*=Unlocking dependencies of test1|stupid|abacus...
+=Rereferencing test1|stupid|abacus...
+-v3*=Referencing test1|stupid|abacus...
+-v3*=Unlocking dependencies of test1|stupid|source...
+=Rereferencing test1|stupid|source...
+-v3*=Referencing test1|stupid|source...
+-v3*=Unlocking dependencies of test1|ugly|abacus...
+=Rereferencing test1|ugly|abacus...
+-v3*=Referencing test1|ugly|abacus...
+-v3*=Unlocking dependencies of test1|ugly|source...
+=Rereferencing test1|ugly|source...
+-v3*=Referencing test1|ugly|source...
+-v1*=Referencing test2...
+-v3*=Unlocking dependencies of test2|stupid|abacus...
+=Rereferencing test2|stupid|abacus...
+-v3*=Referencing test2|stupid|abacus...
+-v3*=Unlocking dependencies of test2|stupid|coal...
+=Rereferencing test2|stupid|coal...
+-v3*=Referencing test2|stupid|coal...
+-v3*=Unlocking dependencies of test2|stupid|source...
+=Rereferencing test2|stupid|source...
+-v3*=Referencing test2|stupid|source...
+-v3*=Unlocking dependencies of test2|ugly|abacus...
+=Rereferencing test2|ugly|abacus...
+-v3*=Referencing test2|ugly|abacus...
+-v3*=Unlocking dependencies of test2|ugly|coal...
+=Rereferencing test2|ugly|coal...
+-v3*=Referencing test2|ugly|coal...
+-v3*=Unlocking dependencies of test2|ugly|source...
+=Rereferencing test2|ugly|source...
+-v3*=Referencing test2|ugly|source...
+EOF
+testout "" -b . dumpreferences
+sort results > results.sorted
+dodiff results.expected results.sorted
+testout "" -b . dumpreferences
+sort results > results.sorted
+dodiff results.expected results.sorted
+
+sed -i -e 's/^Tracking: minimal/Tracking: keep includechanges/' conf/distributions
+mv db/savedtracking.db db/tracking.db
+mv db/savedreferences.db db/references.db
+
+mkdir conf2
+testrun - -b . --confdir ./conf2 update 3<<EOF
+returns 254
+stderr
+*=Error opening config file './conf2/distributions': No such file or directory(2)
+=(Have you forgotten to specify a basedir by -b?
+=To only set the conf/ dir use --confdir)
+-v0*=There have been errors!
+EOF
+touch conf2/distributions
+testrun - -b . --confdir ./conf2 update 3<<EOF
+returns 249
+stderr
+*=No distribution definitions found in ./conf2/distributions!
+-v0*=There have been errors!
+EOF
+echo 'Codename: foo' > conf2/distributions
+testrun - -b . --confdir ./conf2 update 3<<EOF
+stderr
+*=Error parsing config file ./conf2/distributions, line 2:
+*=Required field 'Architectures' not found in
+*=distribution definition starting in line 1 and ending in line 1.
+-v0*=There have been errors!
+returns 249
+EOF
+echo "Architectures: abacus fingers" >> conf2/distributions
+testrun - -b . --confdir ./conf2 update 3<<EOF
+*=Error parsing config file ./conf2/distributions, line 3:
+*=Required field 'Components' not found in
+*=distribution definition starting in line 1 and ending in line 2.
+-v0*=There have been errors!
+returns 249
+EOF
+undefinedtargeterror() { # print the expected stderr lines for a stale packages-database error; $1 selects which hint variant is expected
+cat <<'EOF'
+*=This usually means you removed some component, architecture or even
+*=a whole distribution from conf/distributions.
+*=In that case you most likely want to call reprepro clearvanished to get rid
+*=of the databases belonging to those removed parts.
+*=(Another reason to get this error is using conf/ and db/ directories
+*= belonging to different reprepro repositories).
+EOF
+if $1 ; then # $1 = true: --ignore=undefinedtarget was NOT given, so expect the how-to-ignore hint
+cat << 'EOF'
+*=To ignore use --ignore=undefinedtarget.
+EOF
+else # $1 = false: the ignore flag WAS given, so expect the acknowledgement line instead
+cat << 'EOF'
+*=Ignoring as --ignore=undefinedtarget given.
+EOF
+fi
+}
+echo 'Components: unneeded bloated i386' >> conf2/distributions
+testrun - -b . --confdir ./conf2 update 3<<EOF
+*=Error: packages database contains unused 'test1|stupid|abacus' database.
+$(undefinedtargeterror true)
+-v0*=There have been errors!
+returns 255
+EOF
+testrun - -b . --confdir ./conf2 --ignore=undefinedtarget update 3<<EOF
+*=Error: packages database contains unused 'test1|stupid|abacus' database.
+$(undefinedtargeterror false)
+*=Error: packages database contains unused 'test1|ugly|abacus' database.
+*=Error: packages database contains unused 'test1|ugly|source' database.
+*=Error: packages database contains unused 'test1|stupid|source' database.
+*=Error: packages database contains unused 'test2|stupid|abacus' database.
+*=Error: packages database contains unused 'test2|stupid|coal' database.
+*=Error: packages database contains unused 'test2|stupid|source' database.
+*=Error: packages database contains unused 'test2|ugly|abacus' database.
+*=Error: packages database contains unused 'test2|ugly|coal' database.
+*=Error: packages database contains unused 'test2|ugly|source' database.
+*=Error: tracking database contains unused 'test1' database.
+*=This either means you removed a distribution from the distributions config
+*=file without calling clearvanished (or at least removealltracks), you
+*=experienced a bug in retrack in versions < 3.0.0, you found a new bug or your
+*=config does not belong to this database.
+*=To ignore use --ignore=undefinedtracking.
+-v0*=There have been errors!
+returns 255
+EOF
+testrun - -b . --confdir ./conf2 --ignore=undefinedtarget --ignore=undefinedtracking update 3<<EOF
+*=Error: packages database contains unused 'test1|stupid|abacus' database.
+$(undefinedtargeterror false)
+*=Error: tracking database contains unused 'test1' database.
+*=This either means you removed a distribution from the distributions config
+*=file without calling clearvanished (or at least removealltracks), you
+*=experienced a bug in retrack in versions < 3.0.0, you found a new bug or your
+*=config does not belong to this database.
+*=Ignoring as --ignore=undefinedtracking given.
+*=Error: packages database contains unused 'test1|ugly|abacus' database.
+*=Error: packages database contains unused 'test1|ugly|source' database.
+*=Error: packages database contains unused 'test1|stupid|source' database.
+*=Error: packages database contains unused 'test2|stupid|abacus' database.
+*=Error: packages database contains unused 'test2|stupid|coal' database.
+*=Error: packages database contains unused 'test2|stupid|source' database.
+*=Error: packages database contains unused 'test2|ugly|abacus' database.
+*=Error: packages database contains unused 'test2|ugly|coal' database.
+*=Error: packages database contains unused 'test2|ugly|source' database.
+*=Error opening config file './conf2/updates': No such file or directory(2)
+-v0*=There have been errors!
+returns 254
+EOF
+touch conf2/updates
+testrun - -b . --confdir ./conf2 --ignore=undefinedtarget --ignore=undefinedtracking --noskipold update 3<<EOF
+stderr
+*=Error: packages database contains unused 'test1|stupid|abacus' database.
+$(undefinedtargeterror false)
+*=Error: packages database contains unused 'test1|ugly|abacus' database.
+*=Error: packages database contains unused 'test1|ugly|source' database.
+*=Error: packages database contains unused 'test1|stupid|source' database.
+*=Error: packages database contains unused 'test2|stupid|abacus' database.
+*=Error: packages database contains unused 'test2|stupid|coal' database.
+*=Error: packages database contains unused 'test2|stupid|source' database.
+*=Error: packages database contains unused 'test2|ugly|abacus' database.
+*=Error: packages database contains unused 'test2|ugly|coal' database.
+*=Error: packages database contains unused 'test2|ugly|source' database.
+*=Error: tracking database contains unused 'test1' database.
+*=This either means you removed a distribution from the distributions config
+*=file without calling clearvanished (or at least removealltracks), you
+*=experienced a bug in retrack in versions < 3.0.0, you found a new bug or your
+*=config does not belong to this database.
+*=Ignoring as --ignore=undefinedtracking given.
+*=Nothing to do, because no distribution has an Update: field.
+EOF
+testrun - -b . clearvanished 3<<EOF
+stdout
+*=Deleting vanished identifier 'foo|bloated|abacus'.
+*=Deleting vanished identifier 'foo|bloated|fingers'.
+*=Deleting vanished identifier 'foo|i386|abacus'.
+*=Deleting vanished identifier 'foo|i386|fingers'.
+*=Deleting vanished identifier 'foo|unneeded|abacus'.
+*=Deleting vanished identifier 'foo|unneeded|fingers'.
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+echo "Format: 2.0" > broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Date' field!
+=To ignore use --ignore=missingfield.
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Date: today" >> broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Source' field
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Source: nowhere" >> broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Architecture' field
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Architecture: brain" >> broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Version' field
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Version: 0old" >> broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Distribution' field
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Distribution: old" >> broken.changes
+testrun - -b . include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Maintainer' field!
+=To ignore use --ignore=missingfield.
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Distribution: old" >> broken.changes
+testrun - -b . --ignore=missingfield include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Maintainer' field!
+=Ignoring as --ignore=missingfield given.
+*=In 'broken.changes': Missing 'Files' field!
+-v0*=There have been errors!
+returns 255
+EOF
+echo "Files:" >> broken.changes
+testrun - -b . --ignore=missingfield include test2 broken.changes 3<<EOF
+*=In 'broken.changes': Missing 'Maintainer' field!
+*=broken.changes: Not enough files in .changes!
+=Ignoring as --ignore=missingfield given.
+-v0*=There have been errors!
+returns 255
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+echo " $EMPTYMD5 section priority filename_version.tar.gz" >> broken.changes
+testrun - -b . --ignore=missingfield include test2 broken.changes 3<<EOF
+=In 'broken.changes': Missing 'Maintainer' field!
+=Ignoring as --ignore=missingfield given.
+*=Warning: File 'filename_version.tar.gz' looks like source but does not start with 'nowhere_'!
+=I hope you know what you do.
+# grr, this message really needs improving...
+*=.changes put in a distribution not listed within it!
+=To ignore use --ignore=wrongdistribution.
+-v0*=There have been errors!
+returns 255
+EOF
+cp conf/distributions conf/distributions.old
+cat >> conf/distributions <<EOF
+
+Codename: getmoreatoms
+Architectures: brain
+Components: test
+EOF
+testrun - -b . --ignore=unusedarch --ignore=surprisingarch --ignore=wrongdistribution --ignore=missingfield include test2 broken.changes 3<<EOF
+=Ignoring as --ignore=missingfield given.
+=In 'broken.changes': Missing 'Maintainer' field!
+=Warning: File 'filename_version.tar.gz' looks like source but does not start with 'nowhere_'!
+=I hope you know what you do.
+*=.changes put in a distribution not listed within it!
+*=Ignoring as --ignore=wrongdistribution given.
+*='filename_version.tar.gz' looks like part of an source package, but no dsc file listed in the .changes file!
+-v0*=There have been errors!
+returns 255
+EOF
+
+echo " $EMPTYMD5 section priority nowhere_0old.dsc" >> broken.changes
+touch nowhere_0old.dsc
+
+testrun - -b . --ignore=unusedarch --ignore=surprisingarch --ignore=wrongdistribution --ignore=missingfield include test2 broken.changes 3<<EOF
+=Ignoring as --ignore=missingfield given.
+=In 'broken.changes': Missing 'Maintainer' field!
+=Warning: File 'filename_version.tar.gz' looks like source but does not start with 'nowhere_'!
+=I hope you know what you do.
+*=.changes put in a distribution not listed within it!
+*=Ignoring as --ignore=wrongdistribution given.
+*=Architecture header lists architecture 'brain', but no files for it!
+*=Ignoring as --ignore=unusedarch given.
+*='filename_version.tar.gz' looks like architecture 'source', but this is not listed in the Architecture-Header!
+*='nowhere_0old.dsc' looks like architecture 'source', but this is not listed in the Architecture-Header!
+*=Ignoring as --ignore=surprisingarch given.
+*=Cannot find file './filename_version.tar.gz' needed by 'broken.changes'!
+-v0*=There have been errors!
+returns 249
+EOF
+
+touch filename_version.tar.gz
+testrun - -b . --ignore=unusedarch --ignore=surprisingarch --ignore=wrongdistribution --ignore=missingfield include test2 broken.changes 3<<EOF
+=Ignoring as --ignore=missingfield given.
+=In 'broken.changes': Missing 'Maintainer' field!
+=Warning: File 'filename_version.tar.gz' looks like source but does not start with 'nowhere_'!
+=Warning: File 'nowhere_0old.dsc' looks like source but does not start with 'nowhere_'!
+=Warning: File 'nowhere_0old.dsc' looks like source but does not start with 'nowhere_'!
+=I hope you know what you do.
+*=.changes put in a distribution not listed within it!
+*=Ignoring as --ignore=wrongdistribution given.
+*=Architecture header lists architecture 'brain', but no files for it!
+*=Ignoring as --ignore=unusedarch given.
+*='filename_version.tar.gz' looks like architecture 'source', but this is not listed in the Architecture-Header!
+*='nowhere_0old.dsc' looks like architecture 'source', but this is not listed in the Architecture-Header!
+*=Ignoring as --ignore=surprisingarch given.
+stdout
+-v2*=Created directory "./pool/stupid/n"
+-v2*=Created directory "./pool/stupid/n/nowhere"
+$(ofa 'pool/stupid/n/nowhere/filename_version.tar.gz')
+$(ofa 'pool/stupid/n/nowhere/nowhere_0old.dsc')
+stderr
+*=Unexpected empty file './pool/stupid/n/nowhere/nowhere_0old.dsc'!
+-v0*=There have been errors!
+returns 255
+stdout
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/n/nowhere/filename_version.tar.gz')
+$(ofd 'pool/stupid/n/nowhere/nowhere_0old.dsc')
+-v2*=removed now empty directory ./pool/stupid/n/nowhere
+-v2*=removed now empty directory ./pool/stupid/n
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+rm nowhere_0old.dsc
+mv conf/distributions.old conf/distributions
+testrun - -b . clearvanished 3<<EOF
+stderr
+stdout
+*=Deleting vanished identifier 'getmoreatoms|test|brain'.
+EOF
+mkdir -p pool/stupid/n/nowhere
+dodo test ! -f pool/stupid/n/nowhere/filename_version.tar.gz
+cp filename_version.tar.gz pool/stupid/n/nowhere/filename_version.tar.gz
+testrun - -b . _detect pool/stupid/n/nowhere/filename_version.tar.gz 3<<EOF
+stdout
+$(ofa 'pool/stupid/n/nowhere/filename_version.tar.gz')
+-v0*=1 files were added but not used.
+-v0*=The next deleteunreferenced call will delete them.
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testout "" -b . dumpunreferenced
+cat >results.expected <<EOF
+pool/stupid/n/nowhere/filename_version.tar.gz
+EOF
+dodiff results.expected results
+testrun - -b . deleteunreferenced 3<<EOF
+stdout
+$(ofd 'pool/stupid/n/nowhere/filename_version.tar.gz')
+-v2*=removed now empty directory ./pool/stupid/n/nowhere
+-v2*=removed now empty directory ./pool/stupid/n
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testout "" -b . dumpreferences
+# first remove file, then try to remove the package
+testrun - -b . _forget pool/ugly/s/simple/simple_1_abacus.deb 3<<EOF
+stdout
+$(ofd 'pool/ugly/s/simple/simple_1_abacus.deb' false)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+testrun - -b . remove test1 simple 3<<EOF
+# ??? (unclear why this warning is expected here — verify against tracking code)
+=Warning: tracking database of test1 missed files for simple_1.
+stdout
+$(opd 'simple' unset test1 stupid abacus deb)
+$(opd 'simple' unset test1 stupid source dsc)
+$(opd 'simple' unset test1 ugly abacus deb)
+$(opd 'simple' unset test1 ugly source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test1|stupid|abacus'...
+-v6*= replacing './dists/test1/stupid/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|stupid|source'...
+-v6*= replacing './dists/test1/stupid/source/Sources' (gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|abacus'...
+-v6*= replacing './dists/test1/ugly/binary-abacus/Packages' (uncompressed,gzipped,bzip2ed)
+-v6*= looking for changes in 'test1|ugly|source'...
+-v6*= replacing './dists/test1/ugly/source/Sources' (gzipped,bzip2ed)
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log1 <<EOF
+DATESTR remove test1 deb stupid abacus simple 1
+DATESTR remove test1 dsc stupid source simple 1
+DATESTR remove test1 deb ugly abacus simple 1
+DATESTR remove test1 dsc ugly source simple 1
+EOF
+testrun - -b . remove test2 simple 3<<EOF
+*=Unable to forget unknown filekey 'pool/ugly/s/simple/simple_1_abacus.deb'.
+-v0*=There have been errors!
+stdout
+$(opd 'simple' unset test2 ugly abacus deb)
+$(opd 'simple' unset test2 ugly source dsc)
+-v0=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= replacing './dists/test2/ugly/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+-v6*= replacing './dists/test2/ugly/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'change'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'change'
+-v0=Deleting files no longer referenced...
+$(ofd 'pool/ugly/s/simple/simple_1.dsc')
+$(ofd 'pool/ugly/s/simple/simple_1.tar.gz')
+-v1*=deleting and forgetting pool/ugly/s/simple/simple_1_abacus.deb
+returns 249
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR remove test2 deb ugly abacus simple 1
+DATESTR remove test2 dsc ugly source simple 1
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+cat > broken.changes <<EOF
+Format: -1.0
+Date: yesterday
+Source: differently
+Version: 0another
+Architecture: source abacus
+Urgency: super-hyper-duper-important
+Maintainer: still me <guess@who>
+Description: missing
+Changes: missing
+Binary: none and nothing
+Distribution: test2
+Files:
+ `md5sum 4test_0b.1-1.dsc| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.dsc` a b differently_0another.dsc
+ `md5sum 4test_0b.1-1_abacus.deb| cut -d" " -f 1` `stat -c%s 4test_0b.1-1_abacus.deb` a b 4test_0b.1-1_abacus.deb
+EOF
+# TODO: make it work without this copy.
+cp 4test_0b.1-1.dsc differently_0another.dsc
+testrun - -b . include test2 broken.changes 3<<EOF
+=Looks like source but does not start with 'differently_' as I would have guessed!
+=I hope you know what you do.
+*=I don't know what to do having a .dsc without a .diff.gz or .tar.gz in 'broken.changes'!
+-v0*=There have been errors!
+returns 255
+EOF
+cat >> broken.changes <<EOF
+ `md5sum 4test_0b.1-1.tar.gz| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.tar.gz` a b 4test_0b.1-1.tar.gz
+EOF
+testrun - -b . include test2 broken.changes 3<<EOF
+*=Warning: File '4test_0b.1-1.tar.gz' looks like source but does not start with 'differently_'!
+=I hope you know what you do.
+*='./pool/stupid/d/differently/4test_0b.1-1_abacus.deb' has packagename '4test' not listed in the .changes file!
+*=To ignore use --ignore=surprisingbinary.
+-v0*=There have been errors!
+stdout
+-v2*=Created directory "./pool/stupid/d"
+-v2*=Created directory "./pool/stupid/d/differently"
+$(ofa 'pool/stupid/d/differently/4test_0b.1-1.tar.gz')
+$(ofa 'pool/stupid/d/differently/4test_0b.1-1_abacus.deb')
+$(ofa 'pool/stupid/d/differently/differently_0another.dsc')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/d/differently/4test_0b.1-1.tar.gz')
+$(ofd 'pool/stupid/d/differently/4test_0b.1-1_abacus.deb')
+$(ofd 'pool/stupid/d/differently/differently_0another.dsc')
+-v2*=removed now empty directory ./pool/stupid/d/differently
+-v2*=removed now empty directory ./pool/stupid/d
+returns 255
+EOF
+testrun - -b . --ignore=surprisingbinary include test2 broken.changes 3<<EOF
+*=Warning: File '4test_0b.1-1.tar.gz' looks like source but does not start with 'differently_'!
+=I hope you know what you do.
+*='./pool/stupid/d/differently/4test_0b.1-1_abacus.deb' has packagename '4test' not listed in the .changes file!
+*=Ignoring as --ignore=surprisingbinary given.
+*='./pool/stupid/d/differently/4test_0b.1-1_abacus.deb' lists source package '4test', but .changes says it is 'differently'!
+-v0*=There have been errors!
+stdout
+-v2*=Created directory "./pool/stupid/d"
+-v2*=Created directory "./pool/stupid/d/differently"
+$(ofa 'pool/stupid/d/differently/4test_0b.1-1.tar.gz')
+$(ofa 'pool/stupid/d/differently/4test_0b.1-1_abacus.deb')
+$(ofa 'pool/stupid/d/differently/differently_0another.dsc')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/d/differently/4test_0b.1-1.tar.gz')
+$(ofd 'pool/stupid/d/differently/4test_0b.1-1_abacus.deb')
+$(ofd 'pool/stupid/d/differently/differently_0another.dsc')
+-v2*=removed now empty directory ./pool/stupid/d/differently
+-v2*=removed now empty directory ./pool/stupid/d
+returns 255
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+cat > broken.changes <<EOF
+Format: -1.0
+Date: yesterday
+Source: 4test
+Version: 0orso
+Architecture: source abacus
+Urgency: super-hyper-duper-important
+Maintainer: still me <guess@who>
+Description: missing
+Changes: missing
+Binary: 4test
+Distribution: test2
+Files:
+ `md5sum 4test_0b.1-1.dsc| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.dsc` a b 4test_0orso.dsc
+ `md5sum 4test_0b.1-1_abacus.deb| cut -d" " -f 1` `stat -c%s 4test_0b.1-1_abacus.deb` a b 4test_0b.1-1_abacus.deb
+ `md5sum 4test_0b.1-1.tar.gz| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.tar.gz` a b 4test_0b.1-1.tar.gz
+EOF
+cp 4test_0b.1-1.dsc 4test_0orso.dsc
+testrun - -b . include test2 broken.changes 3<<EOF
+*='./pool/stupid/4/4test/4test_0b.1-1_abacus.deb' lists source version '1:0b.1-1', but .changes says it is '0orso'!
+*=To ignore use --ignore=wrongsourceversion.
+-v0*=There have been errors!
+stdout
+$(ofa 'pool/stupid/4/4test/4test_0orso.dsc')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/4/4test/4test_0orso.dsc')
+returns 255
+EOF
+testrun - -b . --ignore=wrongsourceversion include test2 broken.changes 3<<EOF
+*='./pool/stupid/4/4test/4test_0b.1-1_abacus.deb' lists source version '1:0b.1-1', but .changes says it is '0orso'!
+*=Ignoring as --ignore=wrongsourceversion given.
+*='4test_0orso.dsc' says it is version '1:0b.1-1', while .changes file said it is '0orso'
+*=To ignore use --ignore=wrongversion.
+-v0*=There have been errors!
+stdout
+$(ofa 'pool/stupid/4/4test/4test_0orso.dsc')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/stupid/4/4test/4test_0orso.dsc')
+returns 255
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checknolog log1
+checknolog log2
+testrun - -b . --ignore=wrongsourceversion --ignore=wrongversion include test2 broken.changes 3<<EOF
+*='./pool/stupid/4/4test/4test_0b.1-1_abacus.deb' lists source version '1:0b.1-1', but .changes says it is '0orso'!
+*=Ignoring as --ignore=wrongsourceversion given.
+*='4test_0orso.dsc' says it is version '1:0b.1-1', while .changes file said it is '0orso'
+*=Ignoring as --ignore=wrongversion given.
+stdout
+$(ofa 'pool/stupid/4/4test/4test_0orso.dsc')
+$(opa '4test' x 'test2' 'stupid' 'abacus' 'deb')
+$(opa '4test' unset 'test2' 'stupid' 'source' 'dsc')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v6*= replacing './dists/test2/stupid/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= replacing './dists/test2/stupid/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'change'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'change'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR add test2 deb stupid abacus 4test 1:0b.1-1
+DATESTR add test2 dsc stupid source 4test 1:0b.1-1
+EOF
+testrun - -b . remove test2 4test 3<<EOF
+stdout
+$(opd '4test' unset test2 stupid abacus deb)
+$(opd '4test' unset test2 stupid source dsc)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'test2|stupid|abacus'...
+-v6*= replacing './dists/test2/stupid/binary-abacus/Packages' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|stupid|coal'...
+-v6*= looking for changes in 'test2|stupid|source'...
+-v6*= replacing './dists/test2/stupid/source/Sources' (uncompressed,gzipped,script: bzip.example,testhook)
+-v6*= looking for changes in 'test2|ugly|abacus'...
+-v6*= looking for changes in 'test2|ugly|coal'...
+-v6*= looking for changes in 'test2|ugly|source'...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/stupid/4/4test/4test_0orso.dsc')
+*=testhook got 4: './dists/test2' 'stupid/binary-abacus/Packages.new' 'stupid/binary-abacus/Packages' 'change'
+*=testhook got 4: './dists/test2' 'stupid/binary-coal/Packages.new' 'stupid/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'stupid/source/Sources.new' 'stupid/source/Sources' 'change'
+*=testhook got 4: './dists/test2' 'ugly/binary-abacus/Packages.new' 'ugly/binary-abacus/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/binary-coal/Packages.new' 'ugly/binary-coal/Packages' 'old'
+*=testhook got 4: './dists/test2' 'ugly/source/Sources.new' 'ugly/source/Sources' 'old'
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+checklog log2 <<EOF
+DATESTR remove test2 deb stupid abacus 4test 1:0b.1-1
+DATESTR remove test2 dsc stupid source 4test 1:0b.1-1
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+checknolog log1
+checknolog log2
+
+testout "" -b . dumptracks
+# TODO: check here for what should be here,
+# check the other stuff, too
+#dodiff /dev/null results
+cat > conf/distributions <<EOF
+Codename: X
+Architectures: none
+Components: test
+EOF
+testrun - -b . --delete clearvanished 3<<EOF
+stderr
+-v4*=Strange, 'X|test|none' does not appear in packages.db yet.
+stdout
+*=Deleting vanished identifier 'test1|stupid|abacus'.
+*=Deleting vanished identifier 'test1|stupid|source'.
+*=Deleting vanished identifier 'test1|ugly|abacus'.
+*=Deleting vanished identifier 'test1|ugly|source'.
+*=Deleting vanished identifier 'test2|stupid|abacus'.
+*=Deleting vanished identifier 'test2|stupid|coal'.
+*=Deleting vanished identifier 'test2|stupid|source'.
+*=Deleting vanished identifier 'test2|ugly|abacus'.
+*=Deleting vanished identifier 'test2|ugly|coal'.
+*=Deleting vanished identifier 'test2|ugly|source'.
+*=Deleting tracking data for vanished distribution 'test1'.
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/stupid/4/4test/4test-addons_0b.1-1_all.deb')
+$(ofd 'pool/stupid/4/4test/4test_0b.1-1_abacus.deb')
+$(ofd 'pool/stupid/4/4test/4test_0b.1-1.dsc')
+$(ofd 'pool/stupid/4/4test/4test_0b.1-1.tar.gz')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_9.0-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofd 'pool/ugly/b/bloat+-0a9z.app/bloat+-0a9z.app_9.0-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofd 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app-addons_0.9-A:Z+a:z-0+aA.9zZ_all.deb')
+$(ofd 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ_abacus.deb')
+$(ofd 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.dsc')
+$(ofd 'pool/stupid/b/bloat+-0a9z.app/bloat+-0a9z.app_0.9-A:Z+a:z-0+aA.9zZ.tar.gz')
+$(ofd 'pool/stupid/s/simple/simple_1_abacus.deb')
+$(ofd 'pool/stupid/s/simple/simple_1.dsc')
+$(ofd 'pool/stupid/s/simple/simple_1.tar.gz')
+$(ofd 'pool/stupid/t/test/test-addons_1-2_all.deb')
+$(ofd 'pool/stupid/t/test/test_1-2_abacus.deb')
+$(ofd 'pool/stupid/t/test/test_1-2.dsc')
+$(ofd 'pool/stupid/t/test/test_1.orig.tar.gz')
+$(ofd 'pool/stupid/t/test/test_1-2.diff.gz')
+$(ofd 'pool/stupid/t/testb/testb-addons_2-3_all.deb')
+$(ofd 'pool/stupid/t/testb/testb_2-3_abacus.deb')
+$(ofd 'pool/stupid/t/testb/testb_2-3.dsc')
+$(ofd 'pool/stupid/t/testb/testb_2.orig.tar.gz')
+$(ofd 'pool/stupid/t/testb/testb_2-3.diff.gz')
+$(ofd 'pool/ugly/s/simple/simple-addons_1_all.deb')
+-v2*=removed now empty directory ./pool/stupid/4/4test
+-v2*=removed now empty directory ./pool/stupid/4
+-v2*=removed now empty directory ./pool/stupid/b/bloat+-0a9z.app
+-v2*=removed now empty directory ./pool/stupid/b
+-v2*=removed now empty directory ./pool/stupid/s/simple
+-v2*=removed now empty directory ./pool/stupid/s
+-v2*=removed now empty directory ./pool/stupid/t/testb
+-v2*=removed now empty directory ./pool/stupid/t/test
+-v2*=removed now empty directory ./pool/stupid/t
+-v2*=removed now empty directory ./pool/stupid
+-v2*=removed now empty directory ./pool/ugly/b/bloat+-0a9z.app
+-v2*=removed now empty directory ./pool/ugly/b
+-v2*=removed now empty directory ./pool/ugly/s/simple
+-v2*=removed now empty directory ./pool/ugly/s
+-v2*=removed now empty directory ./pool/ugly
+-v2*=removed now empty directory ./pool
+EOF
+REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check
+
+checknolog logfile
+checknolog log1
+checknolog log2
+
+testout "" -b . dumptracks
+dodiff /dev/null results
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+
+rm -r dists db conf conf2 logs lists
+rm 4test* bloat* simple* test_* test-* testb* differently* filename_version.tar.gz
+rm test1 test2 test2.changes broken.changes test.changes fakesuper db.out*
+rm results results.expected results.log.expected includeerror.rules results.sorted
+dodo test ! -d pool
+
+testsuccess
diff --git a/tests/various3.test b/tests/various3.test
new file mode 100644
index 0000000..53fab19
--- /dev/null
+++ b/tests/various3.test
@@ -0,0 +1,982 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+dodo test ! -e dists
+mkdir conf db logs lists
+
+for tracking in true false ; do
+if $tracking ; then
+echo "this is the test variant with tracking on"
+else
+echo "this is the test variant with tracking off"
+fi
+
+if $tracking ; then
+cat >> conf/distributions <<EOF
+
+Codename: a
+Architectures: abacus source
+Components: all
+Tracking: minimal
+Log: logab
+
+Codename: b
+Architectures: abacus
+Components: all
+Pull: froma
+Log: logab
+EOF
+if test x"${REPREPROOPTIONS#*--verbosedb}" != x"$REPREPROOPTIONS" ; then
+ TRACKINGTESTOPTIONS="-D t=1 -D u=1"
+else
+ TRACKINGTESTOPTIONS="-D t=0 -D u=1"
+fi
+else
+cat >> conf/distributions <<EOF
+
+Codename: a
+Architectures: abacus source
+Components: all
+Log: logab
+
+Codename: b
+Architectures: abacus
+Components: all
+Pull: froma
+Log: logab
+EOF
+TRACKINGTESTOPTIONS="-D t=0 -D u=0"
+fi
+
+checknolog logab
+cat > conf/pulls <<EOF
+Name: froma
+From: a
+Architectures: froma>toa froma>toa2 froma2>toa2
+Components: c1 c2
+UDebComponents: u1 u2
+EOF
+testrun - -b . --export=changed pull a b 3<<EOF
+stderr
+*=Error parsing ./conf/pulls, line 3, column 16: Unknown architecture 'froma' in Architectures.
+-v0*=There have been errors!
+return 255
+EOF
+cp conf/distributions conf/distributions.old
+cat >> conf/distributions <<EOF
+
+Codename: moreatoms
+Architectures: froma froma2 toa toa2
+Components: c1 c2 u1 u2
+EOF
+
+testrun - -b . --export=changed pull a b 3<<EOF
+stderr
+*=(This will simply be ignored and is not even checked when using --fast).
+*=Warning: pull rule 'froma' wants to get something from architecture 'froma',
+*=Warning: pull rule 'froma' wants to get something from architecture 'froma2',
+*=but there is no such architecture in distribution 'a'.
+*=Warning: pull rule 'froma' wants to get something from component 'c1',
+*=Warning: pull rule 'froma' wants to get something from component 'c2',
+*=but there is no such component in distribution 'a'.
+*=Warning: pull rule 'froma' wants to get something from udeb component 'u1',
+*=Warning: pull rule 'froma' wants to get something from udeb component 'u2',
+*=but there is no such udeb component in distribution 'a'.
+*=Warning: pull rule 'froma' wants to put something into architecture 'toa',
+*=but no distribution using this has such an architecture.
+*=Warning: pull rule 'froma' wants to put something into architecture 'toa2',
+*=Warning: pull rule 'froma' wants to put something into component 'c1',
+*=but no distribution using this has such an component.
+*=Warning: pull rule 'froma' wants to put something into component 'c2',
+*=Warning: pull rule 'froma' wants to put something into udeb component 'u1',
+*=but no distribution using this has such an udeb component.
+*=Warning: pull rule 'froma' wants to put something into udeb component 'u2',
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+EOF
+mv conf/distributions.old conf/distributions
+testrun - -b . clearvanished 3<<EOF
+stderr
+stdout
+*=Deleting vanished identifier 'moreatoms|c1|froma'.
+*=Deleting vanished identifier 'moreatoms|c1|froma2'.
+*=Deleting vanished identifier 'moreatoms|c1|toa'.
+*=Deleting vanished identifier 'moreatoms|c1|toa2'.
+*=Deleting vanished identifier 'moreatoms|c2|froma'.
+*=Deleting vanished identifier 'moreatoms|c2|froma2'.
+*=Deleting vanished identifier 'moreatoms|c2|toa'.
+*=Deleting vanished identifier 'moreatoms|c2|toa2'.
+*=Deleting vanished identifier 'moreatoms|u1|froma'.
+*=Deleting vanished identifier 'moreatoms|u1|froma2'.
+*=Deleting vanished identifier 'moreatoms|u1|toa'.
+*=Deleting vanished identifier 'moreatoms|u1|toa2'.
+*=Deleting vanished identifier 'moreatoms|u2|froma'.
+*=Deleting vanished identifier 'moreatoms|u2|froma2'.
+*=Deleting vanished identifier 'moreatoms|u2|toa'.
+*=Deleting vanished identifier 'moreatoms|u2|toa2'.
+EOF
+cat > conf/pulls <<EOF
+Name: froma
+From: a
+EOF
+testrun - -b . --export=changed pull a b 3<<EOF
+stderr
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+EOF
+checklog logab < /dev/null
+test ! -d dists/a
+test ! -d dists/b
+testrun - -b . --export=lookedat pull b 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+-v0*=Exporting indices...
+-v2*=Created directory "./dists"
+-v2*=Created directory "./dists/b"
+-v2*=Created directory "./dists/b/all"
+-v2*=Created directory "./dists/b/all/binary-abacus"
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= creating './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+EOF
+checklog logab < /dev/null
+test ! -d dists/a
+test -d dists/b
+testrun - -b . --export=lookedat pull a b 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+-v0*=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v2*=Created directory "./dists/a"
+-v2*=Created directory "./dists/a/all"
+-v2*=Created directory "./dists/a/all/binary-abacus"
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= creating './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/a/all/source"
+-v6*= looking for changes in 'a|all|source'...
+-v6*= creating './dists/a/all/source/Sources' (gzipped)
+EOF
+checklog logab < /dev/null
+test -d dists/a
+test -d dists/b
+rm -r dists/a dists/b
+DISTRI=a PACKAGE=aa EPOCH="" VERSION=1 REVISION="-1" SECTION="stupid/base" genpackage.sh
+testrun - -b . --export=never --delete --delete include a test.changes 3<<EOF
+*=Warning: database 'a|all|abacus' was modified but no index file was exported.
+*=Warning: database 'a|all|source' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+stdout
+-v2*=Created directory "./pool"
+-v2*=Created directory "./pool/all"
+-v2*=Created directory "./pool/all/a"
+-v2*=Created directory "./pool/all/a/aa"
+$(ofa 'pool/all/a/aa/aa-addons_1-1_all.deb')
+$(ofa 'pool/all/a/aa/aa_1-1_abacus.deb')
+$(ofa 'pool/all/a/aa/aa_1-1.tar.gz')
+$(ofa 'pool/all/a/aa/aa_1-1.dsc')
+$(opa 'aa-addons' x 'a' 'all' 'abacus' 'deb')
+$(opa 'aa' x 'a' 'all' 'abacus' 'deb')
+$(opa 'aa' x 'a' 'all' 'source' 'dsc')
+$(otta 'a' 'aa')
+-v5*=Deleting 'test.changes'.
+EOF
+checklog logab << EOF
+DATESTR add a deb all abacus aa-addons 1-1
+DATESTR add a deb all abacus aa 1-1
+DATESTR add a dsc all source aa 1-1
+EOF
+test ! -d dists/a
+test ! -d dists/b
+test ! -f test.changes
+test ! -f aa_1-1_abacus.deb
+test ! -f aa_1-1.dsc
+test ! -f aa_1-1.tar.gz
+test ! -f aa-addons_1-1_all.deb
+test -f pool/all/a/aa/aa-addons_1-1_all.deb
+test -f pool/all/a/aa/aa_1-1_abacus.deb
+test -f pool/all/a/aa/aa_1-1.dsc
+test -f pool/all/a/aa/aa_1-1.tar.gz
+testout "" -b . dumptracks a
+cat >results.expected <<END
+Distribution: a
+Source: aa
+Version: 1-1
+Files:
+ pool/all/a/aa/aa_1-1.dsc s 1
+ pool/all/a/aa/aa_1-1.tar.gz s 1
+ pool/all/a/aa/aa_1-1_abacus.deb b 1
+ pool/all/a/aa/aa-addons_1-1_all.deb a 1
+
+END
+if $tracking; then dodiff results.expected results ; else dodiff /dev/null results ; fi
+testrun - -b . export a 3<<EOF
+stdout
+-v1*=Exporting a...
+-v2*=Created directory "./dists/a"
+-v2*=Created directory "./dists/a/all"
+-v2*=Created directory "./dists/a/all/binary-abacus"
+-v6*= exporting 'a|all|abacus'...
+-v6*= creating './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/a/all/source"
+-v6*= exporting 'a|all|source'...
+-v6*= creating './dists/a/all/source/Sources' (gzipped)
+EOF
+checknolog logab
+dogrep "Version: 1-1" dists/a/all/binary-abacus/Packages
+rm -r dists/a
+testout - -b . dumppull b 3<<EOF
+stderr
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+add 'aa' - '1-1' 'froma'
+add 'aa-addons' - '1-1' 'froma'
+EOF
+dodiff results results.expected
+testrun - -b . --export=changed pull a b 3<<EOF
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+$(opa 'aa' x 'b' 'all' 'abacus' 'deb')
+$(opa 'aa-addons' x 'b' 'all' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/b"
+-v2*=Created directory "./dists/b/all"
+-v2*=Created directory "./dists/b/all/binary-abacus"
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= creating './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+EOF
+checklog logab << EOF
+DATESTR add b deb all abacus aa 1-1
+DATESTR add b deb all abacus aa-addons 1-1
+EOF
+test ! -d dists/a
+test -d dists/b
+dogrep "Version: 1-1" dists/b/all/binary-abacus/Packages
+DISTRI=a PACKAGE=aa EPOCH="" VERSION=1 REVISION="-2" SECTION="stupid/base" genpackage.sh
+testrun - -b . --export=changed --delete include a test.changes 3<<EOF
+stdout
+$(ofa 'pool/all/a/aa/aa-addons_1-2_all.deb')
+$(ofa 'pool/all/a/aa/aa_1-2_abacus.deb')
+$(ofa 'pool/all/a/aa/aa_1-2.tar.gz')
+$(ofa 'pool/all/a/aa/aa_1-2.dsc')
+$(opu 'aa-addons' x x 'a' 'all' 'abacus' 'deb')
+$(opu 'aa' x x 'a' 'all' 'abacus' 'deb')
+$(opu 'aa' x x 'a' 'all' 'source' 'dsc')
+$(otta 'a' 'aa')
+$(ottd 'aa' '1-1' 'a')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/a"
+-v2*=Created directory "./dists/a/all"
+-v2*=Created directory "./dists/a/all/binary-abacus"
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= creating './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/a/all/source"
+-v6*= looking for changes in 'a|all|source'...
+-v6*= creating './dists/a/all/source/Sources' (gzipped)
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa_1-1.dsc')
+$(ofd 'pool/all/a/aa/aa_1-1.tar.gz')
+EOF
+checklog logab << EOF
+DATESTR replace a deb all abacus aa-addons 1-2 1-1
+DATESTR replace a deb all abacus aa 1-2 1-1
+DATESTR replace a dsc all source aa 1-2 1-1
+EOF
+test -f test.changes
+test ! -f aa_1-2_abacus.deb
+test ! -f aa_1-2.dsc
+test ! -f aa_1-2.tar.gz
+test ! -f aa-addons_1-2_all.deb
+test -d dists/a
+dogrep "Version: 1-2" dists/a/all/binary-abacus/Packages
+dogrep "Version: 1-1" dists/b/all/binary-abacus/Packages
+testout "" -b . dumptracks a
+cat >results.expected <<END
+Distribution: a
+Source: aa
+Version: 1-2
+Files:
+ pool/all/a/aa/aa_1-2.dsc s 1
+ pool/all/a/aa/aa_1-2.tar.gz s 1
+ pool/all/a/aa/aa_1-2_abacus.deb b 1
+ pool/all/a/aa/aa-addons_1-2_all.deb a 1
+
+END
+if $tracking; then dodiff results.expected results ; else dodiff /dev/null results ; fi
+rm -r dists/a dists/b
+testout - -b . dumppull b 3<<EOF
+stderr
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+update 'aa' '1-1' '1-2' 'froma'
+update 'aa-addons' '1-1' '1-2' 'froma'
+EOF
+dodiff results results.expected
+testrun - -b . --export=changed pull a b 3<<EOF
+stderr
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'aa' x x 'b' 'all' 'abacus' 'deb')
+$(opu 'aa-addons' x x 'b' 'all' 'abacus' 'deb')
+-v0=Exporting indices...
+-v2*=Created directory "./dists/b"
+-v2*=Created directory "./dists/b/all"
+-v2*=Created directory "./dists/b/all/binary-abacus"
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= creating './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa_1-1_abacus.deb')
+$(ofd 'pool/all/a/aa/aa-addons_1-1_all.deb')
+EOF
+checklog logab << EOF
+DATESTR replace b deb all abacus aa 1-2 1-1
+DATESTR replace b deb all abacus aa-addons 1-2 1-1
+EOF
+test ! -d dists/a
+test -d dists/b
+dogrep "Version: 1-2" dists/b/all/binary-abacus/Packages
+DISTRI=a PACKAGE=aa EPOCH="" VERSION=1 REVISION="-3" SECTION="stupid/base" genpackage.sh
+testrun - -b . --export=never include a test.changes 3<<EOF
+*=Warning: database 'a|all|abacus' was modified but no index file was exported.
+*=Warning: database 'a|all|source' was modified but no index file was exported.
+*=Changes will only be visible after the next 'export'!
+stdout
+$(ofa 'pool/all/a/aa/aa-addons_1-3_all.deb')
+$(ofa 'pool/all/a/aa/aa_1-3_abacus.deb')
+$(ofa 'pool/all/a/aa/aa_1-3.tar.gz')
+$(ofa 'pool/all/a/aa/aa_1-3.dsc')
+$(opu 'aa-addons' x x 'a' 'all' 'abacus' 'deb')
+$(opu 'aa' x x 'a' 'all' 'abacus' 'deb')
+$(opu 'aa' x x 'a' 'all' 'source' 'dsc')
+$(otta 'a' 'aa')
+$(ottd 'aa' '1-2' 'a')
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa_1-2.dsc')
+$(ofd 'pool/all/a/aa/aa_1-2.tar.gz')
+EOF
+checklog logab << EOF
+DATESTR replace a deb all abacus aa-addons 1-3 1-2
+DATESTR replace a deb all abacus aa 1-3 1-2
+DATESTR replace a dsc all source aa 1-3 1-2
+EOF
+test -f test.changes
+test -f aa_1-3_abacus.deb
+test -f aa_1-3.dsc
+test -f aa_1-3.tar.gz
+test -f aa-addons_1-3_all.deb
+test ! -f pool/all/a/aa/aa_1-2.dsc
+test -f pool/all/a/aa/aa_1-2_abacus.deb # still in b
+testout "" -b . dumptracks a
+cat >results.expected <<END
+Distribution: a
+Source: aa
+Version: 1-3
+Files:
+ pool/all/a/aa/aa_1-3.dsc s 1
+ pool/all/a/aa/aa_1-3.tar.gz s 1
+ pool/all/a/aa/aa_1-3_abacus.deb b 1
+ pool/all/a/aa/aa-addons_1-3_all.deb a 1
+
+END
+if $tracking; then dodiff results.expected results ; else dodiff /dev/null results ; fi
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+DISTRI=a PACKAGE=ab EPOCH="" VERSION=2 REVISION="-1" SECTION="stupid/base" genpackage.sh
+testrun - -b . --delete --delete --export=never include a test.changes 3<<EOF
+stderr
+*=Warning: database 'a|all|abacus' was modified but no index file was exported.
+*=Warning: database 'a|all|source' was modified but no index file was exported.
+=Changes will only be visible after the next 'export'!
+stdout
+-v2*=Created directory "./pool/all/a/ab"
+$(ofa 'pool/all/a/ab/ab-addons_2-1_all.deb')
+$(ofa 'pool/all/a/ab/ab_2-1_abacus.deb')
+$(ofa 'pool/all/a/ab/ab_2-1.tar.gz')
+$(ofa 'pool/all/a/ab/ab_2-1.dsc')
+$(opa 'ab-addons' x 'a' 'all' 'abacus' 'deb')
+$(opa 'ab' x 'a' 'all' 'abacus' 'deb')
+$(opa 'ab' x 'a' 'all' 'source' 'dsc')
+$(otta 'a' 'ab')
+-v5*=Deleting 'test.changes'.
+EOF
+checklog logab << EOF
+DATESTR add a deb all abacus ab-addons 2-1
+DATESTR add a deb all abacus ab 2-1
+DATESTR add a dsc all source ab 2-1
+EOF
+testout - -b . dumppull b 3<<EOF
+stderr
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+update 'aa' '1-2' '1-3' 'froma'
+update 'aa-addons' '1-2' '1-3' 'froma'
+add 'ab' - '2-1' 'froma'
+add 'ab-addons' - '2-1' 'froma'
+EOF
+dodiff results results.expected
+
+testrun - -b . --export=changed pull b 3<<EOF
+stderr
+stdout
+-v0*=Calculating packages to pull...
+-v3*= pulling into 'b|all|abacus'
+-v5*= looking what to get from 'a|all|abacus'
+-v0*=Installing (and possibly deleting) packages...
+$(opu 'aa' x x 'b' 'all' 'abacus' 'deb')
+$(opu 'aa-addons' x x 'b' 'all' 'abacus' 'deb')
+$(opa 'ab' x 'b' 'all' 'abacus' 'deb')
+$(opa 'ab-addons' x 'b' 'all' 'abacus' 'deb')
+-v0=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= replacing './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa_1-2_abacus.deb')
+$(ofd 'pool/all/a/aa/aa-addons_1-2_all.deb')
+EOF
+checklog logab << EOF
+DATESTR replace b deb all abacus aa 1-3 1-2
+DATESTR replace b deb all abacus aa-addons 1-3 1-2
+DATESTR add b deb all abacus ab 2-1
+DATESTR add b deb all abacus ab-addons 2-1
+EOF
+testout - -b . dumppull b 3<<EOF
+stderr
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+keep 'aa' '1-3' '1-3'
+keep 'aa-addons' '1-3' '1-3'
+keep 'ab' '2-1' '2-1'
+keep 'ab-addons' '2-1' '2-1'
+EOF
+dodiff results results.expected
+dogrep "Version: 1-3" dists/b/all/binary-abacus/Packages
+dogrep "Version: 2-1" dists/b/all/binary-abacus/Packages
+test ! -f pool/all/a/aa/aa_1-2_abacus.deb
+test -f pool/all/a/aa/aa_1-3_abacus.deb
+DISTRI=a PACKAGE=ab EPOCH="" VERSION=3 REVISION="-1" SECTION="stupid/base" genpackage.sh
+grep -v '\.tar\.gz' test.changes > broken.changes
+testrun - -b . --delete --delete include a broken.changes 3<<EOF
+*=I don't know what to do having a .dsc without a .diff.gz or .tar.gz in 'broken.changes'!
+-v0*=There have been errors!
+returns 255
+EOF
+checknolog logab
+echo " $EMPTYMD5 stupid/base superfluous ab_3-1.diff.gz" >> broken.changes
+testrun - -b . --delete --delete include a broken.changes 3<<EOF
+*=Cannot find file './ab_3-1.diff.gz' needed by 'broken.changes'!
+-v0*=There have been errors!
+returns 249
+EOF
+checknolog logab
+test -f broken.changes
+test ! -f ab_3-1.diff.gz
+test -f ab-addons_3-1_all.deb
+test -f ab_3-1_abacus.deb
+test -f ab_3-1.dsc
+test ! -f pool/all/a/ab/ab_3-1.diff.gz
+test ! -f pool/all/a/ab/ab-addons_3-1_all.deb
+test ! -f pool/all/a/ab/ab_3-1_abacus.deb
+test ! -f pool/all/a/ab/ab_3-1.dsc
+touch ab_3-1.diff.gz
+testrun - -b . --delete -T deb include a broken.changes 3<<EOF
+stdout
+$(ofa 'pool/all/a/ab/ab-addons_3-1_all.deb')
+$(ofa 'pool/all/a/ab/ab_3-1_abacus.deb')
+$(opu 'ab-addons' x x 'a' 'all' 'abacus' 'deb')
+$(opu 'ab' x x 'a' 'all' 'abacus' 'deb')
+$(otta 'a' 'ab')
+-v0*=Exporting indices...
+-v2*=Created directory "./dists/a"
+-v2*=Created directory "./dists/a/all"
+-v2*=Created directory "./dists/a/all/binary-abacus"
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= creating './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v2*=Created directory "./dists/a/all/source"
+-v6*= looking for changes in 'a|all|source'...
+-v6*= creating './dists/a/all/source/Sources' (gzipped)
+EOF
+checklog logab <<EOF
+DATESTR replace a deb all abacus ab-addons 3-1 2-1
+DATESTR replace a deb all abacus ab 3-1 2-1
+EOF
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+test -f broken.changes
+test -f ab_3-1.diff.gz
+test ! -f ab-addons_3-1_all.deb
+test ! -f ab_3-1_abacus.deb
+test -f ab_3-1.dsc
+test ! -f pool/all/a/ab/ab_3-1.diff.gz
+test -f pool/all/a/ab/ab-addons_3-1_all.deb
+test -f pool/all/a/ab/ab_3-1_abacus.deb
+test ! -f pool/all/a/ab/ab_3-1.dsc
+testout "" -b . dumptracks a
+cat >results.expected <<END
+Distribution: a
+Source: aa
+Version: 1-3
+Files:
+ pool/all/a/aa/aa_1-3.dsc s 1
+ pool/all/a/aa/aa_1-3.tar.gz s 1
+ pool/all/a/aa/aa_1-3_abacus.deb b 1
+ pool/all/a/aa/aa-addons_1-3_all.deb a 1
+
+Distribution: a
+Source: ab
+Version: 2-1
+Files:
+ pool/all/a/ab/ab_2-1.dsc s 1
+ pool/all/a/ab/ab_2-1.tar.gz s 1
+
+Distribution: a
+Source: ab
+Version: 3-1
+Files:
+ pool/all/a/ab/ab_3-1_abacus.deb b 1
+ pool/all/a/ab/ab-addons_3-1_all.deb a 1
+
+END
+if $tracking; then dodiff results.expected results ; else dodiff /dev/null results ; fi
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+testrun - -b . --delete --delete include a broken.changes 3<<EOF
+*=Unable to find pool/all/a/ab/ab_3-1.tar.gz needed by ab_3-1.dsc!
+*=Perhaps you forgot to give dpkg-buildpackage the -sa option,
+= or you could try --ignore=missingfile to guess possible files to use.
+-v0*=There have been errors!
+stdout
+$(ofa 'pool/all/a/ab/ab_3-1.dsc')
+$(ofa 'pool/all/a/ab/ab_3-1.diff.gz')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/all/a/ab/ab_3-1.diff.gz')
+$(ofd 'pool/all/a/ab/ab_3-1.dsc')
+returns 249
+EOF
+test -f broken.changes
+test -f ab_3-1.diff.gz
+test ! -f ab-addons_3-1_all.deb
+test ! -f ab_3-1_abacus.deb
+test -f ab_3-1.dsc
+test ! -f pool/all/a/ab/ab_3-1.diff.gz
+test -f pool/all/a/ab/ab-addons_3-1_all.deb
+test -f pool/all/a/ab/ab_3-1_abacus.deb
+test ! -f pool/all/a/ab/ab_3-1.dsc
+cat broken.changes
+testrun - -b . -T dsc --delete --delete --ignore=missingfile include a broken.changes 3<<EOF
+*=Unable to find pool/all/a/ab/ab_3-1.tar.gz!
+*=Perhaps you forgot to give dpkg-buildpackage the -sa option.
+*=--ignore=missingfile was given, searching for file...
+stdout
+$(ofa 'pool/all/a/ab/ab_3-1.tar.gz')
+$(ofa 'pool/all/a/ab/ab_3-1.diff.gz')
+$(ofa 'pool/all/a/ab/ab_3-1.dsc')
+$(opu 'ab' x x 'a' 'all' 'source' 'dsc')
+$(ottd 'ab' '2-1' 'a')
+-v0*=Deleting files just added to the pool but not used.
+-v0*=(to avoid use --keepunusednewfiles next time)
+$(ofd 'pool/all/a/ab/ab_3-1.diff.gz')
+-v5*=Deleting 'broken.changes'.
+-v0*=Exporting indices...
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= looking for changes in 'a|all|source'...
+-v6*= replacing './dists/a/all/source/Sources' (gzipped)
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/ab/ab_2-1.dsc')
+$(ofd 'pool/all/a/ab/ab_2-1.tar.gz')
+EOF
+checklog logab <<EOF
+DATESTR replace a dsc all source ab 3-1 2-1
+EOF
+test ! -f broken.changes
+test ! -f ab_3-1.diff.gz
+test ! -f ab-addons_3-1_all.deb
+test ! -f ab_3-1_abacus.deb
+test ! -f ab_3-1.dsc
+test ! -f pool/all/a/ab/ab_3-1.diff.gz
+test -f pool/all/a/ab/ab-addons_3-1_all.deb
+test -f pool/all/a/ab/ab_3-1_abacus.deb
+test -f pool/all/a/ab/ab_3-1.dsc
+testout "" -b . dumpunreferenced
+cat > results.expected << EOF
+pool/all/a/ab/ab_3-1.diff.gz
+EOF
+dodiff /dev/null results || dodiff results.expected results
+testrun - -b . deleteunreferenced 3<<EOF
+stdout
+$(ofd 'pool/all/a/ab/ab_3-1.diff.gz' true '')
+EOF
+
+DISTRI=b PACKAGE=ac EPOCH="" VERSION=1 REVISION="-1" SECTION="stupid/base" genpackage.sh
+testrun - -b . -A abacus --delete --delete --ignore=missingfile include b test.changes 3<<EOF
+stderr
+-v2*=Skipping 'ac_1-1.dsc' as architecture 'source' is not in the requested set.
+-v2*=Skipping 'ac_1-1.tar.gz' as architecture 'source' is not in the requested set.
+-v3*=Limiting 'ac-addons_1-1_all.deb' to architectures abacus as requested.
+stdout
+-v2*=Created directory "./pool/all/a/ac"
+$(ofa 'pool/all/a/ac/ac-addons_1-1_all.deb')
+$(ofa 'pool/all/a/ac/ac_1-1_abacus.deb')
+$(opa 'ac-addons' x 'b' 'all' 'abacus' 'deb')
+$(opa 'ac' x 'b' 'all' 'abacus' 'deb')
+-v5*=Deleting 'test.changes'.
+-v0*=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= replacing './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+EOF
+checklog logab <<EOF
+DATESTR add b deb all abacus ac-addons 1-1
+DATESTR add b deb all abacus ac 1-1
+EOF
+dogrep '^Package: aa$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: aa-addons$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: ab$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: ab-addons$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: ac$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: ac-addons$' dists/b/all/binary-abacus/Packages
+echo "Update: - froma" >> conf/distributions
+cat >conf/updates <<END
+Name: froma
+Method: copy:$WORKDIR
+VerifyRelease: blindtrust
+Suite: a
+GetInRelease: no
+ListHook: /bin/cp
+END
+testout - -b . dumpupdate b 3<<EOF
+-v6*=aptmethod start 'copy:$WORKDIR/dists/a/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/a/Release'
+-v6*=aptmethod start 'copy:$WORKDIR/dists/a/all/binary-abacus/Packages.gz'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/a/all/binary-abacus/Packages.gz'
+-v2*=Uncompress './lists/froma_a_all_abacus_Packages.gz' into './lists/froma_a_all_abacus_Packages' using '/bin/gunzip'...
+-v6*=Called /bin/cp './lists/froma_a_all_abacus_Packages' './lists/_b_all_abacus_froma_froma_a_all_abacus_Packages'
+-v6*=Listhook successfully returned!
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+keep 'aa' '1-3' '1-3'
+keep 'aa-addons' '1-3' '1-3'
+update 'ab' '2-1' '3-1' 'froma'
+update 'ab-addons' '2-1' '3-1' 'froma'
+delete 'ac' '1-1'
+delete 'ac-addons' '1-1'
+EOF
+dodiff results.expected results
+testrun - -b . predelete b 3<<EOF
+=WARNING: Single-Instance not yet supported!
+-v6*=aptmethod start 'copy:$WORKDIR/dists/a/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/a/Release'
+-v6*=Called /bin/cp './lists/froma_a_all_abacus_Packages' './lists/_b_all_abacus_froma_froma_a_all_abacus_Packages'
+-v6*=Listhook successfully returned!
+stdout
+-v0*=Removing obsolete or to be replaced packages...
+-v3*= processing updates for 'b|all|abacus'
+-v5*= marking everything to be deleted
+-v5*= reading './lists/_b_all_abacus_froma_froma_a_all_abacus_Packages'
+$(opd 'ab' x b all abacus deb)
+$(opd 'ab-addons' x b all abacus deb)
+$(opd 'ac' x b all abacus deb)
+$(opd 'ac-addons' x b all abacus deb)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= replacing './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v1*=Shutting down aptmethods...
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/ab/ab_2-1_abacus.deb')
+$(ofd 'pool/all/a/ab/ab-addons_2-1_all.deb')
+$(ofd 'pool/all/a/ac/ac_1-1_abacus.deb')
+$(ofd 'pool/all/a/ac/ac-addons_1-1_all.deb')
+-v2*=removed now empty directory ./pool/all/a/ac
+EOF
+testout - -b . dumpupdate b 3<<EOF
+-v6*=aptmethod start 'copy:$WORKDIR/dists/a/Release'
+-v1*=aptmethod got 'copy:$WORKDIR/dists/a/Release'
+-v6*=Called /bin/cp './lists/froma_a_all_abacus_Packages' './lists/_b_all_abacus_froma_froma_a_all_abacus_Packages'
+-v6*=Listhook successfully returned!
+EOF
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+keep 'aa' '1-3' '1-3'
+keep 'aa-addons' '1-3' '1-3'
+add 'ab' - '3-1' 'froma'
+add 'ab-addons' - '3-1' 'froma'
+EOF
+dodiff results.expected results
+checklog logab <<EOF
+DATESTR remove b deb all abacus ab 2-1
+DATESTR remove b deb all abacus ab-addons 2-1
+DATESTR remove b deb all abacus ac 1-1
+DATESTR remove b deb all abacus ac-addons 1-1
+EOF
+dogrep '^Package: aa$' dists/b/all/binary-abacus/Packages
+dogrep '^Package: aa-addons$' dists/b/all/binary-abacus/Packages
+dongrep '^Package: ab$' dists/b/all/binary-abacus/Packages
+dongrep '^Package: ab-addons$' dists/b/all/binary-abacus/Packages
+dongrep '^Package: ac$' dists/b/all/binary-abacus/Packages
+dongrep '^Package: ac-addons$' dists/b/all/binary-abacus/Packages
+test ! -f pool/all/a/ac/ac-addons_1-1_all.deb
+test ! -f pool/all/a/ab/ab_2-1_abacus.deb
+test -f pool/all/a/aa/aa_1-3_abacus.deb
+testrun - -b . copy b a ab ac 3<<EOF
+stderr
+-v0*=Will not copy as not found: ac.
+stdout
+-v9*=Adding reference to 'pool/all/a/ab/ab_3-1_abacus.deb' by 'b|all|abacus'
+-v1*=Adding 'ab' '3-1' to 'b|all|abacus'.
+-v3*=Not looking into 'a|all|source' as no matching target in 'b'!
+$(opa 'ab' x 'b' 'all' 'abacus' 'deb')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= replacing './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+EOF
+# readd?
+#-v3*=No instance of 'ab' found in 'a|all|source'!
+#-v3*=No instance of 'ac' found in 'a|all|abacus'!
+#-v3*=No instance of 'ac' found in 'a|all|source'!
+checklog logab <<EOF
+DATESTR add b deb all abacus ab 3-1
+EOF
+if $tracking ; then
+testout "" -b . dumptracks
+cat > results.expected <<EOF
+Distribution: a
+Source: aa
+Version: 1-3
+Files:
+ pool/all/a/aa/aa_1-3.dsc s 1
+ pool/all/a/aa/aa_1-3.tar.gz s 1
+ pool/all/a/aa/aa_1-3_abacus.deb b 1
+ pool/all/a/aa/aa-addons_1-3_all.deb a 1
+
+Distribution: a
+Source: ab
+Version: 3-1
+Files:
+ pool/all/a/ab/ab_3-1_abacus.deb b 1
+ pool/all/a/ab/ab-addons_3-1_all.deb a 1
+ pool/all/a/ab/ab_3-1.dsc s 1
+ pool/all/a/ab/ab_3-1.tar.gz s 1
+
+EOF
+dodiff results.expected results
+testout "" -b . dumpreferences
+sort results > results.sorted
+cat > results.expected <<EOF
+a aa 1-3 pool/all/a/aa/aa-addons_1-3_all.deb
+a aa 1-3 pool/all/a/aa/aa_1-3.dsc
+a aa 1-3 pool/all/a/aa/aa_1-3.tar.gz
+a aa 1-3 pool/all/a/aa/aa_1-3_abacus.deb
+a ab 3-1 pool/all/a/ab/ab-addons_3-1_all.deb
+a ab 3-1 pool/all/a/ab/ab_3-1.dsc
+a ab 3-1 pool/all/a/ab/ab_3-1.tar.gz
+a ab 3-1 pool/all/a/ab/ab_3-1_abacus.deb
+a|all|abacus pool/all/a/aa/aa-addons_1-3_all.deb
+a|all|abacus pool/all/a/aa/aa_1-3_abacus.deb
+a|all|abacus pool/all/a/ab/ab-addons_3-1_all.deb
+a|all|abacus pool/all/a/ab/ab_3-1_abacus.deb
+a|all|source pool/all/a/aa/aa_1-3.dsc
+a|all|source pool/all/a/aa/aa_1-3.tar.gz
+a|all|source pool/all/a/ab/ab_3-1.dsc
+a|all|source pool/all/a/ab/ab_3-1.tar.gz
+b|all|abacus pool/all/a/aa/aa-addons_1-3_all.deb
+b|all|abacus pool/all/a/aa/aa_1-3_abacus.deb
+b|all|abacus pool/all/a/ab/ab_3-1_abacus.deb
+EOF
+dodiff results.expected results.sorted
+fi
+rm -r -f db2
+cp -a db db2
+echo tracking is $tracking
+testrun - --keepunreferenced --dbdir ./db2 -b . removesrc a unknown 3<<EOF
+stderr
+-u1*=Nothing about source package 'unknown' found in the tracking data of 'a'!
+-u1*=This either means nothing from this source in this version is there,
+-u1*=or the tracking information might be out of date.
+stdout
+EOF
+testrun - --keepunreferenced --dbdir ./db2 -b . removesrc a ab 3-1 3<<EOF
+stdout
+$(opd 'ab-addons' x a all abacus deb)
+$(opd 'ab' x a all abacus deb)
+$(opd 'ab' x a all source dsc)
+$(ottd 'ab' '3-1' 'a')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= replacing './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'a|all|source'...
+-v6*= replacing './dists/a/all/source/Sources' (gzipped)
+-v1*=3 files lost their last reference.
+-v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.)
+EOF
+if $tracking ; then
+checklog logab <<EOF
+DATESTR remove a deb all abacus ab-addons 3-1
+DATESTR remove a deb all abacus ab 3-1
+DATESTR remove a dsc all source ab 3-1
+EOF
+else
+checklog logab <<EOF
+DATESTR remove a deb all abacus ab 3-1
+DATESTR remove a deb all abacus ab-addons 3-1
+DATESTR remove a dsc all source ab 3-1
+EOF
+fi
+rm -r db2
+cp -a db db2
+testrun - --keepunreferenced --dbdir ./db2 -b . removesrc a ab 3<<EOF
+stdout
+$(opd 'ab-addons' unset a all abacus deb)
+$(opd 'ab' unset a all abacus deb)
+$(opd 'ab' unset a all source deb)
+$(ottd 'ab' '3-1' 'a')
+-v0*=Exporting indices...
+-v6*= looking for changes in 'a|all|abacus'...
+-v6*= replacing './dists/a/all/binary-abacus/Packages' (uncompressed,gzipped)
+-v6*= looking for changes in 'a|all|source'...
+-v6*= replacing './dists/a/all/source/Sources' (gzipped)
+-v1*=3 files lost their last reference.
+-v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.)
+EOF
+if $tracking ; then
+checklog logab <<EOF
+DATESTR remove a deb all abacus ab-addons 3-1
+DATESTR remove a deb all abacus ab 3-1
+DATESTR remove a dsc all source ab 3-1
+EOF
+else
+checklog logab <<EOF
+DATESTR remove a deb all abacus ab 3-1
+DATESTR remove a deb all abacus ab-addons 3-1
+DATESTR remove a dsc all source ab 3-1
+EOF
+fi
+testout "" --keepunreferenced --dbdir ./db2 dumppull
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+keep 'aa' '1-3' '1-3'
+keep 'aa-addons' '1-3' '1-3'
+keep 'ab' '3-1' unavailable
+EOF
+dodiff results.expected results
+testrun - --keepunreferenced --dbdir ./db2 -b . removefilter b "Version (== 1-3), Package (>> aa)" 3<<EOF
+stdout
+$(opd 'aa-addons' unset b all abacus deb)
+-v0*=Exporting indices...
+-v6*= looking for changes in 'b|all|abacus'...
+-v6*= replacing './dists/b/all/binary-abacus/Packages' (uncompressed,gzipped)
+EOF
+checklog logab <<EOF
+DATESTR remove b deb all abacus aa-addons 1-3
+EOF
+testout "" --keepunreferenced --dbdir ./db2 dumppull
+cat > results.expected <<EOF
+Updates needed for 'b|all|abacus':
+keep 'aa' '1-3' '1-3'
+add 'aa-addons' - '1-3' 'froma'
+keep 'ab' '3-1' unavailable
+EOF
+dodiff results.expected results
+if $tracking ; then
+testrun - -b . --delete removealltracks a 3<<EOF
+stdout
+-v0*=Deleting all tracks for a...
+EOF
+testout "" -b . dumptracks
+dodiff /dev/null results
+fi
+testout "" -b . dumpreferences
+sort results > results.sorted
+cat > results.expected <<EOF
+a|all|abacus pool/all/a/aa/aa-addons_1-3_all.deb
+a|all|abacus pool/all/a/aa/aa_1-3_abacus.deb
+a|all|abacus pool/all/a/ab/ab-addons_3-1_all.deb
+a|all|abacus pool/all/a/ab/ab_3-1_abacus.deb
+a|all|source pool/all/a/aa/aa_1-3.dsc
+a|all|source pool/all/a/aa/aa_1-3.tar.gz
+a|all|source pool/all/a/ab/ab_3-1.dsc
+a|all|source pool/all/a/ab/ab_3-1.tar.gz
+b|all|abacus pool/all/a/aa/aa-addons_1-3_all.deb
+b|all|abacus pool/all/a/aa/aa_1-3_abacus.deb
+b|all|abacus pool/all/a/ab/ab_3-1_abacus.deb
+EOF
+dodiff results.expected results.sorted
+cat > conf/distributions <<EOF
+Codename: X
+Architectures: none
+Components: test
+EOF
+checknolog logab
+if $tracking ; then
+testrun - -b . --delete clearvanished 3<<EOF
+-v4*=Strange, 'X|test|none' does not appear in packages.db yet.
+stdout
+*=Deleting vanished identifier 'a|all|abacus'.
+*=Deleting vanished identifier 'a|all|source'.
+*=Deleting vanished identifier 'b|all|abacus'.
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa-addons_1-3_all.deb')
+$(ofd 'pool/all/a/aa/aa_1-3.dsc')
+$(ofd 'pool/all/a/aa/aa_1-3.tar.gz')
+$(ofd 'pool/all/a/aa/aa_1-3_abacus.deb')
+-v2*=removed now empty directory ./pool/all/a/aa
+$(ofd 'pool/all/a/ab/ab-addons_3-1_all.deb')
+$(ofd 'pool/all/a/ab/ab_3-1.dsc')
+$(ofd 'pool/all/a/ab/ab_3-1.tar.gz')
+$(ofd 'pool/all/a/ab/ab_3-1_abacus.deb')
+-v2*=removed now empty directory ./pool/all/a/ab
+-v2*=removed now empty directory ./pool/all/a
+-v2*=removed now empty directory ./pool/all
+-v2*=removed now empty directory ./pool
+EOF
+else
+testrun - -b . --delete clearvanished 3<<EOF
+# -v4*=Strange, 'X|test|none' does not appear in packages.db yet.
+stdout
+*=Deleting vanished identifier 'a|all|abacus'.
+*=Deleting vanished identifier 'a|all|source'.
+*=Deleting vanished identifier 'b|all|abacus'.
+-v0*=Deleting files no longer referenced...
+$(ofd 'pool/all/a/aa/aa-addons_1-3_all.deb')
+$(ofd 'pool/all/a/aa/aa_1-3.dsc')
+$(ofd 'pool/all/a/aa/aa_1-3.tar.gz')
+$(ofd 'pool/all/a/aa/aa_1-3_abacus.deb')
+-v2*=removed now empty directory ./pool/all/a/aa
+$(ofd 'pool/all/a/ab/ab-addons_3-1_all.deb')
+$(ofd 'pool/all/a/ab/ab_3-1.dsc')
+$(ofd 'pool/all/a/ab/ab_3-1.tar.gz')
+$(ofd 'pool/all/a/ab/ab_3-1_abacus.deb')
+-v2*=removed now empty directory ./pool/all/a/ab
+-v2*=removed now empty directory ./pool/all/a
+-v2*=removed now empty directory ./pool/all
+-v2*=removed now empty directory ./pool
+EOF
+fi
+checknolog logab
+testout "" -b . dumptracks
+dodiff /dev/null results
+testout "" -b . dumpunreferenced
+dodiff /dev/null results
+rm -r dists
+done
+rm -r db db2 conf lists logs
+rm aa* ab* ac* results.log.expected results.expected results results.sorted
+testsuccess
diff --git a/tests/verify.test b/tests/verify.test
new file mode 100644
index 0000000..949e25d
--- /dev/null
+++ b/tests/verify.test
@@ -0,0 +1,437 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir gpgtestdir
+chmod go-rwx gpgtestdir
+export GNUPGHOME="`pwd`/gpgtestdir"
+gpg --import $TESTSDIR/good.key $TESTSDIR/evil.key $TESTSDIR/expired.key $TESTSDIR/revoked.key $TESTSDIR/expiredwithsubkey-working.key $TESTSDIR/withsubkeys-works.key
+
+CURDATE="$(date +"%Y-%m-%d")"
+
+mkdir conf lists
+cat > conf/distributions <<CONFEND
+Codename: Test
+Architectures: source
+Components: everything
+Update: rule otherrule
+CONFEND
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 111
+Suite: test
+
+Name: rule
+From: commonbase
+
+Name: otherrule
+From: commonbase
+CONFEND
+
+testrun - -b . update Test 3<<EOF
+return 255
+stdout
+$(odb)
+stderr
+*=Error: Too short key id '111' in VerifyRelease condition '111'!
+-v0*=There have been errors!
+EOF
+
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 11111111 22222222
+Suite: test
+
+Name: rule
+From: commonbase
+
+Name: otherrule
+From: commonbase
+CONFEND
+
+testrun - -b . update Test 3<<EOF
+return 255
+stdout
+stderr
+*=Error: Space separated key-ids in VerifyRelease condition '11111111 22222222'!
+*=(Alternate keys can be separated with '|'. Do not put spaces in key-ids.)
+-v0*=There have been errors!
+EOF
+
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 11111111
+Suite: test
+
+Name: rule
+From: commonbase
+
+Name: otherrule
+From: commonbase
+CONFEND
+
+testrun - -b . update Test 3<<EOF
+return 249
+stdout
+stderr
+*=Error: unknown key '11111111'!
+-v0*=There have been errors!
+EOF
+
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 11111111
+
+Name: rule
+From: commonbase
+VerifyRelease: DC3C29B8|685AF714
+Suite: test
+
+Name: otherrule
+From: commonbase
+VerifyRelease: 685AF714|D04DD3D6
+Suite: test
+CONFEND
+
+mkdir test
+mkdir test/dists
+mkdir test/dists/test
+cat > test/dists/test/Release <<EOF
+Codename: test
+Components: everything
+Architectures: coal
+EOF
+
+gpg --list-secret-keys
+gpg --expert --sign --clearsign -u 60DDED5B -u D7A5D887 -u revoked@nowhere.tld --output test/dists/test/InRelease test/dists/test/Release
+gpg --expert --sign --clearsign -u 60DDED5B -u D7A5D887 -u good@nowhere.tld --output test/dists/test/InRelease.good test/dists/test/Release
+gpg --expert -a --sign --clearsign -u evil@nowhere.tld --output test/dists/test/InRelease.evil test/dists/test/Release
+
+rm -r gpgtestdir
+mkdir gpgtestdir
+chmod go-rwx gpgtestdir
+gpg --import $TESTSDIR/good.key $TESTSDIR/evil.key $TESTSDIR/expired.key $TESTSDIR/revoked.key $TESTSDIR/revoked.pkey $TESTSDIR/expiredwithsubkey.key $TESTSDIR/withsubkeys.key
+gpg --list-keys
+
+testrun - -b . update Test 3<<EOF
+return 255
+stderr
+*=VerifyRelease condition 'DC3C29B8|685AF714' lists revoked key '72F1D61F685AF714'.
+*=(To use it anyway, append it with a '!' to force usage).
+-v0*=There have been errors!
+stdout
+EOF
+
+sed -e 's/685AF714/&!/' -i conf/updates
+
+testrun - -b . update Test 3<<EOF
+return 255
+stderr
+*=VerifyRelease condition '685AF714!|D04DD3D6' lists expired key '894FA29DD04DD3D6'.
+*=(To use it anyway, append it with a '!' to force usage).
+-v0*=There have been errors!
+stdout
+EOF
+
+sed -e 's/D04DD3D6/&!/' -i conf/updates
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Not accepting valid signature in './lists/commonbase_test_InRelease' with REVOKED '12D6C95C8C737389EAAF535972F1D61F685AF714'
+*=(To ignore it append a ! to the key and run reprepro with --ignore=revokedkey)
+*=ERROR: Condition '685AF714!|D04DD3D6!' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12D6C95C8C737389EAAF535972F1D61F685AF714' (signed ${CURDATE}): key revoced
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - --ignore=revokedkey -b . update Test 3<<EOF
+return 255
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=WARNING: valid signature in './lists/commonbase_test_InRelease' with revoked '12D6C95C8C737389EAAF535972F1D61F685AF714' is accepted as requested!
+*=Missing checksums in Release file './lists/commonbase_test_InRelease'!
+-v0*=There have been errors!
+stdout
+EOF
+
+cp test/dists/test/InRelease.good test/dists/test/InRelease
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=ERROR: Condition '685AF714!|D04DD3D6!' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12E94E82B6D7A883AF6EC8E980F4C43EDC3C29B8' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+# different order
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 11111111
+
+Name: rule
+From: commonbase
+VerifyRelease: 685AF714!|D04DD3D6!
+Suite: test
+
+Name: otherrule
+From: commonbase
+VerifyRelease: DC3C29B8|685AF714!
+Suite: test
+CONFEND
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=ERROR: Condition '685AF714!|D04DD3D6!' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12E94E82B6D7A883AF6EC8E980F4C43EDC3C29B8' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+# now subkeys:
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: F62C6D3B
+
+Name: rule
+From: commonbase
+VerifyRelease: D7A5D887
+Suite: test
+
+Name: otherrule
+From: commonbase
+Suite: test
+CONFEND
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=ERROR: Condition 'F62C6D3B' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12E94E82B6D7A883AF6EC8E980F4C43EDC3C29B8' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+sed -e 's/F62C6D3B/F62C6D3B+/' -i conf/updates
+
+testrun - -b . update Test 3<<EOF
+return 255
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Missing checksums in Release file './lists/commonbase_test_InRelease'!
+-v0*=There have been errors!
+stdout
+EOF
+
+# now subkey of an expired key
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: 60DDED5B!
+
+Name: rule
+From: commonbase
+Suite: test
+
+Name: otherrule
+From: commonbase
+Suite: test
+CONFEND
+
+# gpgme no longer seems to distinguish expired and parent-expired:
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Not accepting valid signature in './lists/commonbase_test_InRelease' with EXPIRED 'DCAD3A286F5178E2F4B09330A573FEB160DDED5B'
+*=(To ignore it append a ! to the key and run reprepro with --ignore=expiredkey)
+*=ERROR: Condition '60DDED5B!' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12E94E82B6D7A883AF6EC8E980F4C43EDC3C29B8' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+# now listing the expired key, of which we use an non-expired subkey
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: A260449A!+
+
+Name: rule
+From: commonbase
+Suite: test
+
+Name: otherrule
+From: commonbase
+Suite: test
+CONFEND
+
+# gpgme no longer seems to distinguish expired and parent-expired:
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Not accepting valid signature in './lists/commonbase_test_InRelease' with EXPIRED 'DCAD3A286F5178E2F4B09330A573FEB160DDED5B'
+*=(To ignore it append a ! to the key and run reprepro with --ignore=expiredkey)
+*=ERROR: Condition 'A260449A!+' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='DCAD3A286F5178E2F4B09330A573FEB160DDED5B' (signed ${CURDATE}): expired key
+*='236B4B98B5087AF4B621CB14D8A28B7FD7A5D887' (signed ${CURDATE}): valid
+*='12E94E82B6D7A883AF6EC8E980F4C43EDC3C29B8' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+# Now testing what happens when only signed with a totally different key:
+cp test/dists/test/InRelease.evil test/dists/test/InRelease
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=ERROR: Condition 'A260449A!+' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='FDC7D039CCC83CC4921112A09FA943670C672A4A' (signed ${CURDATE}): valid
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+# Now testing an expired signature:
+cat > conf/updates <<CONFEND
+Name: commonbase
+Method: file:$WORKDIR/test
+VerifyRelease: F62C6D3B+
+
+Name: rule
+From: commonbase
+VerifyRelease: F62C6D3B
+Suite: test
+
+Name: otherrule
+From: commonbase
+Suite: test
+CONFEND
+
+# expired signatures are not that easy to fake, so cat it:
+cat > test/dists/test/InRelease <<'EOF'
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Codename: test
+Components: everything
+Architectures: coal
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+iKIEAQECAAwFAk+6EiEFgwABUYAACgkQFU9je/YsbTv4LgP8DkaRBhBG7+JDD1N1
+GANCsth4rzKDfpyMrttFjW6Ra9QegDdnHyLz09IL5Hyzmst4s8DQ69q2LyZaQt3+
+0C2OG9iQ2GjQt8xvppDufvymFpqTbqnGn/LeG6KjP542Su8XZxptFPT2DyPNCe0W
+Vz5f8yupwc67sAWj/qhmBEpZp9E=
+=025V
+-----END PGP SIGNATURE-----
+EOF
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Not accepting valid but EXPIRED signature in './lists/commonbase_test_InRelease' with '2938A0D8CD4E20437CAE9CE4154F637BF62C6D3B'
+*=(To ignore it append a ! to the key and run reprepro with --ignore=expiredsignature)
+*=ERROR: Condition 'F62C6D3B+' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='2938A0D8CD4E20437CAE9CE4154F637BF62C6D3B' (signed 2012-05-21): expired signature (since 2012-05-22)
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+testrun - --ignore=expiredsignature -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Not accepting valid but EXPIRED signature in './lists/commonbase_test_InRelease' with '2938A0D8CD4E20437CAE9CE4154F637BF62C6D3B'
+*=(To ignore it append a ! to the key and run reprepro with --ignore=expiredsignature)
+*=ERROR: Condition 'F62C6D3B+' not fulfilled for './lists/commonbase_test_InRelease'.
+*=Signatures in './lists/commonbase_test_InRelease':
+*='2938A0D8CD4E20437CAE9CE4154F637BF62C6D3B' (signed 2012-05-21): expired signature (since 2012-05-22)
+*=Error: Not enough signatures found for remote repository commonbase (file:${WORKDIR}/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+sed -e 's/F62C6D3B/&!/' -i conf/updates
+
+testrun - --ignore=expiredsignature -b . update Test 3<<EOF
+return 255
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=WARNING: valid but expired signature in './lists/commonbase_test_InRelease' with '2938A0D8CD4E20437CAE9CE4154F637BF62C6D3B' is accepted as requested!
+*=Missing checksums in Release file './lists/commonbase_test_InRelease'!
+-v0*=There have been errors!
+stdout
+EOF
+
+#empty file:
+cat > test/dists/test/InRelease <<EOF
+EOF
+
+testrun - -b . update Test 3<<EOF
+return 250
+stderr
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/InRelease'
+-v2*=Copy file '${WORKDIR}/test/dists/test/InRelease' to './lists/commonbase_test_InRelease'...
+*=Error: Not enough signatures found for remote repository commonbase (file:$WORKDIR/test test)!
+-v0*=There have been errors!
+stdout
+EOF
+
+rm -rf db conf gpgtestdir gpgtestdir lists test
+
+testsuccess
diff --git a/tests/withsubkeys-works.key b/tests/withsubkeys-works.key
new file mode 100644
index 0000000..792e01f
--- /dev/null
+++ b/tests/withsubkeys-works.key
@@ -0,0 +1,52 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.9 (GNU/Linux)
+
+lQHXBEnjCPkBBADbdIK4D+1lbjq1wzZSIfyHJFWKMpy26iwhS2KJqkBNcN1n3Ute
+ND9WHNuhj+n3k1saFjj7yi/18PwM7weqDPAnzp5dpSVl6OXZU0Oaf4hdk/K7hxkM
+AaW8sGxJc2OMssffU/ZIGde/62kgQSwhqK0S3BnDORdWE2eIShGkC7Ws4QARAQAB
+AAP4gjfE3ynpm1JfUzIg8RVR/9KDUOtJmHz541n8jBTzycLlznKNasZY5yGN3B9w
+tUZxo8weNLeTveID3mve+8uM/UDwcgOVJlMJXXCDCMGYontTR8yAdN2k9mh09Ejx
+ihL+KrFXY+L42YFa6CUQgzNrxvG5nG3T+NFjDKHew44LWQIA26zeTY9Qvu/+tbIa
+YaLYHbNHMCABAPV7zHdhAsgPKII6nO1Ic9e6OobNRRn89vFyWxopYFT3sjvV1ZEc
++gqKKwIA/75SuKR+INGfY/7OZBjI5tOtWW0jBSxKHHf9LbCm1uW9KNtv2yhZ88oJ
+MboLNXTNeIdAgjsxUEZnTtbumTbbIwH9FxIhrIj9q1Pb7FZ8ZP2xLSkpHsNlvHYI
++pEcGcNPfpl9D9KKK3tbyG633CAIrMtmTjioliQH0H1kyF2mxhsjOaWvtFJSZXBy
+ZXBybyBUZXN0c3VpdGUgS2V5IDYgKEZPUiBVU0UgV0lUSElOIFRFU1RTVUlURSBP
+TkxZKSA8d2l0aHN1YmtleXNAbm93aGVyZS50bGQ+iLYEEwECACAFAknjCPkCGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAVT2N79ixtO5xmA/0SkijX92Wupoyu
+zJbQfRBkPVG7TN8fkmQkKdUZCmu8CrVelZynDGmLhikdMncNv6wMaXP6+/0z3AX/
+KwuH8X66xH3nouodM/eueTMbJY3d+b3Re1vEg4xZU7h66/2zE9iXcPoHR2j2l63d
+jCZOR6Q27kBXXih1j2TAALDpFXW3sp0BuwRJ4wl1EQQAzBhaIj9XtwloZZeo5ZKE
+dahIHXOJTt8fXvtpUs+kBEje7aKFxYEZgDSkYBBjxRXII/p/Ab7dLRCMgPcRSGrg
+SXdlVSlMG8mQLm1tIelgkz/7GnMErbFeYClK7ohfWZBrW79Q5quCNlrV+qetJXJR
+D8lstf4e5r9gQDouCNSjx3MAoJh3s4vRjiXvo1lxp75zZdGtouB3BADLrg5965Vf
+GySW/MKGbIK1Rm9CextGUxd01GL1uoR4Hi9oHNW7+/QZ9ixOMKA5ggqomfp7pgEe
+s4wvhHsFIpDpNAWOKlkxlERY33x7R5XxKsgSJS/IfQct5AlbajVDFukFgKW8CWnM
+A9AD+slO9VoOKgHEDevt9Mfhur1dMoWVowP/U5f7N16vJLckL8uk2DiiHXXtNq0C
+ucLXhykPAzmw66e9y+0LHd9sj1WzB7WKYOFCR4SGuSRhfUn8HCzaOSRZYI4XVX7z
+0mq/ZvenSJm+7mBxOm5rkxCgpwEuFojz1589qFDugKhM+JfGW2H4fpevxyHmpfvK
+0oyqOc/6t39Oe/EAAKCQOt2SvprOCp9vDGQrL/O6Wg+mLwnMiOcEGAECAAkFAknj
+CXUCGwIAUgkQFU9je/YsbTtHIAQZEQIABgUCSeMJdQAKCRB0ti0y57+nom/bAJ9b
+NNsPYuDYI1twxdYLxenOoZtqWACeNAHK4UssY9o72IvPktAYzxXHcWx2lwP+MPdu
+UJBlGJGQJDqO//tdtsa8VNOg06SgGTBNDBlbGY3VVSRTxlOy4/Ubrgr9rLBA7TXo
+XvnE2wgPeNiXF17/K+E7uenSStZJCjkAYa9bBJywlxRHGatxIM08QZDuH35i+Bz9
+/FpFAfiuLKuK75ApnVNP7KSUhAtUXsIx5aP402CdAdgESeMJsQEEAOTIv3NxcTHc
+zP6iijUVIj99QHi1VPnATlBoZRpxf7mYMALiY1CKNFzx6EXiCi9XCHojspnfcSQq
+CgtB7EixxRQSCT8WR7w5Q79dtYDSlVLmqjlAll2ea8BxYAqScyDiLA5PAI7Y/ey/
+tAL0bM0qe57pZ64xHFWXIlP0faKUVPbbABEBAAEAA/0cYK4mop6YwbuHph+gf/OU
+jnOtxUg6BllwbdKEmilumurxoKUS+2GNWdAmwufigVgi1kS0A1wkUTaXuOCXD73T
+CHPcKMRp7YLZzg9jy/XlDgbPn4qRl1qa7RHPvAV5a4j4upcw+EzP2B+3z7e/zlQk
+FOuKdSYj/zgBidRwxRs9kQIA8HngrqvrRNC03IzQLZqYhRFV7AxlK2XlikFdEj0x
+XAy6ep4svwsBtHnUUfmXZL/cG4A6MNp1in4XK6WNuBmmyQIA842mH8nV/vmIwm+l
+fDTs1Y5BMoV2g9VhOixJ3WO2HQVGbYB42tBd3cw+tp5urSPESFN9rcq8U2+XE+wP
+ZRiugwIA0aBXyLN73fhkTUduTJhYOgtEl1yormrsEZHwYiNI7zLjc71p3aMZsXpA
+ZV3lcgPV83O7ESgC3z+Pbux6tA8+k6R7iQE9BBgBAgAJBQJJ4wmxAhsCAKgJEBVP
+Y3v2LG07nSAEGQECAAYFAknjCbEACgkQ2KKLf9el2Ie2bQP/T8ThPu2Seq5tR8aG
+xJJ1w0U6szTT2UIyXu8gBO3SM8wLVcBuIXdkxOshUP2Xua758tLmns5XaoDiK8am
+9E22zX1BXyjVmK/74of6yjsf+VxJJtTlgpxeFH+zI1zZxIA1TlRg2TvVIHC5oD1i
+5v1P2xugtzby4aGHTXdi8pUMKGLMegP/U61Kg6OcqEA2C6288UhsNWZzReY9mMOl
+C8z+TET6IfHnP5hd4+hZxKDWKgj0V95vbWzA/XkZIiK09wjjht6Oqw5tqq4R38D5
+kkFUr9yLlwTxMw+jipVTTFlbjjWhsnEoH16QZRQr0PT0FlSnn3CGy+sdSdq+vMpQ
+sIh1h85JVQI=
+=NWLh
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/withsubkeys.key b/tests/withsubkeys.key
new file mode 100644
index 0000000..823680b
--- /dev/null
+++ b/tests/withsubkeys.key
@@ -0,0 +1,52 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.9 (GNU/Linux)
+
+lQHXBEnjCPkBBADbdIK4D+1lbjq1wzZSIfyHJFWKMpy26iwhS2KJqkBNcN1n3Ute
+ND9WHNuhj+n3k1saFjj7yi/18PwM7weqDPAnzp5dpSVl6OXZU0Oaf4hdk/K7hxkM
+AaW8sGxJc2OMssffU/ZIGde/62kgQSwhqK0S3BnDORdWE2eIShGkC7Ws4QARAQAB
+AAP4gjfE3ynpm1JfUzIg8RVR/9KDUOtJmHz541n8jBTzycLlznKNasZY5yGN3B9w
+tUZxo8weNLeTveID3mve+8uM/UDwcgOVJlMJXXCDCMGYontTR8yAdN2k9mh09Ejx
+ihL+KrFXY+L42YFa6CUQgzNrxvG5nG3T+NFjDKHew44LWQIA26zeTY9Qvu/+tbIa
+YaLYHbNHMCABAPV7zHdhAsgPKII6nO1Ic9e6OobNRRn89vFyWxopYFT3sjvV1ZEc
++gqKKwIA/75SuKR+INGfY/7OZBjI5tOtWW0jBSxKHHf9LbCm1uW9KNtv2yhZ88oJ
+MboLNXTNeIdAgjsxUEZnTtbumTbbIwH9FxIhrIj9q1Pb7FZ8ZP2xLSkpHsNlvHYI
++pEcGcNPfpl9D9KKK3tbyG633CAIrMtmTjioliQH0H1kyF2mxhsjOaWvtFJSZXBy
+ZXBybyBUZXN0c3VpdGUgS2V5IDYgKEZPUiBVU0UgV0lUSElOIFRFU1RTVUlURSBP
+TkxZKSA8d2l0aHN1YmtleXNAbm93aGVyZS50bGQ+iLYEEwECACAFAknjCPkCGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAVT2N79ixtO5xmA/0SkijX92Wupoyu
+zJbQfRBkPVG7TN8fkmQkKdUZCmu8CrVelZynDGmLhikdMncNv6wMaXP6+/0z3AX/
+KwuH8X66xH3nouodM/eueTMbJY3d+b3Re1vEg4xZU7h66/2zE9iXcPoHR2j2l63d
+jCZOR6Q27kBXXih1j2TAALDpFXW3sp0BuwRJ4wl1EQQAzBhaIj9XtwloZZeo5ZKE
+dahIHXOJTt8fXvtpUs+kBEje7aKFxYEZgDSkYBBjxRXII/p/Ab7dLRCMgPcRSGrg
+SXdlVSlMG8mQLm1tIelgkz/7GnMErbFeYClK7ohfWZBrW79Q5quCNlrV+qetJXJR
+D8lstf4e5r9gQDouCNSjx3MAoJh3s4vRjiXvo1lxp75zZdGtouB3BADLrg5965Vf
+GySW/MKGbIK1Rm9CextGUxd01GL1uoR4Hi9oHNW7+/QZ9ixOMKA5ggqomfp7pgEe
+s4wvhHsFIpDpNAWOKlkxlERY33x7R5XxKsgSJS/IfQct5AlbajVDFukFgKW8CWnM
+A9AD+slO9VoOKgHEDevt9Mfhur1dMoWVowP/U5f7N16vJLckL8uk2DiiHXXtNq0C
+ucLXhykPAzmw66e9y+0LHd9sj1WzB7WKYOFCR4SGuSRhfUn8HCzaOSRZYI4XVX7z
+0mq/ZvenSJm+7mBxOm5rkxCgpwEuFojz1589qFDugKhM+JfGW2H4fpevxyHmpfvK
+0oyqOc/6t39Oe/EAAKCQOt2SvprOCp9vDGQrL/O6Wg+mLwnMiO0EGAECAA8CGwIF
+AknjCfcFCQABUgAAUkcgBBkRAgAGBQJJ4wl1AAoJEHS2LTLnv6eib9sAn1miRieV
++l3MMRA0mHQlPF89CIRrAJ94g4sj4qIJQQMQ2zJwFLYmtluSuwkQFU9je/YsbTtF
+sgQAtaP0bMzn7wmyGAWif9LUxdV2RjfEvrA3jj4V+GeoMT0V4no0eoDKuj2o2tBZ
+bWWrCXdseJ8UWoftmCErCetWy4zrsr26hwtcMB5NQIXsYzlagejMIv/89AkdnbAN
+3B70PEeIpuTZSYRP4598dSrGDQqoSpKWVCemMXEoYl0pMMWdAdgESeMJsQEEAOTI
+v3NxcTHczP6iijUVIj99QHi1VPnATlBoZRpxf7mYMALiY1CKNFzx6EXiCi9XCHoj
+spnfcSQqCgtB7EixxRQSCT8WR7w5Q79dtYDSlVLmqjlAll2ea8BxYAqScyDiLA5P
+AI7Y/ey/tAL0bM0qe57pZ64xHFWXIlP0faKUVPbbABEBAAEAA/0cYK4mop6YwbuH
+ph+gf/OUjnOtxUg6BllwbdKEmilumurxoKUS+2GNWdAmwufigVgi1kS0A1wkUTaX
+uOCXD73TCHPcKMRp7YLZzg9jy/XlDgbPn4qRl1qa7RHPvAV5a4j4upcw+EzP2B+3
+z7e/zlQkFOuKdSYj/zgBidRwxRs9kQIA8HngrqvrRNC03IzQLZqYhRFV7AxlK2Xl
+ikFdEj0xXAy6ep4svwsBtHnUUfmXZL/cG4A6MNp1in4XK6WNuBmmyQIA842mH8nV
+/vmIwm+lfDTs1Y5BMoV2g9VhOixJ3WO2HQVGbYB42tBd3cw+tp5urSPESFN9rcq8
+U2+XE+wPZRiugwIA0aBXyLN73fhkTUduTJhYOgtEl1yormrsEZHwYiNI7zLjc71p
+3aMZsXpAZV3lcgPV83O7ESgC3z+Pbux6tA8+k6R7iQE9BBgBAgAJBQJJ4wmxAhsC
+AKgJEBVPY3v2LG07nSAEGQECAAYFAknjCbEACgkQ2KKLf9el2Ie2bQP/T8ThPu2S
+eq5tR8aGxJJ1w0U6szTT2UIyXu8gBO3SM8wLVcBuIXdkxOshUP2Xua758tLmns5X
+aoDiK8am9E22zX1BXyjVmK/74of6yjsf+VxJJtTlgpxeFH+zI1zZxIA1TlRg2TvV
+IHC5oD1i5v1P2xugtzby4aGHTXdi8pUMKGLMegP/U61Kg6OcqEA2C6288UhsNWZz
+ReY9mMOlC8z+TET6IfHnP5hd4+hZxKDWKgj0V95vbWzA/XkZIiK09wjjht6Oqw5t
+qq4R38D5kkFUr9yLlwTxMw+jipVTTFlbjjWhsnEoH16QZRQr0PT0FlSnn3CGy+sd
+Sdq+vMpQsIh1h85JVQI=
+=+Gnh
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/tests/wrongarch.test b/tests/wrongarch.test
new file mode 100644
index 0000000..ab1abf7
--- /dev/null
+++ b/tests/wrongarch.test
@@ -0,0 +1,86 @@
+set -u
+. "$TESTSDIR"/test.inc
+
+mkdir conf
+cat > conf/distributions <<EOF
+Codename: test
+Architectures: a1 a2 source
+Components: main
+Update: update
+EOF
+cat > conf/updates <<EOF
+Name: update
+Architectures: a>a2 source
+Suite: test
+Method: file:${WORKDIR}/test
+IgnoreRelease: yes
+EOF
+mkdir test
+mkdir test/dists
+mkdir test/dists/test
+mkdir test/dists/test/main
+mkdir test/dists/test/main/binary-a
+mkdir test/dists/test/main/source
+
+cat > test/dists/test/main/binary-a/Packages <<EOF
+Package: fake1
+Version: 0a
+Architecture: a
+Filename: filename
+Size: 1
+MD5sum: 1111111111111111
+
+Package: fake2
+Version: 2all
+Architecture: all
+Filename: filename
+Size: 1
+MD5sum: 1111111111111111
+EOF
+cat > test/dists/test/main/source/Sources <<EOF
+Package: fake1
+Version: 0s
+Files:
+ 1111111111111111 1 somefile
+
+Package: fake2
+Version: 2s
+Files:
+ 1111111111111111 1 somefile
+EOF
+
+testrun - dumpupdate 3<<EOF
+stderr
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages.gz'
+*=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages.gz':
+='<File not there, apt-method suggests '${WORKDIR}/test/dists/test/main/binary-a/Packages' instead>'
+='File not found'
+='File not found - ${WORKDIR}/test/dists/test/main/binary-a/Packages.gz (2: No such file or directory)'
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages.bz2'
+*=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages.bz2':
+='File not found - ${WORKDIR}/test/dists/test/main/binary-a/Packages.bz2 (2: No such file or directory)'
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/source/Sources.gz'
+*=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/source/Sources.gz':
+='File not found - ${WORKDIR}/test/dists/test/main/source/Sources.gz (2: No such file or directory)'
+='<File not there, apt-method suggests '${WORKDIR}/test/dists/test/main/source/Sources' instead>'
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/source/Sources.bz2'
+*=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/source/Sources.bz2':
+='File not found - ${WORKDIR}/test/dists/test/main/source/Sources.bz2 (2: No such file or directory)'
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages'
+-v2*=Copy file '${WORKDIR}/test/dists/test/main/binary-a/Packages' to './lists/update_test_main_a_Packages'...
+-v6=aptmethod start 'file:${WORKDIR}/test/dists/test/main/source/Sources'
+-v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/main/source/Sources'
+-v2*=Copy file '${WORKDIR}/test/dists/test/main/source/Sources' to './lists/update_test_main_Sources'...
+stdout
+$(odb)
+-v2*=Created directory "./lists"
+*=Updates needed for 'test|main|source':
+*=add 'fake1' - '0s' 'update'
+*=add 'fake2' - '2s' 'update'
+*=Updates needed for 'test|main|a2':
+*=add 'fake2' - '2all' 'update'
+EOF
+
+rm -r conf lists test db
+testsuccess
diff --git a/tool.c b/tool.c
new file mode 100644
index 0000000..cc8579c
--- /dev/null
+++ b/tool.c
@@ -0,0 +1,3099 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2006,2007,2008,2009,2010 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <string.h>
+#include <strings.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <time.h>
+#include "error.h"
+#include "filecntl.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "dirs.h"
+#include "checksums.h"
+#include "chunks.h"
+#include "chunkedit.h"
+#include "signature.h"
+#include "debfile.h"
+#include "sourceextraction.h"
+#include "uncompression.h"
+
+/* for compatibility with used code */
+int verbose=0; /* verbosity level referenced by the reused reprepro sources */
+bool interrupted(void) { /* stub: this standalone tool never reports an interrupt */
+ return false;
+}
+
+static void about(bool help) NORETURN; /* prototype first so the NORETURN attribute is declared before use */
+static void about(bool help) { /* print usage text and terminate the program */
+ fprintf(help?stdout:stderr, /* --help output goes to stdout, error-path usage to stderr */
+"changestool: Modify a Debian style .changes file\n"
+"Syntax: changestool [--create] <changesfile> <commands>\n"
+"Possible commands include:\n"
+" verify\n"
+" updatechecksums [<files to update>]\n"
+" includeallsources [<files to copy from .dsc to .changes>]\n"
+" adddeb <.deb filenames>\n"
+" adddsc <.dsc filenames>\n"
+" addrawfile <filenames>\n"
+" add <filenames processed by filename suffix>\n"
+" setdistribution <distributions to list>\n"
+" dumbremove <filenames>\n"
+);
+ if (help)
+  exit(EXIT_SUCCESS); /* explicit help request: success */
+ else
+  exit(EXIT_FAILURE); /* reached via usage error */
+}
+
+struct binaryfile { /* data about one binary package file (.deb/.udeb/.ddeb) */
+ struct binaryfile *next; // in binaries.files list
+ struct binary *binary; // parent
+ struct fileentry *file; /* owning file entry; not freed by binaryfile_free */
+ char *controlchunk; /* raw control data of the package; owned, freed in binaryfile_free */
+ char *name, *version, *architecture;
+ char *sourcename, *sourceversion;
+ char *maintainer;
+ char *section, *priority;
+ char *shortdescription;
+ bool hasmd5sums; /* NOTE(review): set elsewhere; exact semantics not visible in this chunk */
+};
+
+static void binaryfile_free(struct binaryfile *p) { /* release a binaryfile and every string it owns; NULL is a no-op */
+ if (p == NULL)
+  return;
+
+ free(p->controlchunk);
+ free(p->name);
+ free(p->version);
+ free(p->architecture);
+ free(p->sourcename);
+ free(p->sourceversion);
+ free(p->maintainer);
+ free(p->section);
+ free(p->priority);
+ free(p->shortdescription);
+ free(p); /* ->next, ->binary and ->file are not owned and are left untouched */
+}
+
+enum filetype { ft_UNKNOWN, /* file types recognized by suffix, see typesuffix below */
+  ft_TAR, ft_ORIG_TAR, ft_DIFF,
+#define ft_MaxInSource ft_DSC-1 /* last type that may appear inside a source package */
+ ft_DSC, ft_DEB, ft_UDEB, ft_DDEB, ft_Count};
+#define ft_Max ft_Count-1 /* last real file type */
+
+static const struct {
+ const char *suffix;
+ size_t len; /* strlen(suffix) */
+ bool allowcompressed; /* may carry an additional compression suffix (.gz etc.) */
+} typesuffix[ft_Count] = { /* indexed by enum filetype */
+ { "?", -1, false}, /* ft_UNKNOWN placeholder; NOTE(review): -1 wraps to SIZE_MAX in a size_t */
+ { ".tar", 4, true},
+ { ".orig.tar", 9, true},
+ { ".diff", 5, true},
+ { ".dsc", 4, false},
+ { ".deb", 4, false},
+ { ".udeb", 5, false},
+ { ".ddeb", 5, false}
+};
+
+struct dscfile { /* data parsed from a .dsc (Debian source control) file */
+ struct fileentry *file; /* owning file entry; not freed by dscfile_free */
+ char *name;
+ char *version;
+ struct strlist binaries; /* binary package names listed in the .dsc */
+ char *maintainer;
+ char *controlchunk; /* raw control data; owned, freed in dscfile_free */
+ // hard to get:
+ char *section, *priority;
+ // TODO: check Architectures?
+ struct checksumsarray expected; /* checksums the .dsc declares for its files */
+ struct fileentry **uplink; /* pointer array only; entries owned by the changes file list */
+ bool parsed, modified;
+};
+
+static void dscfile_free(struct dscfile *p) { /* release a dscfile and everything it owns; NULL is a no-op */
+ if (p == NULL)
+  return;
+
+ free(p->name);
+ free(p->version);
+ free(p->maintainer);
+ free(p->controlchunk);
+ free(p->section);
+ free(p->priority);
+ checksumsarray_done(&p->expected); /* releases the embedded checksums array */
+ free(p->uplink); /* frees only the pointer array, not the entries it points to */
+ free(p);
+}
+
+struct fileentry { /* one file referenced by the .changes file (or by its .dsc) */
+ struct fileentry *next;
+ char *basename; size_t namelen;
+ char *fullfilename;
+ /* NULL means was not listed there yet: */
+ struct checksums *checksumsfromchanges, /* as declared in the .changes */
+  *realchecksums; /* as computed from the file on disk */
+ char *section, *priority;
+ enum filetype type;
+ enum compression compression;
+ /* only if type deb or udeb */
+ struct binaryfile *deb;
+ /* only if type dsc */
+ struct dscfile *dsc;
+ int refcount; /* NOTE(review): managed elsewhere; counting scheme not visible in this chunk */
+};
+struct changes;
+static struct fileentry *add_fileentry(struct changes *c, const char *basefilename, size_t len, bool source, /*@null@*//*@out@*/size_t *ofs_p); /* forward declaration; defined below */
+
+struct changes { /* in-memory representation of one .changes file */
+ /* the filename of the .changes file */
+ char *filename;
+ /* directory of filename */
+ char *basedir;
+ /* Contents of the .changes file: */
+ char *name;
+ char *version;
+ char *maintainer;
+ char *control; /* raw control chunk of the .changes file */
+ struct strlist architectures;
+ struct strlist distributions;
+ size_t binarycount; /* number of entries in the binaries array below */
+ struct binary {
+  char *name;
+  char *description;
+  struct binaryfile *files; /* owned by the fileentry list, see changes_free */
+  bool missedinheader, uncheckable;
+ } *binaries; /* array of binarycount entries, freed in changes_free */
+ struct fileentry *files; /* singly linked list of all referenced files */
+ bool modified; /* presumably: in-memory data diverges from the file on disk — confirm at call sites */
+};
+
+static void fileentry_free(/*@only@*/struct fileentry *f) { /* free one entry and its type-specific payload; NULL tolerated */
+ if (f == NULL)
+  return;
+ free(f->basename);
+ free(f->fullfilename);
+ checksums_free(f->checksumsfromchanges);
+ checksums_free(f->realchecksums);
+ free(f->section);
+ free(f->priority);
+ if (f->type == ft_DEB || f->type == ft_DDEB || f->type == ft_UDEB) { /* ->deb and ->dsc are alternatives selected by ->type */
+  binaryfile_free(f->deb);
+ } else if (f->type == ft_DSC) {
+  dscfile_free(f->dsc);
+ }
+ free(f);
+}
+
+static void changes_free(struct changes *c) { /* free a changes structure and everything it owns; NULL is a no-op */
+ unsigned int i;
+
+ if (c == NULL)
+  return;
+
+ free(c->filename);
+ free(c->basedir);
+ free(c->name);
+ free(c->version);
+ free(c->maintainer);
+ free(c->control);
+ strlist_done(&c->architectures);
+ strlist_done(&c->distributions);
+ for (i = 0 ; i < c->binarycount ; i++) {
+  free(c->binaries[i].name);
+  free(c->binaries[i].description);
+  // .files belongs elsewhere
+ }
+ free(c->binaries);
+ while (c->files) { /* unlink and free the fileentry list head by head */
+  struct fileentry *f = c->files;
+  c->files = f->next;
+  fileentry_free(f);
+ }
+ free(c);
+}
+
+static struct fileentry **find_fileentry(struct changes *c, const char *basefilename, size_t basenamelen, size_t *ofs_p) { /* return the link slot of the matching entry (or of the list-terminating NULL), index in *ofs_p */
+ struct fileentry **fp = &c->files;
+ struct fileentry *f;
+ size_t ofs = 0;
+
+ while ((f=*fp) != NULL) {
+  if (f->namelen == basenamelen &&
+    strncmp(basefilename, f->basename, basenamelen) == 0) {
+   break;
+  }
+  fp = &f->next;
+  ofs++;
+ }
+ if (ofs_p != NULL)
+  *ofs_p = ofs; /* position of the match, or list length if not found */
+ return fp; /* never NULL: a new entry can be linked via *fp */
+}
+
+static struct fileentry *add_fileentry(struct changes *c, const char *basefilename, size_t len, bool source, size_t *ofs_p) { /* look up basefilename, creating a new entry (with guessed type and compression) if missing; NULL on allocation failure */
+ size_t ofs = 0;
+ struct fileentry **fp = find_fileentry(c, basefilename, len, &ofs);
+ struct fileentry *f = *fp;
+
+ if (f == NULL) {
+  enum compression; /* NOTE(review): vacuous declaration with no effect; likely leftover */
+
+  f = zNEW(struct fileentry);
+  if (FAILEDTOALLOC(f))
+   return NULL;
+  f->basename = strndup(basefilename, len);
+  f->namelen = len;
+
+  if (FAILEDTOALLOC(f->basename)) {
+   free(f);
+   return NULL;
+  }
+  *fp = f; /* link the new entry at the end of the list */
+
+  /* guess compression */
+  f->compression = compression_by_suffix(f->basename, &len); /* NOTE(review): presumably shortens len past any compression suffix — confirm in uncompression.c */
+
+  /* guess type */
+  for (f->type = source?ft_MaxInSource:ft_Max ; /* for source files only types up to ft_MaxInSource are considered */
+    f->type > ft_UNKNOWN ; f->type--) {
+   size_t l = typesuffix[f->type].len;
+
+   if (f->compression != c_none &&
+     !typesuffix[f->type].allowcompressed)
+    continue;
+   if (len <= l)
+    continue;
+   if (strncmp(f->basename + (len-l),
+     typesuffix[f->type].suffix,
+     l) == 0)
+    break; /* suffix matched; f->type stays at this value */
+  }
+ }
+ if (ofs_p != NULL)
+  *ofs_p = ofs;
+ return f;
+}
+
+/* Try to locate basefilename: first in searchfirstin (if given),
+ * then next to the .changes file, then in each directory of
+ * searchpath.  On success *result is a newly allocated full path;
+ * RET_NOTHING means the file was not found anywhere. */
+static retvalue searchforfile(const char *changesdir, const char *basefilename, /*@null@*/const struct strlist *searchpath, /*@null@*/const char *searchfirstin, char **result) {
+	char *candidate;
+	int i;
+
+	if (searchfirstin != NULL) {
+		candidate = calc_dirconcat(searchfirstin, basefilename);
+		if (FAILEDTOALLOC(candidate))
+			return RET_ERROR_OOM;
+		if (isregularfile(candidate)) {
+			*result = candidate;
+			return RET_OK;
+		}
+		free(candidate);
+	}
+
+	candidate = calc_dirconcat(changesdir, basefilename);
+	if (FAILEDTOALLOC(candidate))
+		return RET_ERROR_OOM;
+	if (isregularfile(candidate)) {
+		*result = candidate;
+		return RET_OK;
+	}
+
+	for (i = 0 ; searchpath != NULL && i < searchpath->count ; i++) {
+		free(candidate);
+		candidate = calc_dirconcat(searchpath->values[i],
+				basefilename);
+		if (FAILEDTOALLOC(candidate))
+			return RET_ERROR_OOM;
+		if (isregularfile(candidate)) {
+			*result = candidate;
+			return RET_OK;
+		}
+	}
+	free(candidate);
+	return RET_NOTHING;
+}
+
+/* Resolve filename to a full path: a bare basename is looked up via
+ * searchforfile(), anything containing a slash is taken as-is (if it
+ * exists).  On success *result is a newly allocated path. */
+static retvalue findfile(const char *filename, const struct changes *c, /*@null@*/const struct strlist *searchpath, /*@null@*/const char *searchfirstin, char **result) {
+	char *fullfilename;
+
+	/* strrchr instead of the legacy rindex()
+	 * (removed in POSIX.1-2008) */
+	if (strrchr(filename, '/') == NULL) {
+		retvalue r;
+
+		r = searchforfile(c->basedir, filename,
+				searchpath, searchfirstin, &fullfilename);
+		if (!RET_IS_OK(r))
+			return r;
+	} else {
+		if (!isregularfile(filename))
+			return RET_NOTHING;
+		fullfilename = strdup(filename);
+		if (FAILEDTOALLOC(fullfilename))
+			return RET_ERROR_OOM;
+	}
+	*result = fullfilename;
+	return RET_OK;
+}
+
+/* Register a file of a known type.  Takes ownership of both name
+ * arguments.  If an entry with that basename already exists, the
+ * arguments are freed, *file is set to the existing entry and
+ * RET_NOTHING is returned. */
+static retvalue add_file(struct changes *c, /*@only@*/char *basefilename, /*@only@*/char *fullfilename, enum filetype type, struct fileentry **file) {
+	size_t namelen = strlen(basefilename);
+	struct fileentry **link = find_fileentry(c, basefilename,
+			namelen, NULL);
+	struct fileentry *entry = *link;
+
+	if (entry != NULL) {
+		/* duplicate: hand back the existing entry */
+		*file = entry;
+		free(basefilename);
+		free(fullfilename);
+		return RET_NOTHING;
+	}
+	entry = zNEW(struct fileentry);
+	if (FAILEDTOALLOC(entry)) {
+		free(basefilename);
+		free(fullfilename);
+		return RET_ERROR_OOM;
+	}
+	entry->basename = basefilename;
+	entry->namelen = namelen;
+	entry->fullfilename = fullfilename;
+	entry->type = type;
+	entry->compression = c_none;
+
+	*link = entry;
+	*file = entry;
+	return RET_OK;
+}
+
+
+/* Find the record for the binary package named by the first len
+ * bytes of p, appending a new (empty) record to c->binaries if none
+ * exists yet.  Returns NULL only on OOM. */
+static struct binary *get_binary(struct changes *c, const char *p, size_t len) {
+	unsigned int j;
+
+	for (j = 0 ; j < c->binarycount ; j++) {
+		if (strncmp(c->binaries[j].name, p, len) == 0 &&
+				c->binaries[j].name[len] == '\0')
+			break;
+	}
+	if (j == c->binarycount) {
+		char *name = strndup(p, len);
+		struct binary *n;
+
+		if (FAILEDTOALLOC(name))
+			return NULL;
+		/* grow by one; n keeps the old pointer valid if
+		 * realloc fails */
+		n = realloc(c->binaries, (j+1)*sizeof(struct binary));
+		if (FAILEDTOALLOC(n)) {
+			free(name);
+			return NULL;
+		}
+		c->binaries = n;
+		c->binarycount = j+1;
+		c->binaries[j].name = name;
+		c->binaries[j].description = NULL;
+		c->binaries[j].files = NULL;
+		/* not yet seen in the .changes header fields */
+		c->binaries[j].missedinheader = true;
+		c->binaries[j].uncheckable = false;
+	}
+	assert (j < c->binarycount);
+	return &c->binaries[j];
+}
+
+/* Parse the Description: lines of a .changes file, each of the form
+ * "binaryname - short description", attaching the description to the
+ * (possibly newly created) binary record. */
+static retvalue parse_changes_description(struct changes *c, struct strlist *tmp) {
+	int i;
+
+	for (i = 0 ; i < tmp->count ; i++) {
+		struct binary *b;
+		const char *p = tmp->values[i];
+		const char *e = p;
+		const char *d;
+
+		/* the package name runs until the first whitespace */
+		while (*e != '\0' && *e != ' ' && *e != '\t')
+			e++;
+		d = e;
+		while (*d == ' ' || *d == '\t')
+			d++;
+		/* skip the customary "-" separator */
+		if (*d == '-')
+			d++;
+		while (*d == ' ' || *d == '\t')
+			d++;
+
+		b = get_binary(c, p, e-p);
+		if (FAILEDTOALLOC(b))
+			return RET_ERROR_OOM;
+
+		/* free any previous value so duplicate lines for the
+		 * same binary do not leak the earlier description */
+		free(b->description);
+		b->description = strdup(d);
+		if (FAILEDTOALLOC(b->description))
+			return RET_ERROR_OOM;
+	}
+	return RET_OK;
+}
+
+/* Parse the Files: and Checksums-*: fields of a .changes file into
+ * fileentries with their advertised checksums.  filelines is indexed
+ * by checksum type; the md5 ("Files:") lines additionally carry the
+ * section and priority columns. */
+static retvalue parse_changes_files(struct changes *c, struct strlist filelines[cs_hashCOUNT]) {
+	int i;
+	struct fileentry *f;
+	retvalue r;
+	struct hashes *hashes;
+	struct strlist *tmp;
+	size_t ofs, count = 0;
+	enum checksumtype cs;
+
+	tmp = &filelines[cs_md5sum];
+	hashes = nzNEW(tmp->count, struct hashes);
+	if (FAILEDTOALLOC(hashes))
+		return RET_ERROR_OOM;
+
+	for (i = 0 ; i < tmp->count ; i++) {
+		char *p;
+		const char *md5start, *md5end, *sizestart, *sizeend,
+			*sectionstart, *sectionend, *priostart, *prioend,
+			*filestart, *fileend;
+		p = tmp->values[i];
+#undef xisspace
+#define xisspace(c) (c == ' ' || c == '\t')
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		md5start = p;
+		/* lowercase the hex digits in place while scanning */
+		while ((*p >= '0' && *p <= '9') ||
+				(*p >= 'A' && *p <= 'F') ||
+				(*p >= 'a' && *p <= 'f')) {
+			if (*p >= 'A' && *p <= 'F')
+				(*p) += 'a' - 'A';
+			p++;
+		}
+		md5end = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		/* strip leading zeros from the size */
+		while (*p == '0' && ('0' <= p[1] && p[1] <= '9'))
+			p++;
+		sizestart = p;
+		while ((*p >= '0' && *p <= '9'))
+			p++;
+		sizeend = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		sectionstart = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		sectionend = p;
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		priostart = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		prioend = p;
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		filestart = p;
+		while (*p !='\0' && !xisspace(*p))
+			p++;
+		fileend = p;
+		while (*p !='\0' && xisspace(*p))
+			p++;
+		if (*p != '\0') {
+			fprintf(stderr,
+"Unexpected sixth argument in '%s'!\n",
+					tmp->values[i]);
+			free(hashes);
+			return RET_ERROR;
+		}
+		if (fileend - filestart == 0)
+			continue;
+		f = add_fileentry(c, filestart, fileend-filestart, false, &ofs);
+		/* was: OOM result was dereferenced without check */
+		if (FAILEDTOALLOC(f)) {
+			free(hashes);
+			return RET_ERROR_OOM;
+		}
+		assert (ofs <= count);
+		if (ofs == count)
+			count++;
+		if (hashes[ofs].hashes[cs_md5sum].start != NULL) {
+			fprintf(stderr,
+"WARNING: Multiple occourance of '%s' in .changes file!\nIgnoring all but the first one.\n",
+					f->basename);
+			continue;
+		}
+		hashes[ofs].hashes[cs_md5sum].start = md5start;
+		hashes[ofs].hashes[cs_md5sum].len = md5end - md5start;
+		hashes[ofs].hashes[cs_length].start = sizestart;
+		hashes[ofs].hashes[cs_length].len = sizeend - sizestart;
+
+		if (sectionend - sectionstart == 1 && *sectionstart == '-') {
+			f->section = NULL;
+		} else {
+			f->section = strndup(sectionstart,
+					sectionend - sectionstart);
+			if (FAILEDTOALLOC(f->section)) {
+				/* was: hashes leaked here */
+				free(hashes);
+				return RET_ERROR_OOM;
+			}
+		}
+		if (prioend - priostart == 1 && *priostart == '-') {
+			f->priority = NULL;
+		} else {
+			f->priority = strndup(priostart, prioend - priostart);
+			if (FAILEDTOALLOC(f->priority)) {
+				/* was: hashes leaked here */
+				free(hashes);
+				return RET_ERROR_OOM;
+			}
+		}
+	}
+	const char * const hashname[cs_hashCOUNT] = {"Md5", "Sha1", "Sha256", "Sha512" };
+	for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) {
+		tmp = &filelines[cs];
+
+		for (i = 0 ; i < tmp->count ; i++) {
+			char *p;
+			const char *hashstart, *hashend, *sizestart, *sizeend,
+				*filestart, *fileend;
+			p = tmp->values[i];
+			while (*p !='\0' && xisspace(*p))
+				p++;
+			hashstart = p;
+			while ((*p >= '0' && *p <= '9') ||
+					(*p >= 'A' && *p <= 'F') ||
+					(*p >= 'a' && *p <= 'f') ) {
+				if (*p >= 'A' && *p <= 'F')
+					(*p) += 'a' - 'A';
+				p++;
+			}
+			hashend = p;
+			while (*p !='\0' && !xisspace(*p))
+				p++;
+			while (*p !='\0' && xisspace(*p))
+				p++;
+			while (*p == '0' && ('0' <= p[1] && p[1] <= '9'))
+				p++;
+			sizestart = p;
+			while ((*p >= '0' && *p <= '9'))
+				p++;
+			sizeend = p;
+			while (*p !='\0' && !xisspace(*p))
+				p++;
+			while (*p !='\0' && xisspace(*p))
+				p++;
+			filestart = p;
+			while (*p !='\0' && !xisspace(*p))
+				p++;
+			fileend = p;
+			while (*p !='\0' && xisspace(*p))
+				p++;
+			if (*p != '\0') {
+				fprintf(stderr,
+"Unexpected forth argument in '%s'!\n",
+						tmp->values[i]);
+				/* was: hashes leaked here */
+				free(hashes);
+				return RET_ERROR;
+			}
+			if (fileend - filestart == 0)
+				continue;
+			f = add_fileentry(c, filestart, fileend-filestart,
+					false, &ofs);
+			/* was: OOM result was dereferenced without check */
+			if (FAILEDTOALLOC(f)) {
+				free(hashes);
+				return RET_ERROR_OOM;
+			}
+			assert (ofs <= count);
+			// until md5sums are no longer obligatory:
+			/* NOTE(review): a file only listed here stays in
+			 * c->files although no hashes slot exists for it;
+			 * the final loop below assumes both stay in sync */
+			if (ofs == count)
+				continue;
+			if (hashes[ofs].hashes[cs].start != NULL) {
+				fprintf(stderr,
+"WARNING: Multiple occourance of '%s' in Checksums-'%s' of .changes file!\n"
+"Ignoring all but the first one.\n",
+						f->basename, hashname[cs]);
+				continue;
+			}
+			hashes[ofs].hashes[cs].start = hashstart;
+			hashes[ofs].hashes[cs].len = hashend - hashstart;
+
+			size_t sizelen = sizeend - sizestart;
+
+			if (hashes[ofs].hashes[cs_length].start == NULL) {
+				hashes[ofs].hashes[cs_length].start = sizestart;
+				hashes[ofs].hashes[cs_length].len = sizelen;
+
+			} else if (hashes[ofs].hashes[cs_length].len != sizelen
+					|| memcmp(sizestart,
+					hashes[ofs].hashes[cs_length].start,
+					sizelen) != 0) {
+				fprintf(stderr,
+"Error: Contradicting file size information for '%s' ('%.*s' vs '%.*s') in .changes file\n",
+					f->basename,
+					(int)sizelen, sizestart,
+					(int)hashes[ofs].hashes[cs_length].len,
+					hashes[ofs].hashes[cs_length].start);
+				/* was: hashes leaked here */
+				free(hashes);
+				return RET_ERROR;
+			}
+		}
+	}
+	ofs = 0;
+	for (f = c->files ; f != NULL ; f = f->next, ofs++) {
+		r = checksums_initialize(&f->checksumsfromchanges,
+				hashes[ofs].hashes);
+		if (RET_WAS_ERROR(r)) {
+			/* was: hashes leaked here */
+			free(hashes);
+			return r;
+		}
+	}
+	assert (count == ofs);
+	free(hashes);
+
+	return RET_OK;
+}
+
+/* Read a .dsc file: strip/verify the signature, extract the control
+ * fields and parse the Files:/Checksums-*: lists into n->expected.
+ * On success *dsc is the newly allocated result. */
+static retvalue read_dscfile(const char *fullfilename, struct dscfile **dsc) {
+	struct dscfile *n;
+	struct strlist filelines[cs_hashCOUNT];
+	enum checksumtype cs;
+	retvalue r;
+
+	n = zNEW(struct dscfile);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	r = signature_readsignedchunk(fullfilename, fullfilename,
+			&n->controlchunk, NULL, NULL);
+	assert (r != RET_NOTHING);
+	// TODO: can this be ignored sometimes?
+	if (RET_WAS_ERROR(r)) {
+		/* NOTE(review): plain free() assumes controlchunk was
+		 * not allocated on error — confirm against
+		 * signature_readsignedchunk */
+		free(n);
+		return r;
+	}
+	r = chunk_getname(n->controlchunk, "Source", &n->name, false);
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Maintainer", &n->maintainer);
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Version", &n->version);
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+
+	/* usually not here, but hidden in the contents */
+	r = chunk_getvalue(n->controlchunk, "Section", &n->section);
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+	/* dito */
+	r = chunk_getvalue(n->controlchunk, "Priority", &n->priority);
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+
+	/* collect the file list lines per checksum type; only the
+	 * md5 "Files" field is mandatory */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (source_checksum_names[cs] != NULL);
+		r = chunk_getextralinelist(n->controlchunk,
+				source_checksum_names[cs], &filelines[cs]);
+		if (r == RET_NOTHING) {
+			if (cs == cs_md5sum) {
+				fprintf(stderr,
+"Error: Missing 'Files' entry in '%s'!\n", fullfilename);
+				r = RET_ERROR;
+			}
+			strlist_init(&filelines[cs]);
+		}
+		if (RET_WAS_ERROR(r)) {
+			/* release the lists collected so far */
+			while (cs-- > cs_md5sum) {
+				strlist_done(&filelines[cs]);
+			}
+			dscfile_free(n);
+			return r;
+		}
+	}
+	r = checksumsarray_parse(&n->expected, filelines, fullfilename);
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		strlist_done(&filelines[cs]);
+	}
+	if (RET_WAS_ERROR(r)) {
+		dscfile_free(n);
+		return r;
+	}
+	if (n->expected.names.count > 0) {
+		/* one uplink slot per referenced file,
+		 * filled in by parse_dsc */
+		n->uplink = nzNEW(n->expected.names.count, struct fileentry *);
+		if (FAILEDTOALLOC(n->uplink)) {
+			dscfile_free(n);
+			return RET_ERROR_OOM;
+		}
+	}
+	*dsc = n;
+	return RET_OK;
+}
+
+/* Read and parse the .dsc a fileentry points to, creating (or
+ * linking to) a fileentry in the changes for every file the .dsc
+ * references. */
+static retvalue parse_dsc(struct fileentry *dscfile, struct changes *changes) {
+	struct dscfile *parsed;
+	retvalue r;
+	int i;
+
+	if (dscfile->fullfilename == NULL)
+		return RET_NOTHING;
+	r = read_dscfile(dscfile->fullfilename, &parsed);
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+	for (i = 0 ; i < parsed->expected.names.count ; i++) {
+		const char *basefilename = parsed->expected.names.values[i];
+
+		parsed->uplink[i] = add_fileentry(changes,
+				basefilename, strlen(basefilename),
+				true, NULL);
+		if (FAILEDTOALLOC(parsed->uplink[i])) {
+			dscfile_free(parsed);
+			return RET_ERROR_OOM;
+		}
+	}
+	dscfile->dsc = parsed;
+	return RET_OK;
+}
+
+/* Flag bits selecting which parts of the .dsc to regenerate. */
+#define DSC_WRITE_FILES 1
+#define DSC_WRITE_ALL 0xFFFF
+/* true if flag bit a is set in the local variable 'flags' */
+#define flagset(a) (flags & a) != 0
+
+/* Rewrite the .dsc file on disk from its (possibly modified) parsed
+ * representation.  With DSC_WRITE_FILES the Files:/Checksums-*:
+ * fields are regenerated from dsc->expected.  Any signature is lost
+ * (see TODO below); the entry's recorded checksums are updated. */
+static retvalue write_dsc_file(struct fileentry *dscfile, unsigned int flags) {
+	struct dscfile *dsc = dscfile->dsc;
+	int i;
+	struct chunkeditfield *cef;
+	retvalue r;
+	char *control; size_t controllen;
+	struct checksums *checksums;
+	char *destfilename;
+	enum checksumtype cs;
+
+	if (flagset(DSC_WRITE_FILES)) {
+		cef = NULL;
+		/* build the edit chain backwards so the fields end up
+		 * in Files, Checksums-Sha1, ... order */
+		for (cs = cs_hashCOUNT ; (cs--) > cs_md5sum ; ) {
+			cef = cef_newfield(source_checksum_names[cs],
+					CEF_ADD, CEF_LATE,
+					dsc->expected.names.count, cef);
+			if (FAILEDTOALLOC(cef))
+				return RET_ERROR_OOM;
+			for (i = 0 ; i < dsc->expected.names.count ; i++) {
+				const char *basefilename =
+					dsc->expected.names.values[i];
+				const char *hash, *size;
+				size_t hashlen, sizelen;
+
+				/* drop a whole checksum field again if
+				 * some file lacks that hash type */
+				if (!checksums_gethashpart(dsc->expected.checksums[i],
+						cs, &hash, &hashlen,
+						&size, &sizelen)) {
+					assert (cs != cs_md5sum);
+					cef = cef_pop(cef);
+					break;
+				}
+				cef_setline2(cef, i, hash, hashlen,
+						size, sizelen,
+						1, basefilename, NULL);
+			}
+		}
+	} else
+		cef = NULL;
+
+	r = chunk_edit(dsc->controlchunk, &control, &controllen, cef);
+	cef_free(cef);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (RET_IS_OK(r));
+
+	// TODO: try to add the signatures to it again...
+
+	// TODO: add options to place changed files in different directory...
+	if (dscfile->fullfilename != NULL)
+		destfilename = strdup(dscfile->fullfilename);
+	else
+		destfilename = strdup(dscfile->basename);
+	if (FAILEDTOALLOC(destfilename)) {
+		free(control);
+		return RET_ERROR_OOM;
+	}
+
+	/* write the new control data and get its checksums back */
+	r = checksums_replace(destfilename, control, controllen, &checksums);
+	if (RET_WAS_ERROR(r)) {
+		free(destfilename);
+		free(control);
+		return r;
+	}
+	assert (RET_IS_OK(r));
+
+	/* commit the new state into the fileentry/dscfile */
+	free(dscfile->fullfilename);
+	dscfile->fullfilename = destfilename;
+	checksums_free(dscfile->realchecksums);
+	dscfile->realchecksums = checksums;
+	free(dsc->controlchunk);
+	dsc->controlchunk = control;
+	return RET_OK;
+}
+
+/* Extract and parse the control data of a .deb/.udeb file.  Returns
+ * RET_NOTHING if the control data cannot be read (only OOM is
+ * propagated as an error).  On success *result is the newly
+ * allocated data. */
+static retvalue read_binaryfile(const char *fullfilename, struct binaryfile **result) {
+	retvalue r;
+	struct binaryfile *n;
+
+	n = zNEW(struct binaryfile);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+
+	r = extractcontrol(&n->controlchunk, fullfilename);
+	if (!RET_IS_OK(r)) {
+		/* treat an unreadable control file as "no
+		 * information" rather than a hard error */
+		free(n);
+		if (r == RET_ERROR_OOM)
+			return r;
+		else
+			return RET_NOTHING;
+	}
+
+	r = chunk_getname(n->controlchunk, "Package", &n->name, false);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Version", &n->version);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getnameandversion(n->controlchunk, "Source",
+			&n->sourcename, &n->sourceversion);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Maintainer", &n->maintainer);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Architecture", &n->architecture);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Section", &n->section);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Priority", &n->priority);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	r = chunk_getvalue(n->controlchunk, "Description", &n->shortdescription);
+	if (RET_WAS_ERROR(r)) {
+		binaryfile_free(n);
+		return r;
+	}
+	*result = n;
+	return RET_OK;
+}
+
+/* Read the control data of a .deb/.udeb and link it to the binary
+ * record matching its Package: name. */
+static retvalue parse_deb(struct fileentry *debfile, struct changes *changes) {
+	struct binaryfile *parsed;
+	retvalue r;
+
+	if (debfile->fullfilename == NULL)
+		return RET_NOTHING;
+	r = read_binaryfile(debfile->fullfilename, &parsed);
+	if (!RET_IS_OK(r))
+		return r;
+	if (parsed->name != NULL) {
+		parsed->binary = get_binary(changes,
+				parsed->name, strlen(parsed->name));
+		if (FAILEDTOALLOC(parsed->binary)) {
+			binaryfile_free(parsed);
+			return RET_ERROR_OOM;
+		}
+		/* prepend to the binary's list of files */
+		parsed->next = parsed->binary->files;
+		parsed->binary->files = parsed;
+	}
+
+	debfile->deb = parsed;
+	return RET_OK;
+}
+
+/* Locate every file listed in the .changes on disk (next to the
+ * .changes file or in searchpath) and parse found .dsc/.deb files.
+ * Files that cannot be found or read are handled leniently. */
+static retvalue processfiles(const char *changesfilename, struct changes *changes,
+		const struct strlist *searchpath) {
+	char *dir;
+	struct fileentry *file;
+	retvalue r;
+
+	r = dirs_getdirectory(changesfilename, &dir);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	for (file = changes->files; file != NULL ; file = file->next) {
+		assert (file->fullfilename == NULL);
+
+		r = searchforfile(dir, file->basename, searchpath, NULL,
+				&file->fullfilename);
+
+		if (RET_IS_OK(r)) {
+			if (file->type == ft_DSC)
+				r = parse_dsc(file, changes);
+			else if (file->type == ft_DEB || file->type == ft_DDEB || file->type == ft_UDEB)
+				r = parse_deb(file, changes);
+			if (RET_WAS_ERROR(r)) {
+				free(dir);
+				return r;
+			}
+		}
+
+		if (r == RET_NOTHING) {
+			/* apply heuristics when not readable */
+			if (file->type == ft_DSC) {
+			} else if (file->type == ft_DEB || file->type == ft_DDEB || file->type == ft_UDEB) {
+				struct binary *b; size_t len;
+
+				/* guess the package name from the file
+				 * name part before the first '_' */
+				len = 0;
+				while (file->basename[len] != '_' &&
+						file->basename[len] != '\0')
+					len++;
+				b = get_binary(changes, file->basename, len);
+				if (FAILEDTOALLOC(b)) {
+					free(dir);
+					return RET_ERROR_OOM;
+				}
+				/* its control data cannot be verified */
+				b->uncheckable = true;
+			}
+		}
+	}
+	free(dir);
+	return RET_OK;
+}
+
+/* Build a struct changes from the control chunk of changesfile:
+ * header fields, binary list, descriptions and the file lists, then
+ * locate and parse the referenced files on disk.  Missing header
+ * fields are warned about but not fatal. */
+static retvalue parse_changes(const char *changesfile, const char *chunk, struct changes **changes, const struct strlist *searchpath) {
+	retvalue r;
+	struct strlist tmp;
+	struct strlist filelines[cs_hashCOUNT];
+	enum checksumtype cs;
+/* shorthand: on hard error free the half-built result and bail out */
+#define R if (RET_WAS_ERROR(r)) { changes_free(n); return r; }
+
+	struct changes *n = zNEW(struct changes);
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->filename = strdup(changesfile);
+	if (FAILEDTOALLOC(n->filename)) {
+		changes_free(n);
+		return RET_ERROR_OOM;
+	}
+	r = dirs_getdirectory(changesfile, &n->basedir);
+	R;
+	// TODO: do getname here? trim spaces?
+	r = chunk_getvalue(chunk, "Source", &n->name);
+	R;
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Source:' field in %s!\n",
+				changesfile);
+		n->name = NULL;
+	}
+	r = chunk_getvalue(chunk, "Version", &n->version);
+	R;
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Version:' field in %s!\n",
+				changesfile);
+		n->version = NULL;
+	}
+	r = chunk_getwordlist(chunk, "Architecture", &n->architectures);
+	R;
+	if (r == RET_NOTHING)
+		strlist_init(&n->architectures);
+	r = chunk_getwordlist(chunk, "Distribution", &n->distributions);
+	R;
+	if (r == RET_NOTHING)
+		strlist_init(&n->distributions);
+	r = chunk_getvalue(chunk, "Maintainer", &n->maintainer);
+	R;
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Missing 'Maintainer:' field in %s!\n",
+				changesfile);
+		n->maintainer = NULL;
+	}
+	r = chunk_getuniqwordlist(chunk, "Binary", &tmp);
+	R;
+	if (r == RET_NOTHING) {
+		n->binaries = NULL;
+	} else {
+		int i;
+
+		assert (RET_IS_OK(r));
+		/* steal the strings from tmp into the binaries array */
+		n->binaries = nzNEW(tmp.count, struct binary);
+		if (FAILEDTOALLOC(n->binaries)) {
+			changes_free(n);
+			return RET_ERROR_OOM;
+		}
+		for (i = 0 ; i < tmp.count ; i++) {
+			n->binaries[i].name = tmp.values[i];
+		}
+		n->binarycount = tmp.count;
+		free(tmp.values);
+	}
+	r = chunk_getextralinelist(chunk, "Description", &tmp);
+	R;
+	if (RET_IS_OK(r)) {
+		r = parse_changes_description(n, &tmp);
+		strlist_done(&tmp);
+		if (RET_WAS_ERROR(r)) {
+			changes_free(n);
+			return r;
+		}
+	}
+	/* collect Files: plus Checksums-*:; without a Files: field the
+	 * file lists are skipped entirely (cs stays < cs_hashCOUNT) */
+	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+		assert (changes_checksum_names[cs] != NULL);
+		r = chunk_getextralinelist(chunk,
+				changes_checksum_names[cs], &filelines[cs]);
+		if (r == RET_NOTHING) {
+			if (cs == cs_md5sum)
+				break;
+			strlist_init(&filelines[cs]);
+		}
+		if (RET_WAS_ERROR(r)) {
+			while (cs-- > cs_md5sum) {
+				strlist_done(&filelines[cs]);
+			}
+			changes_free(n);
+			return r;
+		}
+	}
+	if (cs == cs_hashCOUNT) {
+		r = parse_changes_files(n, filelines);
+		for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+			strlist_done(&filelines[cs]);
+		}
+		if (RET_WAS_ERROR(r)) {
+			changes_free(n);
+			return r;
+		}
+	}
+	r = processfiles(changesfile, n, searchpath);
+	R;
+	*changes = n;
+	return RET_OK;
+}
+
+/* Flag bits selecting which .changes fields to regenerate from the
+ * in-memory data; other fields are kept as found in the file. */
+#define CHANGES_WRITE_FILES 0x01
+#define CHANGES_WRITE_BINARIES 0x02
+#define CHANGES_WRITE_SOURCE 0x04
+#define CHANGES_WRITE_VERSION 0x08
+#define CHANGES_WRITE_ARCHITECTURES 0x10
+#define CHANGES_WRITE_MAINTAINER 0x20
+#define CHANGES_WRITE_DISTRIBUTIONS 0x40
+#define CHANGES_WRITE_ALL 0xFFFF
+
+/* Rewrite the .changes file from the parsed representation.  flags
+ * select which fields to regenerate; with fakefields, placeholder
+ * Changes:/Urgency: fields are inserted if missing.  The new control
+ * data replaces c->control on success.
+ * NOTE(review): the bare returns after a failed cef_newfield rely on
+ * cef_newfield releasing the chain passed to it on failure (the
+ * later error paths free only the strlist) — confirm in chunkedit.c. */
+static retvalue write_changes_file(const char *changesfilename, struct changes *c, unsigned int flags, bool fakefields) {
+	struct chunkeditfield *cef;
+	char datebuffer[100];
+	retvalue r;
+	char *control; size_t controllen;
+	unsigned int filecount = 0;
+	struct fileentry *f;
+	struct tm *tm; time_t t;
+	unsigned int i;
+	struct strlist binaries;
+	enum checksumtype cs;
+
+	strlist_init(&binaries);
+
+	/* only files that were listed in the .changes are written back */
+	for (f = c->files; f != NULL ; f = f->next) {
+		if (f->checksumsfromchanges != NULL)
+			filecount++;
+	}
+
+	if (flagset(CHANGES_WRITE_FILES)) {
+		cef = NULL;
+		for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+			cef = cef_newfield(changes_checksum_names[cs],
+					CEF_ADD, CEF_LATE, filecount, cef);
+			if (FAILEDTOALLOC(cef))
+				return RET_ERROR_OOM;
+			i = 0;
+			for (f = c->files; f != NULL ; f = f->next) {
+				const char *hash, *size;
+				size_t hashlen, sizelen;
+
+				if (f->checksumsfromchanges == NULL)
+					continue;
+				/* drop a whole checksum field again if
+				 * some file lacks that hash type */
+				if (!checksums_gethashpart(f->checksumsfromchanges,
+						cs, &hash, &hashlen,
+						&size, &sizelen)) {
+					assert (cs != cs_md5sum);
+					cef = cef_pop(cef);
+					break;
+				}
+				/* only the Files: (md5) lines carry
+				 * section and priority */
+				if (cs == cs_md5sum)
+					cef_setline2(cef, i,
+						hash, hashlen, size, sizelen,
+						3,
+						f->section?f->section:"-",
+						f->priority?f->priority:"-",
+						f->basename, NULL);
+				else
+					/* strange way, but as dpkg-genchanges
+					 * does it this way... */
+					cef_setline2(cef, i,
+						hash, hashlen, size, sizelen,
+						1,
+						f->basename, NULL);
+				i++;
+			}
+			assert (f != NULL || i == filecount);
+		}
+	} else {
+		cef = cef_newfield("Files", CEF_KEEP, CEF_LATE, 0, NULL);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (fakefields) {
+		cef = cef_newfield("Changes", CEF_ADDMISSED, CEF_LATE, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		cef_setdata(cef,
+"\n Changes information missing, as not an original .changes file");
+	} else {
+		cef = cef_newfield("Changes", CEF_KEEP, CEF_LATE, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	cef = cef_newfield("Closes", CEF_KEEP, CEF_LATE, 0, cef);
+	if (FAILEDTOALLOC(cef))
+		return RET_ERROR_OOM;
+	if (flagset(CHANGES_WRITE_BINARIES)) {
+		/* one Description line per binary that has one */
+		unsigned int count = 0;
+		for (i = 0 ; i < c->binarycount ; i++) {
+			const struct binary *b = c->binaries + i;
+			if (b->description != NULL)
+				count++;
+		}
+		cef = cef_newfield("Description", CEF_ADD, CEF_LATE,
+				count, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		count = 0;
+		for (i = 0 ; i < c->binarycount ; i++) {
+			const struct binary *b = c->binaries + i;
+			if (b->description == NULL)
+				continue;
+			cef_setline(cef, count++, 3,
+					b->name,
+					"-",
+					b->description,
+					NULL);
+		}
+
+	}
+	// Changed-by: line
+	if (flagset(CHANGES_WRITE_MAINTAINER) && c->maintainer != NULL) {
+		cef = cef_newfield("Maintainer", CEF_ADD, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		cef_setdata(cef, c->maintainer);
+	} else {
+		cef = cef_newfield("Maintainer", CEF_KEEP, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (fakefields) {
+		cef = cef_newfield("Urgency", CEF_ADDMISSED, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			return RET_ERROR_OOM;
+		}
+		cef_setdata(cef, "low");
+	} else {
+		cef = cef_newfield("Urgency", CEF_KEEP, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	cef = cef_newfield("Distribution", CEF_KEEP, CEF_EARLY, 0, cef);
+	if (FAILEDTOALLOC(cef))
+		return RET_ERROR_OOM;
+	if (c->distributions.count > 0) {
+		if (flagset(CHANGES_WRITE_DISTRIBUTIONS))
+			cef = cef_newfield("Distribution", CEF_ADD,
+					CEF_EARLY, 0, cef);
+		else
+			cef = cef_newfield("Distribution", CEF_ADDMISSED,
+					CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		cef_setwordlist(cef, &c->distributions);
+	} else if (flagset(CHANGES_WRITE_DISTRIBUTIONS)) {
+		cef = cef_newfield("Distribution", CEF_DELETE,
+				CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (c->version != NULL) {
+		if (flagset(CHANGES_WRITE_VERSION))
+			cef = cef_newfield("Version", CEF_ADD,
+					CEF_EARLY, 0, cef);
+		else
+			cef = cef_newfield("Version", CEF_ADDMISSED,
+					CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		cef_setdata(cef, c->version);
+	} else if (flagset(CHANGES_WRITE_VERSION)) {
+		cef = cef_newfield("Version", CEF_DELETE,
+				CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (flagset(CHANGES_WRITE_ARCHITECTURES)) {
+		cef = cef_newfield("Architecture", CEF_ADD, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+		cef_setwordlist(cef, &c->architectures);
+	} else {
+		cef = cef_newfield("Architecture", CEF_KEEP, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (flagset(CHANGES_WRITE_BINARIES)) {
+		/* list only the binaries already named in the header */
+		r = strlist_init_n(c->binarycount, &binaries);
+		if (RET_WAS_ERROR(r)) {
+			cef_free(cef);
+			return r;
+		}
+		assert (RET_IS_OK(r));
+		for (i = 0 ; i < c->binarycount ; i++) {
+			const struct binary *b = c->binaries + i;
+			if (!b->missedinheader) {
+				r = strlist_add_dup(&binaries, b->name);
+				if (RET_WAS_ERROR(r)) {
+					strlist_done(&binaries);
+					cef_free(cef);
+					return r;
+				}
+			}
+		}
+		cef = cef_newfield("Binary", CEF_ADD, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			strlist_done(&binaries);
+			return RET_ERROR_OOM;
+		}
+		cef_setwordlist(cef, &binaries);
+	} else {
+		cef = cef_newfield("Binary", CEF_KEEP, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef))
+			return RET_ERROR_OOM;
+	}
+	if (c->name != NULL) {
+		if (flagset(CHANGES_WRITE_SOURCE))
+			cef = cef_newfield("Source", CEF_ADD,
+					CEF_EARLY, 0, cef);
+		else
+			cef = cef_newfield("Source", CEF_ADDMISSED,
+					CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			strlist_done(&binaries);
+			return RET_ERROR_OOM;
+		}
+		cef_setdata(cef, c->name);
+	} else if (flagset(CHANGES_WRITE_SOURCE)) {
+		cef = cef_newfield("Source", CEF_DELETE,
+				CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			strlist_done(&binaries);
+			return RET_ERROR_OOM;
+		}
+	}
+	// TODO: if localized make sure this uses C locale....
+	t = time(NULL);
+	if ((tm = localtime(&t)) != NULL &&
+			strftime(datebuffer, sizeof(datebuffer)-1,
+				"%a, %e %b %Y %H:%M:%S %Z", tm) > 0) {
+		cef = cef_newfield("Date", CEF_ADD, CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			strlist_done(&binaries);
+			return RET_ERROR_OOM;
+		}
+		cef_setdata(cef, datebuffer);
+	} else {
+		cef = cef_newfield("Date", CEF_DELETE,
+				CEF_EARLY, 0, cef);
+		if (FAILEDTOALLOC(cef)) {
+			strlist_done(&binaries);
+			return RET_ERROR_OOM;
+		}
+	}
+	cef = cef_newfield("Format", CEF_ADDMISSED, CEF_EARLY, 0, cef);
+	if (FAILEDTOALLOC(cef)) {
+		strlist_done(&binaries);
+		return RET_ERROR_OOM;
+	}
+	cef_setdata(cef, "1.7");
+
+	r = chunk_edit((c->control==NULL)?"":c->control, &control, &controllen,
+			cef);
+	strlist_done(&binaries);
+	cef_free(cef);
+	if (RET_WAS_ERROR(r))
+		return r;
+	assert (RET_IS_OK(r));
+
+	// TODO: try to add the signatures to it again...
+
+	// TODO: add options to place changed files in different directory...
+
+	r = checksums_replace(changesfilename, control, controllen, NULL);
+	if (RET_WAS_ERROR(r)) {
+		free(control);
+		return r;
+	}
+	assert (RET_IS_OK(r));
+
+	free(c->control);
+	c->control = control;
+	return RET_OK;
+}
+
+/* Compute the actual checksums of every file that was located on
+ * disk.  Unreadable files are not fatal: their realchecksums simply
+ * stay NULL; only OOM aborts. */
+static retvalue getchecksums(struct changes *changes) {
+	struct fileentry *file;
+	retvalue r;
+
+	for (file = changes->files ; file != NULL ; file = file->next) {
+		if (file->fullfilename == NULL)
+			continue;
+		assert (file->realchecksums == NULL);
+
+		r = checksums_read(file->fullfilename, &file->realchecksums);
+		if (r == RET_ERROR_OOM)
+			return r;
+		if (!RET_IS_OK(r)) {
+			/* assume everything else is not fatal and means
+			 * a file not readable... */
+			file->realchecksums = NULL;
+		}
+	}
+	return RET_OK;
+}
+
+/* Check whether a file name carries the suffix of the given file
+ * type (possibly behind a compression suffix, if that type allows
+ * being compressed). */
+static bool may_be_type(const char *name, enum filetype ft) {
+	enum compression c;
+	size_t len = strlen(name);
+
+	c = compression_by_suffix(name, &len);
+	if (c != c_none && !typesuffix[ft].allowcompressed)
+		return false;
+	/* guard against computing name + (len - suffixlen) with
+	 * len < suffixlen, which pointed before the string */
+	if (len < typesuffix[ft].len)
+		return false;
+	return strncmp(name + (len - typesuffix[ft].len),
+			typesuffix[ft].suffix,
+			typesuffix[ft].len) == 0;
+}
+
+/* Compare the i-th file referenced by a .dsc against the checksums
+ * found on disk and in the .changes file, printing diagnostics to
+ * stderr for every mismatch. */
+static void verify_sourcefile_checksums(struct dscfile *dsc, int i, const char *dscfile) {
+	const struct fileentry * const file = dsc->uplink[i];
+	const struct checksums * const expectedchecksums
+		= dsc->expected.checksums[i];
+	const char * const basefilename = dsc->expected.names.values[i];
+	assert (file != NULL);
+
+	if (file->checksumsfromchanges == NULL) {
+		/* not listed in the .changes: only .orig tarballs may
+		 * legitimately be missing there */
+		if (may_be_type(basefilename, ft_ORIG_TAR)) {
+			fprintf(stderr,
+"Not checking checksums of '%s', as not included in .changes file.\n",
+				basefilename);
+			return;
+		} else if (file->realchecksums == NULL) {
+			fprintf(stderr,
+"ERROR: File '%s' mentioned in '%s' was not found and is not mentioned in the .changes!\n",
+				basefilename, dscfile);
+			return;
+		}
+	}
+	if (file->realchecksums == NULL)
+		/* there will be an message later about that */
+		return;
+	/* disk matches the .dsc: all good */
+	if (checksums_check(expectedchecksums, file->realchecksums, NULL))
+		return;
+
+	/* .dsc and .changes agree, so the on-disk file is the odd one */
+	if (file->checksumsfromchanges != NULL &&
+			checksums_check(expectedchecksums, file->checksumsfromchanges, NULL))
+		fprintf(stderr,
+"ERROR: checksums of '%s' differ from the ones listed in both '%s' and the .changes file!\n",
+			basefilename, dscfile);
+	else {
+		fprintf(stderr,
+"ERROR: checksums of '%s' differ from those listed in '%s':\n!\n",
+			basefilename, dscfile);
+		checksums_printdifferences(stderr,
+				expectedchecksums, file->realchecksums);
+	}
+}
+
+/* Check that a binary package file name follows the
+ * name_version_architecture.suffix[.compression] convention,
+ * printing an error for each deviation.  The epoch (anything up to
+ * and including ':') is stripped from version, as file names never
+ * contain it.  NULL name/version/architecture skip the respective
+ * (and all following) checks. */
+static void verify_binary_name(const char *basefilename, const char *name, const char *version, const char *architecture, enum filetype type, enum compression c) {
+	size_t nlen, vlen, alen, slen;
+	const char *versionwithoutepoch;
+
+	if (name == NULL)
+		return;
+	nlen = strlen(name);
+	if (strncmp(basefilename, name, nlen) != 0 || basefilename[nlen] != '_') {
+		fprintf(stderr,
+"ERROR: '%s' does not start with '%s_' as expected!\n",
+			basefilename, name);
+		return;
+	}
+	if (version == NULL)
+		return;
+	versionwithoutepoch = strchr(version, ':');
+	if (versionwithoutepoch == NULL)
+		versionwithoutepoch = version;
+	else
+		versionwithoutepoch++;
+	vlen = strlen(versionwithoutepoch);
+	if (strncmp(basefilename+nlen+1, versionwithoutepoch, vlen) != 0
+			|| basefilename[nlen+1+vlen] != '_') {
+		fprintf(stderr,
+"ERROR: '%s' does not start with '%s_%s_' as expected!\n",
+			basefilename, name, version);
+		return;
+	}
+	if (architecture == NULL)
+		return;
+	alen = strlen(architecture);
+	slen = typesuffix[type].len;
+	/* architecture, type suffix and compression suffix must
+	 * follow back to back and end the name */
+	if (strncmp(basefilename+nlen+1+vlen+1, architecture, alen) != 0
+			|| strncmp(basefilename+nlen+1+vlen+1+alen,
+				typesuffix[type].suffix, slen) != 0
+			|| strcmp(basefilename+nlen+1+vlen+1+alen+slen,
+				uncompression_suffix[c]) != 0)
+		fprintf(stderr,
+"ERROR: '%s' is not called '%s_%s_%s%s%s' as expected!\n",
+			basefilename, name, versionwithoutepoch,
+			architecture, typesuffix[type].suffix,
+			uncompression_suffix[c]);
+}
+
+static retvalue verify(const char *changesfilename, struct changes *changes) {
+ retvalue r;
+ struct fileentry *file;
+ size_t k;
+
+ printf("Checking Source packages...\n");
+ for (file = changes->files; file != NULL ; file = file->next) {
+ const char *name, *version, *p;
+ size_t namelen, versionlen, l;
+ bool has_tar, has_diff, has_orig, has_format_tar;
+ int i;
+
+ if (file->type != ft_DSC)
+ continue;
+ if (!strlist_in(&changes->architectures, "source")) {
+ fprintf(stderr,
+"ERROR: '%s' contains a .dsc, but does not list Architecture 'source'!\n",
+ changesfilename);
+ }
+ if (file->fullfilename == NULL) {
+ fprintf(stderr,
+"ERROR: Could not find '%s'!\n", file->basename);
+ continue;
+ }
+ if (file->dsc == NULL) {
+ fprintf(stderr,
+"WARNING: Could not read '%s', thus it cannot be checked!\n",
+ file->fullfilename);
+ continue;
+ }
+ if (file->dsc->name == NULL)
+ fprintf(stderr,
+"ERROR: '%s' does not contain a 'Source:' header!\n", file->fullfilename);
+ else if (changes->name != NULL &&
+ strcmp(changes->name, file->dsc->name) != 0)
+ fprintf(stderr,
+"ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ file->dsc->name, changes->name);
+ if (file->dsc->version == NULL)
+ fprintf(stderr,
+"ERROR: '%s' does not contain a 'Version:' header!\n", file->fullfilename);
+ else if (changes->version != NULL &&
+ strcmp(changes->version,
+ file->dsc->version) != 0)
+ fprintf(stderr,
+"ERROR: '%s' lists Version '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ file->dsc->version, changes->version);
+ if (file->dsc->maintainer == NULL)
+ fprintf(stderr,
+"ERROR: No maintainer specified in '%s'!\n", file->fullfilename);
+ else if (changes->maintainer != NULL &&
+ strcmp(changes->maintainer,
+ file->dsc->maintainer) != 0)
+ fprintf(stderr,
+"Warning: '%s' lists Maintainer '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ file->dsc->maintainer, changes->maintainer);
+ if (file->dsc->section != NULL && file->section != NULL &&
+ strcmp(file->section, file->dsc->section) != 0)
+ fprintf(stderr,
+"Warning: '%s' has Section '%s' while .changes says it is '%s'!\n",
+ file->fullfilename,
+ file->dsc->section, file->section);
+ if (file->dsc->priority != NULL && file->priority != NULL
+ && strcmp(file->priority,
+ file->dsc->priority) != 0)
+ fprintf(stderr,
+"Warning: '%s' has Priority '%s' while .changes says it is '%s'!\n",
+ file->fullfilename,
+ file->dsc->priority, file->priority);
+ // Todo: check types of files it contains...
+ // check names are sensible
+ p = file->basename;
+ while (*p != '\0' && *p != '_')
+ p++;
+ if (*p == '_') {
+ l = strlen(p+1);
+ assert (l >= 4); /* It ends in ".dsc" to come here */
+ } else
+ l = 0;
+
+ if (file->dsc->name != NULL) {
+ name = file->dsc->name;
+ namelen = strlen(name);
+ } else {
+ // TODO: more believe file name or changes name?
+ if (changes->name != NULL) {
+ name = changes->name;
+ namelen = strlen(name);
+ } else {
+ if (*p != '_') {
+ name = NULL;
+ namelen = 0;
+ fprintf(stderr,
+"Warning: '%s' does not contain a '_' separating name and version!\n",
+ file->basename);
+ }else {
+ name = file->basename;
+ namelen = p-name;
+ }
+ }
+ }
+ if (file->dsc->version != NULL) {
+ version = file->dsc->version;
+ versionlen = strlen(version);
+ } else {
+ // TODO: dito
+ if (changes->version != NULL) {
+ version = changes->version;
+ versionlen = strlen(version);
+ } else {
+ if (*p != '_') {
+ version = NULL;
+ SETBUTNOTUSED( versionlen = 0; )
+ if (name != NULL)
+ fprintf(stderr,
+"ERROR: '%s' does not contain a '_' separating name and version!\n",
+ file->basename);
+ } else {
+ version = p+1;
+ versionlen = l-4;
+ }
+ }
+ }
+ if (version != NULL) {
+ const char *colon = strchr(version, ':');
+ if (colon != NULL) {
+ colon++;
+ versionlen -= (colon-version);
+ version = colon;
+ }
+ }
+ if (name != NULL && version != NULL) {
+ if (*p != '_'
+ || (size_t)(p-file->basename) != namelen || l-4 != versionlen
+ || strncmp(p+1, version, versionlen) != 0
+ || strncmp(file->basename, name, namelen) != 0)
+ fprintf(stderr,
+"ERROR: '%s' is not called '%*s_%*s.dsc' as expected!\n",
+ file->basename,
+ (unsigned int)namelen, name,
+ (unsigned int)versionlen, version);
+ }
+ has_tar = false;
+ has_format_tar = false;
+ has_diff = false;
+ has_orig = false;
+ for (i = 0 ; i < file->dsc->expected.names.count ; i++) {
+ const char *basefilename
+ = file->dsc->expected.names.values[i];
+ const struct fileentry *sfile = file->dsc->uplink[i];
+ size_t expectedversionlen, expectedformatlen;
+ const char *expectedformat;
+ bool istar = false, versionok;
+
+ switch (sfile->type) {
+ case ft_UNKNOWN:
+ fprintf(stderr,
+"ERROR: '%s' lists a file '%s' with unrecognized suffix!\n",
+ file->fullfilename,
+ basefilename);
+ break;
+ case ft_TAR:
+ istar = true;
+ has_tar = true;
+ break;
+ case ft_ORIG_TAR:
+ if (has_orig)
+ fprintf(stderr,
+"ERROR: '%s' lists multiple .orig..tar files!\n",
+ file->fullfilename);
+ has_orig = true;
+ break;
+ case ft_DIFF:
+ if (has_diff)
+ fprintf(stderr,
+"ERROR: '%s' lists multiple .diff files!\n",
+ file->fullfilename);
+ has_diff = true;
+ break;
+ default:
+ assert (sfile->type == ft_UNKNOWN);
+ }
+
+ if (name == NULL) // TODO: try extracting it from this
+ continue;
+ if (strncmp(sfile->basename, name, namelen) != 0
+ || sfile->basename[namelen] != '_') {
+ fprintf(stderr,
+"ERROR: '%s' does not begin with '%*s_' as expected!\n",
+ sfile->basename,
+ (unsigned int)namelen, name);
+ /* cannot check further */
+ continue;
+ }
+
+ if (version == NULL)
+ continue;
+ /* versionlen is now always initialized */
+
+ if (sfile->type == ft_ORIG_TAR) {
+ const char *q, *revision;
+ revision = NULL;
+ for (q = version; *q != '\0'; q++) {
+ if (*q == '-')
+ revision = q;
+ }
+ if (revision == NULL)
+ expectedversionlen = versionlen;
+ else
+ expectedversionlen = revision - version;
+ } else
+ expectedversionlen = versionlen;
+
+ versionok = strncmp(sfile->basename+namelen+1,
+ version, expectedversionlen) == 0;
+ if (istar) {
+ if (!versionok) {
+ fprintf(stderr,
+"ERROR: '%s' does not start with '%*s_%*s' as expected!\n",
+ sfile->basename,
+ (unsigned int)namelen, name,
+ (unsigned int)expectedversionlen,
+ version);
+ continue;
+ }
+ expectedformat = sfile->basename + namelen + 1 +
+ expectedversionlen;
+ if (strncmp(expectedformat, ".tar.", 5) == 0)
+ expectedformatlen = 0;
+ else {
+ const char *dot;
+
+ dot = strchr(expectedformat + 1, '.');
+ if (dot == NULL)
+ expectedformatlen = 0;
+ else {
+ expectedformatlen =
+ dot - expectedformat;
+ has_format_tar = true;
+ }
+ }
+ } else {
+ expectedformat = "";
+ expectedformatlen = 0;
+ }
+
+ if (sfile->type == ft_UNKNOWN)
+ continue;
+ if (versionok
+ && strncmp(sfile->basename+namelen+1
+ +expectedversionlen
+ +expectedformatlen,
+ typesuffix[sfile->type].suffix,
+ typesuffix[sfile->type].len) == 0
+ && strcmp(sfile->basename+namelen+1
+ +expectedversionlen
+ +expectedformatlen
+ +typesuffix[sfile->type].len,
+ uncompression_suffix[sfile->compression])
+ == 0)
+ continue;
+ fprintf(stderr,
+"ERROR: '%s' is not called '%.*s_%.*s%.*s%s%s' as expected!\n",
+ sfile->basename,
+ (unsigned int)namelen, name,
+ (unsigned int)expectedversionlen,
+ version,
+ (unsigned int)expectedformatlen,
+ expectedformat,
+ typesuffix[sfile->type].suffix,
+ uncompression_suffix[sfile->compression]);
+ }
+ if (!has_tar && !has_orig)
+ if (has_diff)
+ fprintf(stderr,
+"ERROR: '%s' lists only a .diff, but no .orig.tar!\n",
+ file->fullfilename);
+ else
+ fprintf(stderr,
+"ERROR: '%s' lists no source files!\n",
+ file->fullfilename);
+ else if (has_diff && !has_orig)
+ fprintf(stderr,
+"ERROR: '%s' lists a .diff, but the .tar is not called .orig.tar!\n",
+ file->fullfilename);
+ else if (!has_format_tar && !has_diff && has_orig)
+ fprintf(stderr,
+"ERROR: '%s' lists a .orig.tar, but no .diff!\n",
+ file->fullfilename);
+ }
+ printf("Checking Binary consistency...\n");
+ for (k = 0 ; k < changes->binarycount ; k++) {
+ struct binary *b = &changes->binaries[k];
+
+ if (b->files == NULL && !b->uncheckable) {
+ /* no files - not even conjectured -,
+ * headers must be wrong */
+
+ if (b->description != NULL && !b->missedinheader) {
+ fprintf(stderr,
+"ERROR: '%s' has binary '%s' in 'Binary:' and 'Description:' header, but no files for it found!\n",
+ changesfilename, b->name);
+ } else if (b->description != NULL) {
+ fprintf(stderr,
+"ERROR: '%s' has unexpected description of '%s'\n",
+ changesfilename, b->name);
+ } else {
+ assert (!b->missedinheader);
+ fprintf(stderr,
+"ERROR: '%s' has unexpected Binary: '%s'\n",
+ changesfilename, b->name);
+ }
+ }
+ if (b->files == NULL)
+ continue;
+ /* files are there, make sure they are listed and
+ * have a description*/
+
+ if (b->description == NULL) {
+ fprintf(stderr,
+"ERROR: '%s' has no description for '%s'\n",
+ changesfilename, b->name);
+ }
+ if (b->missedinheader) {
+ fprintf(stderr,
+"ERROR: '%s' does not list '%s' in its Binary header!\n",
+ changesfilename, b->name);
+ }
+ // TODO: check if the files have the names they should
+ // have an architectures as they are listed...
+ }
+ for (file = changes->files; file != NULL ; file = file->next) {
+ const struct binary *b;
+ const struct binaryfile *deb;
+
+ if (file->type != ft_DEB && file->type != ft_DDEB && file->type != ft_UDEB)
+ continue;
+ if (file->fullfilename == NULL) {
+ fprintf(stderr,
+"ERROR: Could not find '%s'!\n", file->basename);
+ continue;
+ }
+ if (file->deb == NULL) {
+ fprintf(stderr,
+"WARNING: Could not read '%s', thus it cannot be checked!\n", file->fullfilename);
+ continue;
+ }
+ deb = file->deb;
+ b = deb->binary;
+
+ if (deb->shortdescription == NULL)
+ fprintf(stderr,
+"Warning: '%s' contains no description!\n",
+ file->fullfilename);
+ else if (b->description != NULL &&
+ strcmp(b->description, deb->shortdescription) != 0)
+ fprintf(stderr,
+"Warning: '%s' says '%s' has description '%s' while '%s' has '%s'!\n",
+ changesfilename, b->name,
+ b->description,
+ file->fullfilename,
+ deb->shortdescription);
+ if (deb->name == NULL)
+ fprintf(stderr,
+"ERROR: '%s' does not contain a 'Package:' header!\n", file->fullfilename);
+ if (deb->sourcename != NULL) {
+ if (strcmp(changes->name, deb->sourcename) != 0)
+ fprintf(stderr,
+"ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ deb->sourcename, changes->name);
+ } else if (deb->name != NULL &&
+ strcmp(changes->name, deb->name) != 0) {
+ fprintf(stderr,
+"ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ deb->name, changes->name);
+ }
+ if (deb->version == NULL)
+ fprintf(stderr,
+"ERROR: '%s' does not contain a 'Version:' header!\n", file->fullfilename);
+ if (deb->sourceversion != NULL) {
+ if (strcmp(changes->version, deb->sourceversion) != 0)
+ fprintf(stderr,
+"ERROR: '%s' lists Source version '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ deb->sourceversion, changes->version);
+ } else if (deb->version != NULL &&
+ strcmp(changes->version, deb->version) != 0) {
+ fprintf(stderr,
+"ERROR: '%s' lists Source version '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ deb->version, changes->name);
+ }
+
+ if (deb->maintainer == NULL)
+ fprintf(stderr,
+"ERROR: No maintainer specified in '%s'!\n", file->fullfilename);
+ else if (changes->maintainer != NULL &&
+ strcmp(changes->maintainer,
+ deb->maintainer) != 0)
+ fprintf(stderr,
+"Warning: '%s' lists Maintainer '%s' while .changes lists '%s'!\n",
+ file->fullfilename,
+ deb->maintainer, changes->maintainer);
+ if (deb->section == NULL)
+ fprintf(stderr,
+"ERROR: No section specified in '%s'!\n", file->fullfilename);
+ else if (file->section != NULL &&
+ strcmp(file->section, deb->section) != 0)
+ fprintf(stderr,
+"Warning: '%s' has Section '%s' while .changes says it is '%s'!\n",
+ file->fullfilename,
+ deb->section, file->section);
+ if (deb->priority == NULL)
+ fprintf(stderr,
+"ERROR: No priority specified in '%s'!\n", file->fullfilename);
+ else if (file->priority != NULL &&
+ strcmp(file->priority, deb->priority) != 0)
+ fprintf(stderr,
+"Warning: '%s' has Priority '%s' while .changes says it is '%s'!\n",
+ file->fullfilename,
+ deb->priority, file->priority);
+ verify_binary_name(file->basename, deb->name, deb->version,
+ deb->architecture, file->type, file->compression);
+ if (deb->architecture != NULL
+ && !strlist_in(&changes->architectures,
+ deb->architecture)) {
+ fprintf(stderr,
+"ERROR: '%s' does not list Architecture: '%s' needed for '%s'!\n",
+ changesfilename, deb->architecture,
+ file->fullfilename);
+ }
+ // todo: check for md5sums file, verify it...
+ }
+
+ printf("Checking checksums...\n");
+ r = getchecksums(changes);
+ if (RET_WAS_ERROR(r))
+ return r;
+ for (file = changes->files; file != NULL ; file = file->next) {
+
+ if (file->checksumsfromchanges == NULL)
+ /* nothing to check here */
+ continue;
+
+ if (file->fullfilename == NULL) {
+ fprintf(stderr,
+"WARNING: Could not check checksums of '%s' as file not found!\n",
+ file->basename);
+ if (file->type == ft_DSC) {
+ fprintf(stderr,
+"WARNING: This file most likely contains additional checksums which could also not be checked because it was not found!\n");
+ }
+ continue;
+ }
+ if (file->realchecksums == NULL) {
+ fprintf(stderr,
+"WARNING: Could not check checksums of '%s'! File vanished while checking or not readable?\n",
+ file->basename);
+ } else if (!checksums_check(file->realchecksums,
+ file->checksumsfromchanges, NULL)) {
+ fprintf(stderr,
+"ERROR: checksums of '%s' differ from those listed in .changes:\n",
+ file->fullfilename);
+ checksums_printdifferences(stderr,
+ file->checksumsfromchanges,
+ file->realchecksums);
+ }
+
+ if (file->type == ft_DSC) {
+ int i;
+
+ if (file->dsc == NULL) {
+ fprintf(stderr,
+"WARNING: Could not read '%s', thus the content cannot be checked\n"
+" and may be faulty and other things depending on it may be incorrect!\n", file->basename);
+ continue;
+ }
+
+ for (i = 0 ; i < file->dsc->expected.names.count ; i++) {
+ verify_sourcefile_checksums(file->dsc, i,
+ file->fullfilename);
+ }
+ }
+ // TODO: check .deb files
+ }
+ return RET_OK;
+}
+
/* Return true if name appears among the first argc entries of argv. */
static bool isarg(int argc, char **argv, const char *name) {
	int i;

	for (i = 0 ; i < argc ; i++) {
		if (strcmp(argv[i], name) == 0)
			return true;
	}
	return false;
}
+
+static bool improvedchecksum_supported(const struct changes *c, bool improvedfilehashes[cs_hashCOUNT]) {
+ enum checksumtype cs;
+ struct fileentry *file;
+
+ for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
+ if (!improvedfilehashes[cs])
+ continue;
+ for (file = c->files; file != NULL ; file = file->next) {
+ const char *dummy1, *dummy3;
+ size_t dummy2, dummy4;
+
+ if (file->checksumsfromchanges == NULL)
+ continue;
+
+ if (!checksums_gethashpart(file->checksumsfromchanges,
+ cs,
+ &dummy1, &dummy2,
+ &dummy3, &dummy4))
+ break;
+ }
+ if (file == NULL)
+ return true;
+ }
+ return false;
+}
+
/* Return true if any of the first count entries of list is true. */
static bool anyset(bool *list, size_t count) {
	size_t i;

	for (i = 0 ; i < count ; i++) {
		if (list[i])
			return true;
	}
	return false;
}
+
/* Recompute checksums of the files referenced by a .changes file and update
 * the .dsc files and the .changes file itself where they disagree or where
 * additional hash types can be added.  With argc > 0, only the files named
 * in argv are checked/updated.  Returns RET_NOTHING if nothing was written,
 * an error on I/O or allocation failure, otherwise the result of writing. */
static retvalue updatechecksums(const char *changesfilename, struct changes *c, int argc, char **argv) {
	retvalue r;
	struct fileentry *file;
	/* per-hash-type: did any file gain this hash type? */
	bool improvedfilehashes[cs_hashCOUNT];

	r = getchecksums(c);
	if (RET_WAS_ERROR(r))
		return r;
	/* first update all .dsc files and perhaps recalculate their checksums*/
	for (file = c->files; file != NULL ; file = file->next) {
		int i;
		bool improvedhash[cs_hashCOUNT];

		if (file->type != ft_DSC)
			continue;

		if (file->dsc == NULL) {
			fprintf(stderr,
"WARNING: Could not read '%s', hoping the content and its checksums are correct!\n",
				file->basename);
			continue;
		}
		memset(improvedhash, 0, sizeof(improvedhash));

		assert (file->fullfilename != NULL);
		/* walk over every file this .dsc expects */
		for (i = 0 ; i < file->dsc->expected.names.count ; i++) {
			const char *basefilename = file->dsc->expected.names.values[i];
			const struct fileentry *sfile = file->dsc->uplink[i];
			/* pointer into the array so the entry can be replaced */
			struct checksums **expected_p = &file->dsc->expected.checksums[i];
			const struct checksums * const expected = *expected_p;
			const char *hashes1, *hashes2;
			size_t dummy;
			bool doit;
			bool improves;

			assert (expected != NULL);
			assert (basefilename != NULL);

			/* with arguments given, only touch listed files */
			doit = isarg(argc, argv, basefilename);
			if (argc > 0 && !doit)
				continue;

			assert (sfile != NULL);
			if (sfile->checksumsfromchanges == NULL) {
				/* file not listed in the .changes: only
				 * handled when named on the command line */
				if (!doit) {
					fprintf(stderr,
"Not checking/updating '%s' as not in .changes and not specified on command line.\n",
						basefilename);
					continue;
				}
				if (sfile->realchecksums == NULL) {
					fprintf(stderr,
"WARNING: Could not check checksums of '%s'!\n", basefilename);
					continue;
				}
			} else {
				if (sfile->realchecksums == NULL) {
					fprintf(stderr,
"WARNING: Could not check checksums of '%s'!\n",
						basefilename);
					continue;
				}
			}

			if (checksums_check(expected, sfile->realchecksums,
						&improves)) {
				if (!improves) {
					/* already correct */
					continue;
				}
				/* future versions might be able to store them
				 * in the dsc */
				r = checksums_combine(expected_p,
						sfile->realchecksums,
						improvedhash);
				if (RET_WAS_ERROR(r))
					return r;
				continue;
			}
			/* checksums disagree: report and take the real ones */
			r = checksums_getcombined(expected, &hashes1, &dummy);
			if (!RET_IS_OK(r))
				hashes1 = "<unknown>";
			r = checksums_getcombined(sfile->realchecksums,
					&hashes2, &dummy);
			if (!RET_IS_OK(r))
				hashes2 = "<unknown>";
			fprintf(stderr,
"Going to update '%s' in '%s'\nfrom '%s'\nto '%s'.\n",
				basefilename, file->fullfilename,
				hashes1, hashes2);
			checksums_free(*expected_p);
			*expected_p = checksums_dup(sfile->realchecksums);
			if (FAILEDTOALLOC(*expected_p))
				return RET_ERROR_OOM;
			file->dsc->modified = true;
		}
		checksumsarray_resetunsupported(&file->dsc->expected,
				improvedhash);
		/* bitwise | is safe here: both operands are bool and
		 * anyset() has no side effects */
		if (file->dsc->modified | anyset(improvedhash, cs_hashCOUNT)) {
			r = write_dsc_file(file, DSC_WRITE_FILES);
			if (RET_WAS_ERROR(r))
				return r;
		}
	}
	memset(improvedfilehashes, 0, sizeof(improvedfilehashes));
	/* now compare the .changes' own file list against reality */
	for (file = c->files; file != NULL ; file = file->next) {
		bool improves;
		const char *hashes1, *hashes2;
		size_t dummy;

		if (file->checksumsfromchanges == NULL)
			/* nothing to check here */
			continue;
		if (file->realchecksums == NULL) {
			fprintf(stderr,
"WARNING: Could not check checksums of '%s'! Leaving it as it is.\n",
				file->basename);
			continue;
		}
		if (checksums_check(file->checksumsfromchanges,
					file->realchecksums, &improves)) {
			if (!improves)
				continue;
			/* future versions might store sha sums in .changes: */
			r = checksums_combine(&file->checksumsfromchanges,
					file->realchecksums, improvedfilehashes);
			if (RET_WAS_ERROR(r))
				return r;
			continue;
		}
		r = checksums_getcombined(file->checksumsfromchanges,
				&hashes1, &dummy);
		if (!RET_IS_OK(r))
			hashes1 = "<unknown>";
		r = checksums_getcombined(file->realchecksums,
				&hashes2, &dummy);
		if (!RET_IS_OK(r))
			hashes2 = "<unknown>";
		fprintf(stderr,
"Going to update '%s' in '%s'\nfrom '%s'\nto '%s'.\n",
			file->basename, changesfilename,
			hashes1, hashes2);
		checksums_free(file->checksumsfromchanges);
		file->checksumsfromchanges = checksums_dup(file->realchecksums);
		if (FAILEDTOALLOC(file->checksumsfromchanges))
			return RET_ERROR_OOM;
		c->modified = true;
	}
	/* rewrite the .changes if entries changed, or if improved hashes
	 * could be stored for every file */
	if (c->modified) {
		return write_changes_file(changesfilename, c,
				CHANGES_WRITE_FILES, false);
	} else if (improvedchecksum_supported(c, improvedfilehashes)) {
		return write_changes_file(changesfilename, c,
				CHANGES_WRITE_FILES, false);
	} else
		return RET_NOTHING;
}
+
/* Add every file a contained .dsc expects but that the .changes does not yet
 * list to the .changes file list (with the checksums the .dsc expects).
 * With argc > 0, only the files named in argv are added.  Returns
 * RET_NOTHING if nothing was added, otherwise the result of writing. */
static retvalue includeallsources(const char *changesfilename, struct changes *c, int argc, char **argv) {
	struct fileentry *file;

	for (file = c->files; file != NULL ; file = file->next) {
		int i;

		if (file->type != ft_DSC)
			continue;

		if (file->dsc == NULL) {
			fprintf(stderr,
"WARNING: Could not read '%s', thus cannot determine if it depends on unlisted files!\n",
				file->basename);
			continue;
		}
		assert (file->fullfilename != NULL);
		for (i = 0 ; i < file->dsc->expected.names.count ; i++) {
			const char *basefilename = file->dsc->expected.names.values[i];
			struct fileentry * const sfile = file->dsc->uplink[i];
			struct checksums **expected_p = &file->dsc->expected.checksums[i];
			const struct checksums * const expected = *expected_p;

			assert (expected != NULL);
			assert (basefilename != NULL);
			assert (sfile != NULL);

			/* already listed in the .changes: nothing to do */
			if (sfile->checksumsfromchanges != NULL)
				continue;

			/* with arguments given, only add listed files */
			if (argc > 0 && !isarg(argc, argv, basefilename))
				continue;

			/* trust the checksums the .dsc declares */
			sfile->checksumsfromchanges = checksums_dup(expected);
			if (FAILEDTOALLOC(sfile->checksumsfromchanges))
				return RET_ERROR_OOM;
			/* copy section and priority information from the dsc */
			if (sfile->section == NULL && file->section != NULL) {
				sfile->section = strdup(file->section);
				if (FAILEDTOALLOC(sfile->section))
					return RET_ERROR_OOM;
			}
			if (sfile->priority == NULL && file->priority != NULL) {
				sfile->priority = strdup(file->priority);
				if (FAILEDTOALLOC(sfile->priority))
					return RET_ERROR_OOM;
			}

			fprintf(stderr, "Going to add '%s' to '%s'.\n",
					basefilename, changesfilename);
			c->modified = true;
		}
	}
	if (c->modified) {
		return write_changes_file(changesfilename, c,
				CHANGES_WRITE_FILES, false);
	} else
		return RET_NOTHING;
}
+
/* Add a .dsc file (and the files it references) to the .changes data in
 * memory: locate the file, parse it, register it under its canonical
 * "name_version.dsc" basename, record checksums, and fill in missing
 * section/priority (extracting them from the source files if needed) and
 * missing .changes headers (maintainer, Architecture 'source').
 * Sets c->modified; the caller is responsible for writing the .changes. */
static retvalue adddsc(struct changes *c, const char *dscfilename, const struct strlist *searchpath) {
	retvalue r;
	struct fileentry *f;
	struct dscfile *dsc;
	char *fullfilename, *basefilename;
	char *origdirectory;
	const char *v;
	int i;

	r = findfile(dscfilename, c, searchpath, ".", &fullfilename);
	if (RET_WAS_ERROR(r))
		return r;
	if (r == RET_NOTHING) {
		fprintf(stderr, "Cannot find '%s'!\n", dscfilename);
		return RET_ERROR_MISSING;
	}
	r = read_dscfile(fullfilename, &dsc);
	if (r == RET_NOTHING) {
		fprintf(stderr, "Error reading '%s'!\n", fullfilename);
		r = RET_ERROR;
	}
	if (RET_WAS_ERROR(r)) {
		free(fullfilename);
		return r;
	}
	/* name and version are required to construct the canonical
	 * basename below */
	if (dsc->name == NULL || dsc->version == NULL) {
		if (dsc->name == NULL)
			fprintf(stderr, "Could not extract name of '%s'!\n",
					fullfilename);
		else
			fprintf(stderr, "Could not extract version of '%s'!\n",
					fullfilename);
		dscfile_free(dsc);
		free(fullfilename);
		return RET_ERROR;
	}
	/* the .changes' Source must agree with the .dsc's */
	if (c->name != NULL) {
		if (strcmp(c->name, dsc->name) != 0) {
			fprintf(stderr,
"ERROR: '%s' lists source '%s' while '%s' already is '%s'!\n",
				fullfilename, dsc->name,
				c->filename, c->name);
			dscfile_free(dsc);
			free(fullfilename);
			return RET_ERROR;
		}
	} else {
		c->name = strdup(dsc->name);
		if (FAILEDTOALLOC(c->name)) {
			dscfile_free(dsc);
			free(fullfilename);
			return RET_ERROR_OOM;
		}
	}
	/* a version mismatch is only warned about */
	if (c->version != NULL) {
		if (strcmp(c->version, dsc->version) != 0)
			fprintf(stderr,
"WARNING: '%s' lists version '%s' while '%s' already lists '%s'!\n",
				fullfilename, dsc->version,
				c->filename, c->version);
	} else {
		c->version = strdup(dsc->version);
		if (FAILEDTOALLOC(c->version)) {
			dscfile_free(dsc);
			free(fullfilename);
			return RET_ERROR_OOM;
		}
	}
	// TODO: make sure if the .changes name/version are modified they will
	// also be written...
	/* file names do not carry the epoch, strip it for the basename */
	v = strchr(dsc->version, ':');
	if (v != NULL)
		v++;
	else
		v = dsc->version;
	basefilename = mprintf("%s_%s.dsc", dsc->name, v);
	if (FAILEDTOALLOC(basefilename)) {
		dscfile_free(dsc);
		free(fullfilename);
		return RET_ERROR_OOM;
	}

	/* remember where the .dsc was found, to look for its source
	 * files next to it later */
	r = dirs_getdirectory(fullfilename, &origdirectory);
	if (RET_WAS_ERROR(r)) {
		/* NOTE(review): assumes dirs_getdirectory leaves
		 * *origdirectory NULL or valid on error — confirm */
		dscfile_free(dsc);
		free(origdirectory);
		free(fullfilename);
		return r;
	}

	// TODO: add rename/copy option to be activated when old and new
	// basefilename differ

	r = add_file(c, basefilename, fullfilename, ft_DSC, &f);
	if (RET_WAS_ERROR(r)) {
		dscfile_free(dsc);
		free(origdirectory);
		return r;
	}
	if (r == RET_NOTHING) {
		fprintf(stderr,
"ERROR: '%s' already contains a file of the same name!\n",
			c->filename);
		dscfile_free(dsc);
		free(origdirectory);
		// TODO: check instead if it is already the same...
		return RET_ERROR;
	}
	/* f owns dsc, fullfilename and basefilename now */
	f->dsc = dsc;

	/* now include the files needed by this */
	for (i = 0 ; i < dsc->expected.names.count ; i++) {
		struct fileentry *file;
		const char *b = dsc->expected.names.values[i];
		const struct checksums *checksums = dsc->expected.checksums[i];

		file = add_fileentry(c, b, strlen(b), true, NULL);
		if (FAILEDTOALLOC(file)) {
			free(origdirectory);
			return RET_ERROR_OOM;
		}
		dsc->uplink[i] = file;
		/* make them appear in the .changes file if not there: */
		// TODO: add missing checksums here from file
		if (file->checksumsfromchanges == NULL) {
			file->checksumsfromchanges = checksums_dup(checksums);
			if (FAILEDTOALLOC(file->checksumsfromchanges)) {
				free(origdirectory);
				return RET_ERROR_OOM;
			}
		} // TODO: otherwise warn if not the same
	}

	c->modified = true;
	r = checksums_read(f->fullfilename, &f->realchecksums);
	if (RET_WAS_ERROR(r)) {
		free(origdirectory);
		return r;
	}
	f->checksumsfromchanges = checksums_dup(f->realchecksums);
	if (FAILEDTOALLOC(f->checksumsfromchanges)) {
		free(origdirectory);
		return RET_ERROR_OOM;
	}
	/* for a "extended" dsc with section or priority */
	if (dsc->section != NULL) {
		free(f->section);
		f->section = strdup(dsc->section);
		if (FAILEDTOALLOC(f->section)) {
			free(origdirectory);
			return RET_ERROR_OOM;
		}
	}
	if (dsc->priority != NULL) {
		free(f->priority);
		f->priority = strdup(dsc->priority);
		if (FAILEDTOALLOC(f->priority)) {
			free(origdirectory);
			return RET_ERROR_OOM;
		}
	}
	/* still missing section or priority: extract them from the
	 * source files themselves */
	if (f->section == NULL || f->priority == NULL) {
		struct sourceextraction *extraction;
		int j;

		extraction = sourceextraction_init(
				(f->section == NULL)?&f->section:NULL,
				(f->priority == NULL)?&f->priority:NULL);
		if (FAILEDTOALLOC(extraction)) {
			free(origdirectory);
			return RET_ERROR_OOM;
		}
		for (j = 0 ; j < dsc->expected.names.count ; j++) {
			sourceextraction_setpart(extraction, j,
					dsc->expected.names.values[j]);
		}
		/* analyse parts until the extraction has what it needs;
		 * missing files end the search (best effort, no error) */
		while (sourceextraction_needs(extraction, &j)) {
			if (dsc->uplink[j]->fullfilename == NULL) {
				/* look for file */
				r = findfile(dsc->expected.names.values[j], c,
						searchpath, origdirectory,
						&dsc->uplink[j]->fullfilename);
				if (RET_WAS_ERROR(r)) {
					sourceextraction_abort(extraction);
					free(origdirectory);
					return r;
				}
				if (r == RET_NOTHING ||
						dsc->uplink[j]->fullfilename == NULL)
					break;
			}
			r = sourceextraction_analyse(extraction,
					dsc->uplink[j]->fullfilename);
			if (RET_WAS_ERROR(r)) {
				sourceextraction_abort(extraction);
				free(origdirectory);
				return r;
			}
		}
		r = sourceextraction_finish(extraction);
		if (RET_WAS_ERROR(r)) {
			free(origdirectory);
			return r;
		}
	}
	free(origdirectory);
	/* update information in the main .changes file if not there already */
	if (c->maintainer == NULL && dsc->maintainer != NULL) {
		c->maintainer = strdup(dsc->maintainer);
		if (FAILEDTOALLOC(c->maintainer))
			return RET_ERROR_OOM;
	}
	if (!strlist_in(&c->architectures, "source")) {
		r = strlist_add_dup(&c->architectures, "source");
		if (RET_WAS_ERROR(r))
			return r;
	}
	return RET_OK;
}
+
+static retvalue adddscs(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) {
+ if (argc <= 0) {
+ fprintf(stderr,
+"Filenames of .dsc files to include expected!\n");
+ return RET_ERROR;
+ }
+ while (argc > 0) {
+ retvalue r = adddsc(c, argv[0], searchpath);
+ if (RET_WAS_ERROR(r))
+ return r;
+ argc--; argv++;
+ }
+ if (c->modified) {
+ return write_changes_file(changesfilename, c,
+ CHANGES_WRITE_ALL, fakefields);
+ } else
+ return RET_NOTHING;
+}
+
/* Add a .deb/.ddeb/.udeb file to the .changes data in memory: locate the
 * file, parse its control data, register it under its canonical
 * "name_version_architecture.type" basename, record checksums, and update
 * the .changes headers (source name/version, maintainer, architectures) and
 * the per-binary bookkeeping.  Sets c->modified; the caller writes later. */
static retvalue adddeb(struct changes *c, const char *debfilename, const struct strlist *searchpath) {
	retvalue r;
	struct fileentry *f;
	struct binaryfile *deb;
	const char *packagetype;
	enum filetype type;
	char *fullfilename, *basefilename;
	const char *v;

	r = findfile(debfilename, c, searchpath, ".", &fullfilename);
	if (RET_WAS_ERROR(r))
		return r;
	if (r == RET_NOTHING) {
		fprintf(stderr, "Cannot find '%s'!\n", debfilename);
		return RET_ERROR_MISSING;
	}
	r = read_binaryfile(fullfilename, &deb);
	if (r == RET_NOTHING) {
		fprintf(stderr, "Error reading '%s'!\n", fullfilename);
		r = RET_ERROR;
	}
	if (RET_WAS_ERROR(r)) {
		free(fullfilename);
		return r;
	}
	// TODO: check if there are other things but the name to distinguish them
	/* classify by filename suffix: .udeb, .ddeb, anything else = .deb */
	if (strlen(fullfilename) > 5 &&
			strcmp(fullfilename+strlen(fullfilename)-5, ".udeb") == 0) {
		packagetype = "udeb";
		type = ft_UDEB;
	} else if (strlen(fullfilename) > 5 &&
			strcmp(fullfilename+strlen(fullfilename)-5, ".ddeb") == 0) {
		packagetype = "ddeb";
		type = ft_DDEB;
	} else {
		packagetype = "deb";
		type = ft_DEB;
	}
	/* name, version and architecture are required to construct the
	 * canonical basename below */
	if (deb->name == NULL || deb->version == NULL || deb->architecture == NULL) {
		if (deb->name == NULL)
			fprintf(stderr,
"Could not extract packagename of '%s'!\n",
				fullfilename);
		else if (deb->version == NULL)
			fprintf(stderr,
"Could not extract version of '%s'!\n",
				fullfilename);
		else
			fprintf(stderr,
"Could not extract architecture of '%s'!\n",
				fullfilename);
		binaryfile_free(deb);
		free(fullfilename);
		return RET_ERROR;
	}
	/* the .changes' Source must agree with the package's source
	 * (falling back to the package name if no Source field) */
	if (c->name != NULL) {
		const char *sourcename;
		if (deb->sourcename != NULL)
			sourcename = deb->sourcename;
		else
			sourcename = deb->name;
		if (strcmp(c->name, sourcename) != 0) {
			fprintf(stderr,
"ERROR: '%s' lists source '%s' while '%s' already is '%s'!\n",
				fullfilename, sourcename,
				c->filename, c->name);
			binaryfile_free(deb);
			free(fullfilename);
			return RET_ERROR;
		}
	} else {
		if (deb->sourcename != NULL)
			c->name = strdup(deb->sourcename);
		else
			c->name = strdup(deb->name);
		if (FAILEDTOALLOC(c->name)) {
			binaryfile_free(deb);
			free(fullfilename);
			return RET_ERROR_OOM;
		}
	}
	/* a source-version mismatch is only warned about */
	if (c->version != NULL) {
		const char *sourceversion;
		if (deb->sourceversion != NULL)
			sourceversion = deb->sourceversion;
		else
			sourceversion = deb->version;
		if (strcmp(c->version, sourceversion) != 0)
			fprintf(stderr,
"WARNING: '%s' lists source version '%s' while '%s' already lists '%s'!\n",
				fullfilename, sourceversion,
				c->filename, c->version);
	} else {
		if (deb->sourceversion != NULL)
			c->version = strdup(deb->sourceversion);
		else
			c->version = strdup(deb->version);
		if (FAILEDTOALLOC(c->version)) {
			binaryfile_free(deb);
			free(fullfilename);
			return RET_ERROR_OOM;
		}
	}
	// TODO: make sure if the .changes name/version are modified they will
	// also be written...
	/* file names do not carry the epoch, strip it for the basename */
	v = strchr(deb->version, ':');
	if (v != NULL)
		v++;
	else
		v = deb->version;
	basefilename = mprintf("%s_%s_%s.%s", deb->name, v, deb->architecture,
			packagetype);
	if (FAILEDTOALLOC(basefilename)) {
		binaryfile_free(deb);
		free(fullfilename);
		return RET_ERROR_OOM;
	}

	// TODO: add rename/copy option to be activated when old and new
	// basefilename differ

	r = add_file(c, basefilename, fullfilename, type, &f);
	if (RET_WAS_ERROR(r)) {
		binaryfile_free(deb);
		return r;
	}
	if (r == RET_NOTHING) {
		fprintf(stderr,
"ERROR: '%s' already contains a file of the same name!\n",
			c->filename);
		binaryfile_free(deb);
		// TODO: check instead if it is already the same...
		return RET_ERROR;
	}
	/* f owns deb, fullfilename and basefilename now */
	f->deb = deb;
	/* link this file into the per-binary bookkeeping */
	deb->binary = get_binary(c, deb->name, strlen(deb->name));
	if (FAILEDTOALLOC(deb->binary))
		return RET_ERROR_OOM;
	deb->next = deb->binary->files;
	deb->binary->files = deb;
	deb->binary->missedinheader = false;
	c->modified = true;
	r = checksums_read(f->fullfilename, &f->realchecksums);
	if (RET_WAS_ERROR(r))
		return r;
	f->checksumsfromchanges = checksums_dup(f->realchecksums);
	if (FAILEDTOALLOC(f->checksumsfromchanges))
		return RET_ERROR_OOM;
	if (deb->shortdescription != NULL) {
		if (deb->binary->description == NULL) {
			/* NOTE(review): this strdup (and the ones below) are
			 * not checked for allocation failure; on OOM the
			 * field silently stays NULL — confirm intended */
			deb->binary->description = strdup(deb->shortdescription);
			deb->binary->missedinheader = false;
		} else if (strcmp(deb->binary->description,
					deb->shortdescription) != 0) {
			fprintf(stderr,
"WARNING: '%s' already lists a different description for '%s' than contained in '%s'!\n",
				c->filename, deb->name, fullfilename);
		}
	}
	if (deb->section != NULL) {
		free(f->section);
		f->section = strdup(deb->section);
	}
	if (deb->priority != NULL) {
		free(f->priority);
		f->priority = strdup(deb->priority);
	}
	if (c->maintainer == NULL && deb->maintainer != NULL) {
		c->maintainer = strdup(deb->maintainer);
	}
	/* make sure the package's architecture is listed in the .changes;
	 * NOTE(review): strlist_add_dup's return value is ignored here */
	if (deb->architecture != NULL &&
			!strlist_in(&c->architectures, deb->architecture)) {
		strlist_add_dup(&c->architectures, deb->architecture);
	}
	return RET_OK;
}
+
+/* Add each .deb file named in argv to the .changes data c (via adddeb),
+ * then rewrite the .changes file if anything actually changed.
+ * Returns RET_NOTHING when no modification was made, RET_ERROR on
+ * missing arguments, or the first error from adddeb/write_changes_file. */
+static retvalue adddebs(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) {
+	if (argc <= 0) {
+		fprintf(stderr,
+"Filenames of .deb files to include expected!\n");
+		return RET_ERROR;
+	}
+	while (argc > 0) {
+		retvalue r = adddeb(c, argv[0], searchpath);
+		if (RET_WAS_ERROR(r))
+			return r;
+		argc--; argv++;
+	}
+	/* only touch the file on disk if adddeb flagged a modification */
+	if (c->modified) {
+		return write_changes_file(changesfilename, c,
+				CHANGES_WRITE_ALL, fakefields);
+	} else
+		return RET_NOTHING;
+}
+
+/* Add a single file to the .changes data c without parsing its contents
+ * (no .deb/.dsc interpretation).  The file is located via searchpath,
+ * its checksums are computed, and it is registered in c's file list.
+ * If the .changes already lists the file with the same checksums this
+ * is a no-op (RET_NOTHING); with different checksums it is an error. */
+static retvalue addrawfile(struct changes *c, const char *filename, const struct strlist *searchpath) {
+	retvalue r;
+	struct fileentry *f;
+	char *fullfilename, *basefilename;
+	struct checksums *checksums;
+
+	r = findfile(filename, c, searchpath, ".", &fullfilename);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (r == RET_NOTHING) {
+		fprintf(stderr, "Cannot find '%s'!\n", filename);
+		return RET_ERROR_MISSING;
+	}
+	basefilename = strdup(dirs_basename(filename));
+	if (FAILEDTOALLOC(basefilename)) {
+		free(fullfilename);
+		return RET_ERROR_OOM;
+	}
+	r = checksums_read(fullfilename, &checksums);
+	if (RET_WAS_ERROR(r)) {
+		free(fullfilename);
+		free(basefilename);
+		return r;
+	}
+	r = add_file(c, basefilename, fullfilename, ft_UNKNOWN, &f);
+	// fullfilename and basefilename now belong to *f or are already free'd
+	basefilename = NULL;
+	fullfilename = NULL;
+	if (RET_WAS_ERROR(r)) {
+		checksums_free(checksums);
+		return r;
+	}
+	/* RET_NOTHING from add_file means an entry of that name already
+	 * existed; decide whether that is a conflict or a no-op */
+	if (r == RET_NOTHING) {
+
+		assert (f != NULL);
+
+		if (f->checksumsfromchanges != NULL) {
+			/* already listed in .changes */
+
+			if (!checksums_check(f->checksumsfromchanges, checksums,
+						NULL)) {
+				fprintf(stderr,
+"ERROR: '%s' already contains a file with name '%s' but different checksums!\n",
+					c->filename, f->basename);
+				checksums_free(checksums);
+				return RET_ERROR;
+			}
+			printf(
+"'%s' already lists '%s' with same checksums. Doing nothing.\n",
+					c->filename, f->basename);
+			checksums_free(checksums);
+			return RET_NOTHING;
+		} else {
+			/* file already expected by some other part (e.g. a .dsc) */
+
+			// TODO: find out whom this files belong to and warn if different
+		}
+	}
+
+	c->modified = true;
+	assert (f->checksumsfromchanges == NULL);
+	/* ownership of checksums moves into the fileentry */
+	f->checksumsfromchanges = checksums;
+	checksums = NULL;
+	if (f->realchecksums == NULL)
+		f->realchecksums = checksums_dup(f->checksumsfromchanges);
+	if (FAILEDTOALLOC(f->realchecksums))
+		return RET_ERROR_OOM;
+	return RET_OK;
+}
+
+/* Add each file named in argv verbatim (no content parsing) via
+ * addrawfile, then rewrite only the file lists of the .changes file
+ * if something changed.  Returns RET_NOTHING when nothing changed. */
+static retvalue addrawfiles(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) {
+	if (argc <= 0) {
+		fprintf(stderr,
+"Filenames of files to add (without further parsing) expected!\n");
+		return RET_ERROR;
+	}
+	while (argc > 0) {
+		retvalue r = addrawfile(c, argv[0], searchpath);
+		if (RET_WAS_ERROR(r))
+			return r;
+		argc--; argv++;
+	}
+	if (c->modified) {
+		return write_changes_file(changesfilename, c,
+				CHANGES_WRITE_FILES, fakefields);
+	} else
+		return RET_NOTHING;
+}
+
+/* Add each file named in argv, dispatching on the filename extension:
+ * .deb/.ddeb/.udeb are parsed as binary packages, .dsc as sources,
+ * anything else is added raw.  Rewrites the .changes file when
+ * something changed; RET_NOTHING otherwise. */
+static retvalue addfiles(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) {
+	if (argc <= 0) {
+		fprintf(stderr, "Filenames of files to add expected!\n");
+		return RET_ERROR;
+	}
+	while (argc > 0) {
+		retvalue r;
+		const char *filename = argv[0];
+		size_t l = strlen(filename);
+
+		/* dispatch purely on the suffix of the filename */
+		if ((l > 4 && strcmp(filename+l-4, ".deb") == 0) ||
+		    (l > 5 && strcmp(filename+l-5, ".ddeb") == 0) ||
+		    (l > 5 && strcmp(filename+l-5, ".udeb") == 0))
+			r = adddeb(c, filename, searchpath);
+		else if ((l > 4 && strcmp(filename+l-4, ".dsc") == 0))
+			r = adddsc(c, filename, searchpath);
+		else
+			r = addrawfile(c, argv[0], searchpath);
+		if (RET_WAS_ERROR(r))
+			return r;
+		argc--; argv++;
+	}
+	if (c->modified) {
+		return write_changes_file(changesfilename, c,
+				CHANGES_WRITE_ALL, fakefields);
+	} else
+		return RET_NOTHING;
+}
+
+/* Remove each file named in argv from the generated .changes file by
+ * dropping its checksum entry.  The fileentry structure itself is kept
+ * so that cross references from other entries stay valid ("dumb"
+ * removal: no unreferencing of anything the file may imply). */
+static retvalue dumbremovefiles(const char *changesfilename, struct changes *c, int argc, char **argv) {
+	if (argc <= 0) {
+		fprintf(stderr,
+"Filenames of files to remove (without further parsing) expected!\n");
+		return RET_ERROR;
+	}
+	while (argc > 0) {
+		struct fileentry **fp;
+		/*@null@*/ struct fileentry *f;
+
+		fp = find_fileentry(c, argv[0], strlen(argv[0]), NULL);
+		f = *fp;
+		if (f == NULL) {
+			fprintf(stderr,
+"Not removing '%s' as not listed in '%s'!\n",
+					argv[0], c->filename);
+		} else if (f->checksumsfromchanges != NULL) {
+			/* removing its checksums makes it vanish from the
+			 * .changes file generated, while still keeping pointers
+			 * from other files intact */
+			checksums_free(f->checksumsfromchanges);
+			f->checksumsfromchanges = NULL;
+			c->modified = true;
+		}
+		argc--; argv++;
+	}
+	if (c->modified) {
+		return write_changes_file(changesfilename, c,
+				CHANGES_WRITE_FILES, false);
+	} else
+		return RET_NOTHING;
+}
+
+/* Replace the Distribution field of the .changes data with the names
+ * given in argv and rewrite the distributions part of the file.
+ * All argv entries are duplicated into a fresh strlist before the
+ * old list is released, so a mid-way allocation failure leaves c
+ * untouched. */
+static retvalue setdistribution(const char *changesfilename, struct changes *c, int argc, char **argv) {
+	retvalue r;
+	struct strlist distributions;
+	int i;
+
+	if (argc <= 0) {
+		fprintf(stderr, "expected Distribution name to set!\n");
+		return RET_ERROR;
+	}
+	r = strlist_init_n(argc, &distributions);
+	if (RET_WAS_ERROR(r))
+		return r;
+	for (i = 0 ; i < argc ; i++) {
+		r = strlist_add_dup(&distributions, argv[i]);
+		if (RET_WAS_ERROR(r)) {
+			strlist_done(&distributions);
+			return r;
+		}
+	}
+	/* swap in the new list only after it was built completely */
+	strlist_done(&c->distributions);
+	strlist_move(&c->distributions, &distributions);
+	return write_changes_file(changesfilename, c,
+			CHANGES_WRITE_DISTRIBUTIONS, false);
+}
+
+/* Dispatch the subcommand in argv[0] to its handler.  Commands that
+ * modify an existing .changes file require it to exist; the add*
+ * family may also create it when create_file is set.  argv[1..] are
+ * passed through to the handler.  Returns a retvalue cast to int. */
+static int execute_command(int argc, char **argv, const char *changesfilename, const struct strlist *searchpath, bool file_exists, bool create_file, bool fakefields, struct changes *changesdata) {
+	const char *command = argv[0];
+	retvalue r;
+
+	assert (argc > 0);
+
+	if (strcasecmp(command, "verify") == 0) {
+		if (argc > 1) {
+			fprintf(stderr, "Too many arguments!\n");
+			r = RET_ERROR;
+		} else if (file_exists)
+			r = verify(changesfilename, changesdata);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "updatechecksums") == 0) {
+		if (file_exists)
+			r = updatechecksums(changesfilename, changesdata,
+					argc-1, argv+1);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "includeallsources") == 0) {
+		if (file_exists)
+			r = includeallsources(changesfilename, changesdata,
+					argc-1, argv+1);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "addrawfile") == 0) {
+		/* the add* commands may create a new .changes file */
+		if (file_exists || create_file)
+			r = addrawfiles(changesfilename, changesdata,
+					argc-1, argv+1, searchpath, fakefields);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "adddsc") == 0) {
+		if (file_exists || create_file)
+			r = adddscs(changesfilename, changesdata,
+					argc-1, argv+1, searchpath, fakefields);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "adddeb") == 0) {
+		if (file_exists || create_file)
+			r = adddebs(changesfilename, changesdata,
+					argc-1, argv+1, searchpath, fakefields);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "add") == 0) {
+		if (file_exists || create_file)
+			r = addfiles(changesfilename, changesdata,
+					argc-1, argv+1, searchpath, fakefields);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "setdistribution") == 0) {
+		if (file_exists)
+			r = setdistribution(changesfilename, changesdata,
+					argc-1, argv+1);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else if (strcasecmp(command, "dumbremove") == 0) {
+		if (file_exists)
+			r = dumbremovefiles(changesfilename, changesdata,
+					argc-1, argv+1);
+		else {
+			fprintf(stderr, "No such file '%s'!\n",
+					changesfilename);
+			r = RET_ERROR;
+		}
+	} else {
+		fprintf(stderr, "Unknown command '%s'\n", command);
+		r = RET_ERROR;
+	}
+	return r;
+}
+
+/* Split the colon-separated search path into its directories and
+ * append each to list.  Empty components caused by "::" or a leading
+ * ':' are skipped; the final component is always added (even if
+ * empty, matching the historic behaviour).  Ownership of the added
+ * strings moves to list.  Returns RET_ERROR_OOM on allocation
+ * failure, otherwise the result of the last strlist addition. */
+static retvalue splitpath(struct strlist *list, const char *path) {
+	retvalue r;
+	const char *next;
+
+	/* strchr() instead of the legacy index(): identical semantics,
+	 * but index() was marked LEGACY and removed in POSIX.1-2008 */
+	while ((next = strchr(path, ':')) != NULL) {
+		if (next > path) {
+			char *dir = strndup(path, next-path);
+			if (FAILEDTOALLOC(dir)) {
+				return RET_ERROR_OOM;
+			}
+			r = strlist_add(list, dir);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		path = next+1;
+	}
+	return strlist_add_dup(list, path);
+}
+
+/* Entry point of changestool: parse options, read (or prepare to
+ * create) the .changes file named by the first positional argument,
+ * then hand the remaining arguments to execute_command. */
+int main(int argc, char *argv[]) {
+	static int longoption = 0;
+	static const struct option longopts[] = {
+		{"help", no_argument, NULL, 'h'},
+		{"create", no_argument, NULL, 'C'},
+		{"create-with-all-fields", no_argument, &longoption, 6},
+		{"searchpath", required_argument, NULL, 's'},
+		{"gunzip", required_argument, &longoption, 1},
+		{"bunzip2", required_argument, &longoption, 2},
+		{"unlzma", required_argument, &longoption, 3},
+		{"unxz", required_argument, &longoption, 4},
+		{"lunzip", required_argument, &longoption, 5},
+		{"unzstd", required_argument, &longoption, 7},
+		{NULL, 0, NULL, 0},
+	};
+	int c;
+	const char *changesfilename;
+	bool file_exists;
+	bool create_file = false;
+	bool all_fields = false;
+	struct strlist searchpath;
+	struct changes *changesdata;
+	char *gunzip = NULL, *bunzip2 = NULL, *unlzma = NULL,
+	     *unxz = NULL, *lunzip = NULL, *unzstd = NULL;
+	retvalue r;
+
+	strlist_init(&searchpath);
+
+	/* NOTE(review): the optstring lists "i:" but no case 'i' is
+	 * handled below -- confirm whether -i is intentionally ignored */
+	while ((c = getopt_long(argc, argv, "+hi:s:", longopts, NULL)) != -1) {
+		switch (c) {
+			case '\0':
+				/* long-only options store their id in longoption */
+				switch (longoption) {
+					case 1:
+						gunzip = strdup(optarg);
+						break;
+					case 2:
+						bunzip2 = strdup(optarg);
+						break;
+					case 3:
+						unlzma = strdup(optarg);
+						break;
+					case 4:
+						unxz = strdup(optarg);
+						break;
+					case 5:
+						lunzip = strdup(optarg);
+						break;
+					case 7:
+						unzstd = strdup(optarg);
+						break;
+					case 6:
+						create_file = true;
+						all_fields = true;
+						break;
+				}
+				break;
+			case 'h':
+				/* presumably does not return (prints help and exits) */
+				about(true);
+			case 'C':
+				create_file = true;
+				break;
+			case 's':
+				r = splitpath(&searchpath, optarg);
+				if (RET_WAS_ERROR(r)) {
+					if (r == RET_ERROR_OOM)
+						fprintf(stderr,
+"Out of memory!\n");
+					exit(EXIT_FAILURE);
+				}
+				break;
+		}
+	}
+	/* need at least a .changes filename and a command */
+	if (argc - optind < 2) {
+		about(false);
+	}
+	signature_init(false);
+	uncompressions_check(gunzip, bunzip2, unlzma, unxz, lunzip, unzstd);
+
+	changesfilename = argv[optind];
+	if (strcmp(changesfilename, "-") != 0 &&
+			!endswith(changesfilename, ".changes")) {
+		fprintf(stderr, "first argument not ending with '.changes'\n");
+		exit(EXIT_FAILURE);
+	}
+	file_exists = isregularfile(changesfilename);
+	if (file_exists) {
+		char *changes;
+
+		/* read the (possibly signed) control chunk */
+		r = signature_readsignedchunk(changesfilename, changesfilename,
+				&changes, NULL, NULL);
+		if (!RET_IS_OK(r)) {
+			signatures_done();
+			if (r == RET_ERROR_OOM)
+				fprintf(stderr, "Out of memory!\n");
+			exit(EXIT_FAILURE);
+		}
+		r = parse_changes(changesfilename, changes,
+				&changesdata, &searchpath);
+		if (RET_IS_OK(r))
+			changesdata->control = changes;
+		else {
+			free(changes);
+			changesdata = NULL;
+		}
+	} else {
+		/* start from an empty changes structure to be filled */
+		changesdata = zNEW(struct changes);
+		if (FAILEDTOALLOC(changesdata))
+			r = RET_ERROR_OOM;
+		else {
+			changesdata->filename = strdup(changesfilename);
+			if (FAILEDTOALLOC(changesdata->filename))
+				r = RET_ERROR_OOM;
+			else
+				r = dirs_getdirectory(changesfilename,
+						&changesdata->basedir);
+		}
+	}
+
+	if (!RET_WAS_ERROR(r)) {
+		argc -= (optind+1);
+		argv += (optind+1);
+		r = execute_command(argc, argv, changesfilename, &searchpath,
+				file_exists, create_file, all_fields,
+				changesdata);
+	}
+	changes_free(changesdata);
+
+	signatures_done();
+	if (RET_IS_OK(r))
+		exit(EXIT_SUCCESS);
+	if (r == RET_ERROR_OOM)
+		fprintf(stderr, "Out of memory!\n");
+	exit(EXIT_FAILURE);
+}
diff --git a/tracking.c b/tracking.c
new file mode 100644
index 0000000..6fe5554
--- /dev/null
+++ b/tracking.c
@@ -0,0 +1,1468 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005,2006,2007,2008,2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <assert.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <ctype.h>
+
+#include "error.h"
+#include "names.h"
+#include "dirs.h"
+#include "names.h"
+#include "reference.h"
+#include "ignore.h"
+#include "configparser.h"
+#include "package.h"
+
+#include "database_p.h"
+#include "tracking.h"
+
+#ifndef NOPARANOIA
+#define PARANOIA
+#endif
+
+/* One open tracking database for a distribution. */
+struct s_tracking {
+	char *codename;			/* distribution this belongs to */
+	struct table *table;		/* underlying database table */
+	enum trackingtype type;		/* keep/all/minimal mode */
+	struct trackingoptions options;	/* include*/keep*/need* flags */
+};
+
+/* Close a tracking database and detach it from its distribution.
+ * Safe to call with db == NULL (no-op).  Returns the result of
+ * closing the table. */
+retvalue tracking_done(trackingdb db, struct distribution *distribution) {
+	retvalue r;
+
+	if (db == NULL)
+		return RET_OK;
+
+	r = table_close(db->table);
+	free(db->codename);
+	free(db);
+	/* the distribution is expected to point back at db; warn if the
+	 * back pointer was already cleared */
+	if (distribution->trackingdb == NULL) {
+		fprintf(stderr,
+"Internal Error: Tracking database was closed, but corresponding entry in the distribution structure %s is missing.\n",
+			distribution->codename);
+	} else {
+		distribution->trackingdb = NULL;
+	}
+	return r;
+}
+
+/* Open the tracking database of a distribution and link it into the
+ * distribution structure.  On success *db receives the new handle
+ * (owned by the caller, released via tracking_done). */
+retvalue tracking_initialize(/*@out@*/trackingdb *db, struct distribution *distribution, bool readonly) {
+	struct s_tracking *t;
+	retvalue r;
+
+	t = zNEW(struct s_tracking);
+	if (FAILEDTOALLOC(t))
+		return RET_ERROR_OOM;
+	t->codename = strdup(distribution->codename);
+	if (FAILEDTOALLOC(t->codename)) {
+		free(t);
+		return RET_ERROR_OOM;
+	}
+	/* opening read-write only makes sense when tracking is enabled */
+	assert (distribution->tracking != dt_NONE || readonly);
+	t->type = distribution->tracking;
+	t->options = distribution->trackingoptions;
+	r = database_opentracking(t->codename, readonly, &t->table);
+	if (!RET_IS_OK(r)) {
+		free(t->codename);
+		free(t);
+		return r;
+	}
+	*db = t;
+	distribution->trackingdb = t;
+	return RET_OK;
+}
+
+/* Validate a filetype value (each enum value doubles as the character
+ * stored in the database record).  The switch intentionally has no
+ * default so the compiler can warn when a new filetype is added;
+ * an unknown value asserts and degrades to ft_XTRA_DATA. */
+static inline enum filetype filetypechar(enum filetype filetype) {
+	switch (filetype) {
+		case ft_LOG:
+		case ft_BUILDINFO:
+		case ft_CHANGES:
+		case ft_ALL_BINARY:
+		case ft_ARCH_BINARY:
+		case ft_SOURCE:
+		case ft_XTRA_DATA:
+			return filetype;
+	}
+	assert(false);
+	return ft_XTRA_DATA;
+}
+
+retvalue trackedpackage_addfilekey(trackingdb tracks, struct trackedpackage *pkg, enum filetype filetype, char *filekey, bool used) {
+ char *id;
+ enum filetype ft = filetypechar(filetype);
+ int i, *newrefcounts;
+ enum filetype *newfiletypes;
+ retvalue r;
+
+ if (FAILEDTOALLOC(filekey))
+ return RET_ERROR_OOM;
+
+ for (i = 0 ; i < pkg->filekeys.count ; i++) {
+ if (strcmp(pkg->filekeys.values[i], filekey) == 0) {
+ if (pkg->filetypes[i] != ft) {
+ /* if old file has refcount 0, just repair: */
+ if (pkg->refcounts[i] <= 0) {
+ free(filekey);
+ pkg->filetypes[i] = ft;
+ if (used)
+ pkg->refcounts[i] = 1;
+ return RET_OK;
+ }
+ fprintf(stderr,
+"Filekey '%s' already registered for '%s_%s' as type '%c' is tried to be reregistered as type '%c'!\n",
+ filekey, pkg->sourcename,
+ pkg->sourceversion,
+ pkg->filetypes[i], ft);
+ free(filekey);
+ return RET_ERROR;
+ }
+ free(filekey);
+ if (used)
+ pkg->refcounts[i]++;
+ return RET_OK;
+ }
+ }
+
+ newrefcounts = realloc(pkg->refcounts,
+ (pkg->filekeys.count + 1) * sizeof(int));
+ if (FAILEDTOALLOC(newrefcounts)) {
+ free(filekey);
+ return RET_ERROR_OOM;
+ }
+ if (used)
+ newrefcounts[pkg->filekeys.count]=1;
+ else
+ newrefcounts[pkg->filekeys.count]=0;
+ pkg->refcounts = newrefcounts;
+ newfiletypes = realloc(pkg->filetypes,
+ (pkg->filekeys.count + 1) * sizeof(enum filetype));
+ if (FAILEDTOALLOC(newfiletypes)) {
+ free(filekey);
+ return RET_ERROR_OOM;
+ }
+ newfiletypes[pkg->filekeys.count] = filetype;
+ pkg->filetypes = newfiletypes;
+
+ r = strlist_add(&pkg->filekeys, filekey);
+ if (RET_WAS_ERROR(r))
+ return r;
+
+ id = calc_trackreferee(tracks->codename,
+ pkg->sourcename, pkg->sourceversion);
+ if (FAILEDTOALLOC(id))
+ return RET_ERROR_OOM;
+ r = references_increment(filekey, id);
+ free(id);
+ return r;
+}
+
+/* Register a copy of every filekey in filekeys for pkg.  Each strdup
+ * result is handed to trackedpackage_addfilekey, which takes ownership
+ * and reports a NULL (failed) duplication as RET_ERROR_OOM. */
+retvalue trackedpackage_adddupfilekeys(trackingdb tracks, struct trackedpackage *pkg, enum filetype filetype, const struct strlist *filekeys, bool used) {
+	int i;
+	retvalue result, r;
+	assert (filekeys != NULL);
+
+	result = RET_OK;
+	for (i = 0 ; i < filekeys->count ; i++) {
+		char *filekey = strdup(filekeys->values[i]);
+		r = trackedpackage_addfilekey(tracks, pkg, filetype,
+				filekey, used);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* Decrement the refcount of filekey within pkg.  Inconsistencies
+ * (refcount already zero, or filekey not listed at all) are only
+ * warned about, never treated as errors. */
+static inline retvalue trackedpackage_removefilekey(trackingdb tracks, struct trackedpackage *pkg, const char *filekey) {
+	int i;
+
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		if (strcmp(pkg->filekeys.values[i], filekey) == 0) {
+			if (pkg->refcounts[i] > 0) {
+				pkg->refcounts[i]--;
+			} else
+				fprintf(stderr,
+"Warning: tracking database of %s has inconsistent refcounts of %s_%s.\n",
+					tracks->codename,
+					pkg->sourcename,
+					pkg->sourceversion);
+
+			return RET_OK;
+		}
+	}
+	fprintf(stderr,
+"Warning: tracking database of %s missed files for %s_%s.\n",
+			tracks->codename, pkg->sourcename, pkg->sourceversion);
+	return RET_OK;
+
+}
+
+/* Decrement the refcount of every filekey in filekeys within pkg. */
+retvalue trackedpackage_removefilekeys(trackingdb tracks, struct trackedpackage *pkg, const struct strlist *filekeys) {
+	int i;
+	retvalue result, r;
+	assert (filekeys != NULL);
+
+	result = RET_OK;
+	for (i = 0 ; i < filekeys->count ; i++) {
+		const char *filekey = filekeys->values[i];
+		r = trackedpackage_removefilekey(tracks, pkg, filekey);
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/* Release a trackedpackage and everything it owns (NULL is a no-op). */
+void trackedpackage_free(struct trackedpackage *pkg) {
+	if (pkg != NULL) {
+		free(pkg->sourcename);
+		free(pkg->sourceversion);
+		strlist_done(&pkg->filekeys);
+		free(pkg->refcounts);
+		free(pkg->filetypes);
+		free(pkg);
+	}
+}
+
+/* Parse one NUL-terminated octal number from the buffer *d of
+ * remaining size *s, advancing both past the terminating NUL.
+ * Returns the value, or -1 on a non-octal digit or when the buffer
+ * runs out before the terminator.  (Octal matches the encoding
+ * written by gen_data's refcount serialisation.) */
+static inline int parsenumber(const char **d, size_t *s) {
+	int count;
+
+	count = 0;
+	do {
+		if (**d < '0' || **d > '7')
+			return -1;
+		count = (count*8) + (**d-'0');
+		(*d)++;
+		(*s)--;
+		if (*s == 0)
+			return -1;
+	} while (**d != '\0');
+	/* skip the NUL terminator as well */
+	(*d)++;
+	(*s)--;
+	return count;
+}
+
+/* Allocate a fresh, empty trackedpackage for sourcename/version,
+ * marked isnew so saving creates a new record. */
+static retvalue tracking_new(const char *sourcename, const char *version, /*@out@*/struct trackedpackage **pkg) {
+	struct trackedpackage *p;
+	assert (pkg != NULL && sourcename != NULL && version != NULL);
+
+	p = zNEW(struct trackedpackage);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	p->sourcename = strdup(sourcename);
+	p->sourceversion = strdup(version);
+	p->flags.isnew = true;
+	if (FAILEDTOALLOC(p->sourcename) || FAILEDTOALLOC(p->sourceversion)) {
+		trackedpackage_free(p);
+		return RET_ERROR_OOM;
+	}
+	*pkg = p;
+	return RET_OK;
+}
+
+/* Deserialise a tracking record into a trackedpackage.  The expected
+ * on-disk layout (as produced by gen_data, after the version key) is:
+ * a sequence of (filetype char + filekey + NUL), an empty string as
+ * terminator, then one NUL-terminated octal refcount per filekey.
+ * Corrupt data is reported as an internal error. */
+static inline retvalue parse_data(const char *name, const char *version, const char *data, size_t datalen, /*@out@*/struct trackedpackage **pkg) {
+	struct trackedpackage *p;
+	int i;
+
+	p = zNEW(struct trackedpackage);
+	if (FAILEDTOALLOC(p))
+		return RET_ERROR_OOM;
+	p->sourcename = strdup(name);
+	p->sourceversion = strdup(version);
+	if (FAILEDTOALLOC(p->sourcename)
+			|| FAILEDTOALLOC(p->sourceversion)
+			/* || FAILEDTOALLOC(p->sourcedir) */) {
+		trackedpackage_free(p);
+		return RET_ERROR_OOM;
+	}
+	while (datalen > 0 && *data != '\0') {
+		char *filekey;
+		const char *separator;
+		size_t filekeylen;
+		retvalue r;
+
+		/* grow the filetypes array in chunks of 32 entries */
+		if (((p->filekeys.count)&31) == 0) {
+			enum filetype *n = realloc(p->filetypes,
+				(p->filekeys.count+32)*sizeof(enum filetype));
+			if (FAILEDTOALLOC(n)) {
+				trackedpackage_free(p);
+				return RET_ERROR_OOM;
+			}
+			p->filetypes = n;
+		}
+		p->filetypes[p->filekeys.count] = *data;
+		data++; datalen--;
+		separator = memchr(data, '\0', datalen);
+		if (separator == NULL) {
+			fprintf(stderr,
+"Internal Error: Corrupt tracking data for %s %s\n",
+					name, version);
+			trackedpackage_free(p);
+			return RET_ERROR;
+		}
+		filekeylen = separator - data;
+		filekey = strndup(data, filekeylen);
+		if (FAILEDTOALLOC(filekey)) {
+			trackedpackage_free(p);
+			return RET_ERROR_OOM;
+		}
+		r = strlist_add(&p->filekeys, filekey);
+		if (RET_WAS_ERROR(r)) {
+			trackedpackage_free(p);
+			return r;
+		}
+		data += filekeylen + 1;
+		datalen -= filekeylen + 1;
+	}
+	/* skip the empty string terminating the filekey list.
+	 * NOTE(review): if the loop stopped because datalen reached 0,
+	 * this decrement wraps the unsigned datalen; the refcount parse
+	 * below then fails on the exhausted buffer -- verify intended */
+	data++; datalen--;
+	p->refcounts = nzNEW(p->filekeys.count, int);
+	if (FAILEDTOALLOC(p->refcounts)) {
+		trackedpackage_free(p);
+		return RET_ERROR_OOM;
+	}
+	for (i = 0 ; i < p->filekeys.count ; i++) {
+		if ((p->refcounts[i] = parsenumber(&data, &datalen)) < 0) {
+			fprintf(stderr,
+"Internal Error: Corrupt tracking data for %s %s\n",
+					name, version);
+			trackedpackage_free(p);
+			return RET_ERROR;
+		}
+	}
+	if (datalen > 0) {
+		fprintf(stderr,
+"Internal Error: Trailing garbage in tracking data for %s %s\n (%ld bytes)",
+				name, version, (long)datalen);
+		trackedpackage_free(p);
+		return RET_ERROR;
+	}
+	p->flags.isnew = false;
+	p->flags.deleted = false;
+	*pkg = p;
+	return RET_OK;
+}
+
+/* Look up the record for sourcename/version in the tracking table and
+ * parse it into *pkg.  RET_NOTHING when not present. */
+retvalue tracking_get(trackingdb t, const char *sourcename, const char *version, /*@out@*/struct trackedpackage **pkg) {
+	const char *data;
+	size_t datalen;
+	retvalue r;
+
+	assert (pkg != NULL && sourcename != NULL && version != NULL);
+
+	r = table_getpair(t->table, sourcename, version, &data, &datalen);
+	if (!RET_IS_OK(r))
+		return r;
+	return parse_data(sourcename, version, data, datalen, pkg);
+}
+
+/* Like tracking_get, but fall back to a fresh empty package (marked
+ * isnew) when no record exists yet. */
+retvalue tracking_getornew(trackingdb tracks, const char *name, const char *version, /*@out@*/struct trackedpackage **pkg) {
+	retvalue r;
+	r = tracking_get(tracks, name, version, pkg);
+	if (r == RET_NOTHING)
+		r = tracking_new(name, version, pkg);
+	return r;
+}
+
+/* Serialise pkg into the database record format read by parse_data:
+ * the version string, then (filetype char + filekey + NUL) per entry,
+ * an empty string terminator, then one NUL-terminated octal refcount
+ * per entry.  On success *newdata_p/*newdatalen_p receive a malloc'd
+ * buffer and its length. */
+static retvalue gen_data(struct trackedpackage *pkg, /*@out@*/char **newdata_p, /*@out@*/size_t *newdatalen_p) {
+	size_t versionsize = strlen(pkg->sourceversion)+1;
+	int i;
+	char *d, *data;
+	size_t datalen;
+
+	/* worst-case size: per filekey 1 type char + key + NUL + up to
+	 * 7 bytes of octal refcount (l+9); trimmed below as refcounts
+	 * are emitted shorter */
+	datalen = versionsize + 1;
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		size_t l;
+		l = strlen(pkg->filekeys.values[i]);
+		if (l > 0)
+			datalen += l+9;
+	}
+	data = malloc(datalen + 1);
+	if (FAILEDTOALLOC(data))
+		return RET_ERROR_OOM;
+	memcpy(data, pkg->sourceversion, versionsize);
+	d = data + versionsize;
+	/* NOTE(review): entries with an empty filekey are skipped here
+	 * but still get a refcount emitted below; assumes empty filekeys
+	 * never occur in practice -- verify */
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		size_t l;
+		l = strlen(pkg->filekeys.values[i]);
+		if (l > 0) {
+			*d = pkg->filetypes[i];
+			d++;
+			memcpy(d, pkg->filekeys.values[i], l + 1);
+			d+=l+1;
+		}
+	}
+	*d ='\0'; d++;
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		int j;
+#define MAXREFCOUNTOCTETS 7
+		char countstring[MAXREFCOUNTOCTETS];
+		size_t count = pkg->refcounts[i];
+
+		/* render the refcount in octal, least significant digit
+		 * last, into the tail of countstring */
+		countstring[MAXREFCOUNTOCTETS-1] = '\0';
+		for (j = MAXREFCOUNTOCTETS-2 ; j >= 0 ; j--) {
+			countstring[j] = '0' + (count & 7);
+			count >>= 3;
+			if (count == 0)
+				break;
+		}
+#undef MAXREFCOUNTOCTETS
+		/* count > 0 here would mean the refcount did not fit */
+		assert (count == 0);
+
+		memcpy(d, countstring+j, 7 - j);
+		d+=7-j;
+		/* the estimate reserved 7 bytes; give back the unused j */
+		datalen -= j;
+	}
+	*d ='\0';
+	assert ((size_t)(d-data) == datalen);
+	*newdata_p = data;
+	*newdatalen_p = datalen;
+	return RET_OK;
+}
+
+/* Write pkg back at the current cursor position: delete the record
+ * when flagged deleted (unreferencing must have happened already),
+ * otherwise replace it with freshly generated data. */
+static retvalue tracking_saveatcursor(trackingdb t, struct cursor *cursor, struct trackedpackage *pkg) {
+	if (pkg->flags.deleted) {
+		/* delete if delete is requested
+		 * (all unreferencing has to be done before) */
+		return cursor_delete(t->table, cursor,
+				pkg->sourcename, pkg->sourceversion);
+	} else {
+		char *newdata;
+		size_t newdatalen;
+		retvalue r;
+
+		r = gen_data(pkg, &newdata, &newdatalen);
+		if (RET_IS_OK(r)) {
+			r = cursor_replace(t->table, cursor,
+					newdata, newdatalen);
+			free(newdata);
+		}
+		return r;
+	}
+}
+
+/* Persist pkg without freeing it.  Existing records (isnew false) are
+ * updated in place through a paired cursor; if the record unexpectedly
+ * vanished, pkg is downgraded to isnew and added as a new record.
+ * A deleted-flagged package that was never stored is simply dropped. */
+static retvalue tracking_saveonly(trackingdb t, struct trackedpackage *pkg) {
+	retvalue r, r2;
+	char *newdata;
+	size_t newdatalen;
+
+	assert (pkg != NULL);
+
+	if (!pkg->flags.isnew) {
+		struct cursor *cursor;
+
+		r = table_newpairedcursor(t->table,
+				pkg->sourcename, pkg->sourceversion, &cursor,
+				NULL, NULL);
+		if (RET_WAS_ERROR(r))
+			return r;
+		if (r == RET_NOTHING) {
+			fprintf(stderr,
+"Internal error: tracking_save with isnew=false called but could not find %s_%s in %s!\n",
+				pkg->sourcename, pkg->sourceversion,
+				t->codename);
+			pkg->flags.isnew = true;
+		} else {
+			r = tracking_saveatcursor(t, cursor, pkg);
+			r2 = cursor_close(t->table, cursor);
+			RET_ENDUPDATE(r, r2);
+			return r;
+		}
+	}
+
+	/* a new package already marked deleted needs no record at all */
+	if (pkg->flags.deleted)
+		return RET_OK;
+
+	r = gen_data(pkg, &newdata, &newdatalen);
+	assert (r != RET_NOTHING);
+	if (!RET_IS_OK(r))
+		return r;
+
+	r = table_addrecord(t->table, pkg->sourcename, newdata, newdatalen, false);
+	free(newdata);
+	if (verbose > 18)
+		fprintf(stderr, "Adding tracked package '%s'_'%s' to '%s'\n",
+				pkg->sourcename, pkg->sourceversion,
+				t->codename);
+	return r;
+}
+
+/* Persist pkg and free it, regardless of whether saving succeeded. */
+retvalue tracking_save(trackingdb t, struct trackedpackage *pkg) {
+	retvalue r = tracking_saveonly(t, pkg);
+	trackedpackage_free(pkg);
+	return r;
+}
+
+/* List the codenames of all distributions with a tracking subtable. */
+retvalue tracking_listdistributions(struct strlist *distributions) {
+	return database_listsubtables("tracking.db", distributions);
+}
+
+/* Drop the whole tracking subtable of a distribution and remove all
+ * references held under that codename. */
+retvalue tracking_drop(const char *codename) {
+	retvalue result, r;
+
+	result = database_dropsubtable("tracking.db", codename);
+	r = references_remove(codename);
+	RET_UPDATE(result, r);
+
+	return result;
+}
+
+/* Walk every record of the tracking table and re-add a reference for
+ * each filekey it lists (used after all old references were removed). */
+static retvalue tracking_recreatereferences(trackingdb t) {
+	struct cursor *cursor;
+	retvalue result, r;
+	struct trackedpackage *pkg;
+	char *id;
+	int i;
+	const char *key, *value, *data;
+	size_t datalen;
+
+	r = table_newglobalcursor(t->table, true, &cursor);
+	if (!RET_IS_OK(r))
+		return r;
+
+	result = RET_NOTHING;
+
+	while (cursor_nextpair(t->table, cursor,
+				&key, &value, &data, &datalen)) {
+		r = parse_data(key, value, data, datalen, &pkg);
+		if (RET_WAS_ERROR(r)) {
+			(void)cursor_close(t->table, cursor);
+			return r;
+		}
+		id = calc_trackreferee(t->codename, pkg->sourcename,
+				pkg->sourceversion);
+		if (FAILEDTOALLOC(id)) {
+			trackedpackage_free(pkg);
+			(void)cursor_close(t->table, cursor);
+			return RET_ERROR_OOM;
+		}
+		for (i = 0 ; i < pkg->filekeys.count ; i++) {
+			const char *filekey = pkg->filekeys.values[i];
+			r = references_increment(filekey, id);
+			RET_UPDATE(result, r);
+		}
+		free(id);
+		trackedpackage_free(pkg);
+	}
+	r = cursor_close(t->table, cursor);
+	RET_UPDATE(result, r);
+	return result;
+}
+
+/* Rebuild all references of a distribution's tracking data: remove
+ * everything referenced under its codename, then (when tracking is
+ * enabled) recreate the references from the current table contents. */
+retvalue tracking_rereference(struct distribution *distribution) {
+	retvalue result, r;
+	trackingdb tracks;
+
+	result = references_remove(distribution->codename);
+	if (distribution->tracking == dt_NONE)
+		return result;
+	r = tracking_initialize(&tracks, distribution, true);
+	RET_UPDATE(result, r);
+	if (!RET_IS_OK(r))
+		return result;
+	r = tracking_recreatereferences(tracks);
+	RET_UPDATE(result, r);
+	r = tracking_done(tracks, distribution);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Remove the tracking record for sourcename/version: delete the
+ * references held for its filekeys (or, if the record cannot be
+ * parsed, blindly remove everything under its referee id) and delete
+ * the record itself. */
+retvalue tracking_remove(trackingdb t, const char *sourcename, const char *version) {
+	retvalue result, r;
+	struct cursor *cursor;
+	const char *data;
+	size_t datalen;
+	char *id;
+	struct trackedpackage *pkg SETBUTNOTUSED(= NULL);
+
+	r = table_newpairedcursor(t->table, sourcename, version, &cursor,
+			&data, &datalen);
+	if (!RET_IS_OK(r))
+		return r;
+
+	id = calc_trackreferee(t->codename, sourcename, version);
+	if (FAILEDTOALLOC(id)) {
+		(void)cursor_close(t->table, cursor);
+		return RET_ERROR_OOM;
+	}
+
+	result = parse_data(sourcename, version, data, datalen, &pkg);
+	/* BUGFIX: test the parse outcome (result), not r.  r still holds
+	 * the cursor retvalue, which is known RET_OK here, so the old
+	 * check RET_IS_OK(r) was always true: the fallback branch was
+	 * dead code and a failed parse would have used an unset pkg. */
+	if (RET_IS_OK(result)) {
+		assert (pkg != NULL);
+		r = references_delete(id, &pkg->filekeys, NULL);
+		RET_UPDATE(result, r);
+		trackedpackage_free(pkg);
+	} else {
+		fprintf(stderr,
+"Could not parse data, removing all references blindly...\n");
+		r = references_remove(id);
+		RET_UPDATE(result, r);
+	}
+	free(id);
+	r = cursor_delete(t->table, cursor, sourcename, version);
+	if (RET_IS_OK(r))
+		fprintf(stderr, "Removed %s_%s from %s.\n",
+				sourcename, version, t->codename);
+	RET_UPDATE(result, r);
+	r = cursor_close(t->table, cursor);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Print one tracked package in control-file style: distribution,
+ * source, version, and each filekey with its type char and refcount. */
+static void print(const char *codename, const struct trackedpackage *pkg){
+	int i;
+
+	printf("Distribution: %s\n", codename);
+	printf("Source: %s\n", pkg->sourcename);
+	printf("Version: %s\n", pkg->sourceversion);
+	printf("Files:\n");
+	for (i = 0 ; i < pkg->filekeys.count ; i++) {
+		const char *filekey = pkg->filekeys.values[i];
+
+		printf(" %s %c %d\n", filekey,
+				pkg->filetypes[i], pkg->refcounts[i]);
+	}
+	(void)fputs("\n", stdout);
+}
+
+/* Print every record of the tracking table to stdout.  Parse errors
+ * are accumulated into the result but do not stop the iteration. */
+retvalue tracking_printall(trackingdb t) {
+	struct cursor *cursor;
+	retvalue result, r;
+	struct trackedpackage *pkg;
+	const char *key, *value, *data;
+	size_t datalen;
+
+	r = table_newglobalcursor(t->table, true, &cursor);
+	if (!RET_IS_OK(r))
+		return r;
+
+	result = RET_NOTHING;
+
+	while (cursor_nextpair(t->table, cursor,
+				&key, &value, &data, &datalen)) {
+		r = parse_data(key, value, data, datalen, &pkg);
+		if (RET_IS_OK(r)) {
+			print(t->codename, pkg);
+			trackedpackage_free(pkg);
+		}
+		RET_UPDATE(result, r);
+	}
+	r = cursor_close(t->table, cursor);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Open the distribution's tracking database read-only and call action
+ * for every successfully parsed record.  Iteration stops at the first
+ * error; the database is closed in all cases. */
+retvalue tracking_foreach_ro(struct distribution *d, tracking_foreach_ro_action *action) {
+	trackingdb t;
+	struct cursor *cursor;
+	retvalue result, r;
+	struct trackedpackage *pkg;
+	const char *key, *value, *data;
+	size_t datalen;
+
+	r = tracking_initialize(&t, d, true);
+	if (!RET_IS_OK(r))
+		return r;
+
+	r = table_newglobalcursor(t->table, true, &cursor);
+	if (!RET_IS_OK(r)) {
+		(void)tracking_done(t, d);
+		return r;
+	}
+
+	result = RET_NOTHING;
+	while (cursor_nextpair(t->table, cursor,
+				&key, &value, &data, &datalen)) {
+		r = parse_data(key, value, data, datalen, &pkg);
+		if (RET_IS_OK(r)) {
+			r = action(d, pkg);
+			trackedpackage_free(pkg);
+		}
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	r = cursor_close(t->table, cursor);
+	RET_ENDUPDATE(result, r);
+	r = tracking_done(t, d);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Parse a "Tracking:" config header into the distribution: exactly one
+ * mode flag (keep/all/minimal) must be present, plus any number of
+ * option flags.  Unsupported options (needsources, embargoalls) only
+ * produce warnings. */
+retvalue tracking_parse(struct distribution *d, struct configiterator *iter) {
+	enum trackingflags { tf_keep, tf_all, tf_minimal,
+		tf_includechanges, tf_includebyhand, tf_includelogs,
+		tf_includebuildinfos,
+		tf_keepsources,
+		tf_needsources, tf_embargoalls,
+		tf_COUNT /* must be last */
+	};
+	static const struct constant trackingflags[] = {
+		{"keep",	tf_keep},
+		{"all",		tf_all},
+		{"minimal",	tf_minimal},
+		{"includechanges",	tf_includechanges},
+		{"includebuildinfos",	tf_includebuildinfos},
+		{"includelogs",		tf_includelogs},
+		{"includebyhand",	tf_includebyhand},
+		{"keepsources",	tf_keepsources},
+		{"needsources",	tf_needsources},
+		{"embargoalls",	tf_embargoalls},
+		{NULL,		-1}
+	};
+	bool flags[tf_COUNT];
+	retvalue r;
+	int modecount;
+
+	assert (d->tracking == dt_NONE);
+	memset(flags, 0, sizeof(flags));
+	r = config_getflags(iter, "Tracking", trackingflags, flags,
+			IGNORABLE(unknownfield), "");
+	assert (r != RET_NOTHING);
+	if (RET_WAS_ERROR(r))
+		return r;
+	/* BUGFIX: the ternaries must be parenthesised: '?:' binds looser
+	 * than '+', so the old unparenthesised expression always yielded
+	 * 0 or 1 and the "only one mode" check below was unreachable. */
+	modecount = (flags[tf_keep]?1:0) + (flags[tf_minimal]?1:0)
+		+ (flags[tf_all]?1:0);
+	if (modecount > 1) {
+		fprintf(stderr,
+"Error parsing config file %s, line %u:\n"
+"Only one of 'keep','all' or 'minimal' can be in one Tracking header.\n",
+				config_filename(iter), config_line(iter));
+		return RET_ERROR;
+	}
+	if (modecount < 1) {
+		fprintf(stderr,
+"Error parsing config file %s, line %u, column %u:\n"
+"Tracking mode ('keep','all' or 'minimal') expected.\n",
+				config_filename(iter), config_line(iter),
+				config_column(iter));
+		return RET_ERROR;
+	}
+	if (flags[tf_keep])
+		d->tracking = dt_KEEP;
+	else if (flags[tf_minimal])
+		d->tracking = dt_MINIMAL;
+	else
+		d->tracking = dt_ALL;
+
+	d->trackingoptions.includechanges = flags[tf_includechanges];
+	d->trackingoptions.includebyhand = flags[tf_includebyhand];
+	d->trackingoptions.includebuildinfos = flags[tf_includebuildinfos];
+	d->trackingoptions.includelogs = flags[tf_includelogs];
+	d->trackingoptions.keepsources = flags[tf_keepsources];
+	d->trackingoptions.needsources = flags[tf_needsources];
+	if (flags[tf_needsources])
+		fprintf(stderr,
+"Warning parsing config file %s, line %u:\n"
+"'needsources' ignored as not yet supported.\n",
+				config_filename(iter), config_line(iter));
+	d->trackingoptions.embargoalls = flags[tf_embargoalls];
+	if (flags[tf_embargoalls])
+		fprintf(stderr,
+"Warning parsing config file %s, line %u:\n"
+"'embargoall' ignored as not yet supported.\n",
+				config_filename(iter), config_line(iter));
+	return RET_OK;
+}
+
+static retvalue trackingdata_remember(struct trackingdata *td, const char*name, const char*version) {
+ struct trackingdata_remember *r;
+
+ r = NEW(struct trackingdata_remember);
+ if (FAILEDTOALLOC(r))
+ return RET_ERROR_OOM;
+ r->name = strdup(name);
+ r->version = strdup(version);
+ if (FAILEDTOALLOC(r->name) || FAILEDTOALLOC(r->version)) {
+ free(r->name);
+ free(r->version);
+ free(r);
+ return RET_ERROR_OOM;
+ }
+ r->next = td->remembered;
+ td->remembered = r;
+ return RET_OK;
+}
+
+retvalue trackingdata_summon(trackingdb tracks, const char *name, const char *version, struct trackingdata *data) {
+ struct trackedpackage *pkg;
+ retvalue r;
+
+ r = tracking_getornew(tracks, name, version, &pkg);
+ assert (r != RET_NOTHING);
+ if (RET_IS_OK(r)) {
+ data->tracks = tracks;
+ data->pkg = pkg;
+ data->remembered = NULL;
+ return r;
+ }
+ return r;
+}
+
+retvalue trackingdata_new(trackingdb tracks, struct trackingdata *data) {
+
+ data->tracks = tracks;
+ data->pkg = NULL;
+ data->remembered = NULL;
+ return RET_OK;
+}
+
/* Make data track the source package (source, version): if a different
 * package is currently tracked, save that one, remember it for the final
 * tidy pass in trackingdata_finish, then load/create the new record. */
retvalue trackingdata_switch(struct trackingdata *data, const char *source, const char *version) {
	retvalue r;

	if (data->pkg != NULL) {
		/* already tracking the requested package? nothing to do */
		if (strcmp(data->pkg->sourcename, source) == 0 &&
				strcmp(data->pkg->sourceversion, version) == 0)
			return RET_OK;
		r = tracking_saveonly(data->tracks, data->pkg);
		if (RET_WAS_ERROR(r))
			return r;
		/* remember the old package so trackingdata_finish can
		 * tidy it once all modifications are done */
		r = trackingdata_remember(data, data->pkg->sourcename,
				data->pkg->sourceversion);
		/* tracking_saveonly (unlike tracking_save, declared
		 * /*@only@*/ in tracking.h) did not consume the record,
		 * so release it by hand here */
		strlist_done(&data->pkg->filekeys);
		free(data->pkg->sourcename);
		free(data->pkg->sourceversion);
		free(data->pkg->refcounts);
		free(data->pkg->filetypes);
		free(data->pkg);
		data->pkg = NULL;
		if (RET_WAS_ERROR(r))
			return r;
	}
	r = tracking_getornew(data->tracks, source, version, &data->pkg);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	return RET_OK;
}
+
/* Record that the files in filekeys (of the given filetype) were added
 * for the currently tracked source package; if a package was replaced
 * (old != NULL), also remove the replaced package's files from its
 * source's tracking record. */
retvalue trackingdata_insert(struct trackingdata *data, enum filetype filetype, const struct strlist *filekeys, /*@null@*/const struct package *old, /*@null@*/const struct strlist *oldfilekeys) {
	retvalue result, r;
	struct trackedpackage *pkg;

	assert (data != NULL);
	assert(data->pkg != NULL);
	result = trackedpackage_adddupfilekeys(data->tracks, data->pkg,
			filetype, filekeys, true);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	/* no replaced package (or not enough information about it)? done */
	if (old == NULL || old->source == NULL || old->sourceversion == NULL
			|| oldfilekeys == NULL) {
		return RET_OK;
	}
	if (strcmp(old->sourceversion, data->pkg->sourceversion) == 0 &&
			strcmp(old->source, data->pkg->sourcename) == 0) {
		/* Unlikely, but it may also be the same source version as
		 * the package we are currently adding */
		return trackedpackage_removefilekeys(data->tracks, data->pkg,
				oldfilekeys);
	}
	r = tracking_get(data->tracks, old->source, old->sourceversion, &pkg);
	if (RET_WAS_ERROR(r)) {
		return r;
	}
	if (r == RET_NOTHING) {
		/* inconsistent tracking data: warn, but do not fail */
		fprintf(stderr,
"Could not find tracking data for %s_%s in %s to remove old files from it.\n",
			old->source, old->sourceversion,
			data->tracks->codename);
		return result;
	}
	r = trackedpackage_removefilekeys(data->tracks, pkg, oldfilekeys);
	RET_UPDATE(result, r);
	/* tracking_save consumes pkg (declared /*@only@*/ in tracking.h) */
	r = tracking_save(data->tracks, pkg);
	RET_UPDATE(result, r);
	/* remember the old source so trackingdata_finish tidies it */
	r = trackingdata_remember(data, old->source, old->sourceversion);
	RET_UPDATE(result, r);

	return result;
}
+
/* Remove the given filekeys of source package (oldsource, oldversion)
 * from the tracking data, e.g. because that package was removed. */
retvalue trackingdata_remove(struct trackingdata *data, const char* oldsource, const char*oldversion, const struct strlist *oldfilekeys) {
	retvalue result, r;
	struct trackedpackage *pkg;

	if (verbose >= 15)
		fprintf(stderr, "trace: trackingdata_remove(oldsource=%s, oldversion=%s) called.\n",
				oldsource, oldversion);
	assert(oldsource != NULL && oldversion != NULL && oldfilekeys != NULL);
	if (data->pkg != NULL &&
			strcmp(oldversion, data->pkg->sourceversion) == 0 &&
			strcmp(oldsource, data->pkg->sourcename) == 0) {
		/* Unlikely, but it may also be the same source version as
		 * the package we are currently adding */
		return trackedpackage_removefilekeys(data->tracks,
				data->pkg, oldfilekeys);
	}
	result = tracking_get(data->tracks, oldsource, oldversion, &pkg);
	if (RET_WAS_ERROR(result)) {
		return result;
	}
	if (result == RET_NOTHING) {
		/* inconsistent tracking data: warn, but treat as success */
		fprintf(stderr,
"Could not find tracking data for %s_%s in %s to remove old files from it.\n",
			oldsource, oldversion, data->tracks->codename);
		return RET_OK;
	}
	r = trackedpackage_removefilekeys(data->tracks, pkg, oldfilekeys);
	RET_UPDATE(result, r);
	/* tracking_save consumes pkg */
	r = tracking_save(data->tracks, pkg);
	RET_UPDATE(result, r);
	/* remember this source so trackingdata_finish tidies it later */
	r = trackingdata_remember(data, oldsource, oldversion);
	RET_UPDATE(result, r);

	return result;
}
+
+void trackingdata_done(struct trackingdata *d) {
+ trackedpackage_free(d->pkg);
+ d->pkg = NULL;
+ d->tracks = NULL;
+ while (d->remembered != NULL) {
+ struct trackingdata_remember *h = d->remembered;
+ d->remembered = h->next;
+ free(h->name);
+ free(h->version);
+ free(h);
+ }
+
+}
+
/* Mark the whole tracking record as deleted: drop every file reference
 * it holds and empty its arrays (the record itself stays allocated so
 * the caller can still save/free it). */
static inline retvalue trackedpackage_removeall(trackingdb tracks, struct trackedpackage *pkg) {
	retvalue result = RET_OK, r;
	char *id;

// printf("[trackedpackage_removeall %s %s %s]\n", tracks->codename, pkg->sourcename, pkg->sourceversion);
	id = calc_trackreferee(tracks->codename, pkg->sourcename,
			pkg->sourceversion);
	if (FAILEDTOALLOC(id))
		return RET_ERROR_OOM;

	pkg->flags.deleted = true;
	/* drop all references held under this tracking identifier */
	r = references_delete(id, &pkg->filekeys, NULL);
	RET_UPDATE(result, r);
	free(id);
	strlist_done(&pkg->filekeys);
	strlist_init(&pkg->filekeys);
	free(pkg->refcounts); pkg->refcounts = NULL;
	return result;
}
+
+static inline bool tracking_needed(trackingdb tracks, struct trackedpackage *pkg, int ofs) {
+ if (pkg->refcounts[ofs] > 0)
+ return true;
+ // TODO: add checks so that only .changes, .buildinfo and .log files
+ // belonging to still existing binaries are kept in minimal mode
+ if (pkg->filetypes[ofs] == ft_LOG && tracks->options.includelogs)
+ return true;
+ if (pkg->filetypes[ofs] == ft_BUILDINFO && tracks->options.includebuildinfos)
+ return true;
+ if (pkg->filetypes[ofs] == ft_CHANGES && tracks->options.includechanges)
+ return true;
+ if (pkg->filetypes[ofs] == ft_XTRA_DATA)
+ return true;
+ if (pkg->filetypes[ofs] == ft_SOURCE && tracks->options.keepsources)
+ return true;
+ return false;
+
+}
+
/* In minimal mode: delete every file of the record that is no longer
 * needed (see tracking_needed), compacting the parallel
 * filekeys/refcounts/filetypes arrays in place. */
static inline retvalue trackedpackage_removeunneeded(trackingdb tracks, struct trackedpackage *pkg) {
	retvalue result = RET_OK, r;
	char *id = NULL;
	int i, j, count;

	assert(tracks->type == dt_MINIMAL);

	count = pkg->filekeys.count;
	j = 0;
	for (i = 0 ; i < count ; i++) {
		if (tracking_needed(tracks, pkg, i)) {
			/* keep this entry: compact it towards the front */
			if (j < i) {
				pkg->filekeys.values[j] = pkg->filekeys.values[i];
				pkg->refcounts[j] = pkg->refcounts[i];
				pkg->filetypes[j] = pkg->filetypes[i];
			}
			j++;
		} else {
			char *filekey = pkg->filekeys.values[i];
			pkg->filekeys.values[i] = NULL;
			/* id is computed lazily on the first removal;
			 * FAILEDTOALLOC() here merely tests id == NULL */
			if (FAILEDTOALLOC(id)) {
				id = calc_trackreferee(tracks->codename,
						pkg->sourcename, pkg->sourceversion);
				if (id == NULL)
					result = RET_ERROR_OOM;
			}
			if (id != NULL) {
// printf("[trackedpackage_removeunneeded %s %s %s: '%s']\n", tracks->codename, pkg->sourcename, pkg->sourceversion, filekey);
				r = references_decrement(filekey, id);
				RET_UPDATE(result, r);
			}
			free(filekey);
		}
	}
	assert (j <= pkg->filekeys.count);
	pkg->filekeys.count = j;
	free(id);
	return result;
}
+
+static inline retvalue trackedpackage_tidy(trackingdb tracks, struct trackedpackage *pkg) {
+ int i;
+
+ if (tracks->type == dt_KEEP)
+ return RET_OK;
+ /* look if anything clings to this package */
+ for (i = 0 ; i < pkg->filekeys.count ; i++) {
+ if (pkg->refcounts[i] > 0)
+ break;
+ }
+ if (i >= pkg->filekeys.count)
+
+ /* nothing left, remove it all */
+ return trackedpackage_removeall(tracks, pkg);
+
+ else if (tracks->type == dt_MINIMAL)
+
+ /* remove all files no longer needed */
+ return trackedpackage_removeunneeded(tracks, pkg);
+ else
+ return RET_OK;
+}
+
+retvalue trackingdata_finish(trackingdb tracks, struct trackingdata *d) {
+ retvalue r;
+ assert (d->tracks == tracks);
+ if (d->pkg != NULL) {
+ r = trackedpackage_tidy(tracks, d->pkg);
+ r = tracking_save(tracks, d->pkg);
+ } else
+ r = RET_OK;
+ d->pkg = NULL;
+ /* call for all remembered actions... */
+ while (d->remembered != NULL) {
+ struct trackingdata_remember *h = d->remembered;
+ struct trackedpackage *pkg;
+ d->remembered = h->next;
+ r = tracking_get(tracks, h->name, h->version, &pkg);
+ free(h->name);
+ free(h->version);
+ free(h);
+ if (RET_IS_OK(r)) {
+ r = trackedpackage_tidy(tracks, pkg);
+ r = tracking_save(tracks, pkg);
+ }
+ }
+ d->tracks = NULL;
+ return r;
+
+}
+
/* Walk all tracking records of the database and tidy each one, removing
 * files (or whole records) no longer needed. */
retvalue tracking_tidyall(trackingdb t) {
	struct cursor *cursor;
	retvalue result, r;
	struct trackedpackage *pkg;
	const char *key, *value, *data;
	size_t datalen;

	r = table_newglobalcursor(t->table, true, &cursor);
	if (!RET_IS_OK(r))
		return r;

	result = RET_NOTHING;

	while (cursor_nextpair(t->table, cursor,
				&key, &value, &data, &datalen)) {
		r = parse_data(key, value, data, datalen, &pkg);
		if (RET_WAS_ERROR(r)) {
			result = r;
			break;
		}
		r = trackedpackage_tidy(t, pkg);
		RET_UPDATE(result, r);
		/* write the possibly modified record back at the cursor */
		r = tracking_saveatcursor(t, cursor, pkg);
		RET_UPDATE(result, r);
		trackedpackage_free(pkg);
	}
	r = cursor_close(t->table, cursor);
	RET_UPDATE(result, r);
	return result;
}
+
/* Reset the reference counters of all tracking records to 0, so a
 * following retrack run can rebuild them from the packages actually
 * present (see tracking_retrack). */
retvalue tracking_reset(trackingdb t) {
	struct cursor *cursor;
	retvalue result, r;
	struct trackedpackage *pkg;
	const char *key, *value, *data;
	char *newdata;
	size_t datalen, newdatalen;
	int i;

	r = table_newglobalcursor(t->table, true, &cursor);
	if (!RET_IS_OK(r))
		return r;

	result = RET_NOTHING;

	while (cursor_nextpair(t->table, cursor,
				&key, &value, &data, &datalen)) {
		// this would perhaps be more stable if it just replaced
		// everything within the string just received...
		result = parse_data(key, value, data, datalen, &pkg);
		if (RET_WAS_ERROR(result))
			break;
		/* forget every reference this record counted */
		for (i = 0 ; i < pkg->filekeys.count ; i++) {
			pkg->refcounts[i] = 0;
		}
		result = gen_data(pkg, &newdata, &newdatalen);
		trackedpackage_free(pkg);
		if (RET_IS_OK(result))
			result = cursor_replace(t->table, cursor,
					newdata, newdatalen);
		/* NOTE(review): the free below assumes gen_data sets
		 * *newdata (at least to NULL) even on failure — verify */
		free(newdata);
		if (RET_WAS_ERROR(result))
			break;
	}
	r = cursor_close(t->table, cursor);
	RET_UPDATE(result, r);
	return result;
}
+
/* Call action for every tracked version of the given source package,
 * tidying and saving each record afterwards; aborts on the first error
 * returned by action. */
static retvalue tracking_foreachversion(trackingdb t, struct distribution *distribution, const char *sourcename, retvalue (action)(trackingdb t, struct trackedpackage *, struct distribution *)) {
	struct cursor *cursor;
	retvalue result, r;
	struct trackedpackage *pkg;
	const char *value, *data;
	size_t datalen;

	/* cursor over all records whose key equals sourcename */
	r = table_newduplicatepairedcursor(t->table, sourcename, &cursor,
			&value, &data, &datalen);
	if (!RET_IS_OK(r))
		return r;

	result = RET_NOTHING;

	do {
		r = parse_data(sourcename, value, data, datalen, &pkg);
		if (RET_WAS_ERROR(r)) {
			result = r;
			break;
		}
		if (verbose > 10)
			printf("Processing track of '%s' version '%s'\n",
					pkg->sourcename, pkg->sourceversion);
		r = action(t, pkg, distribution);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r)) {
			/* abort on the first error from action */
			(void)cursor_close(t->table, cursor);
			trackedpackage_free(pkg);
			return r;
		}
		r = trackedpackage_tidy(t, pkg);
		RET_ENDUPDATE(result, r);
		/* write the modified record back at the cursor */
		r = tracking_saveatcursor(t, cursor, pkg);
		RET_UPDATE(result, r);
		trackedpackage_free(pkg);
	} while (cursor_nextpair(t->table, cursor, NULL,
				&value, &data, &datalen));
	r = cursor_close(t->table, cursor);
	RET_UPDATE(result, r);
	return result;
}
+
+
+static retvalue targetremovesourcepackage(trackingdb t, struct trackedpackage *pkg, struct distribution *distribution, struct target *target) {
+ size_t component_len, arch_len;
+ retvalue result, r;
+ int i;
+ const char *packagetype = atoms_packagetypes[target->packagetype];
+ const char *architecture = atoms_architectures[target->architecture];
+ const char *component = atoms_components[target->component];
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: targetremovesourcepackage(pkg={sourcename: %s, sourceversion: %s}, distribution.codename=%s, target.identifier=%s) called.\n",
+ pkg->sourcename, pkg->sourceversion, distribution->codename, target->identifier);
+
+ result = RET_NOTHING;
+
+ component_len = strlen(component);
+ arch_len = strlen(architecture);
+ for (i = 0 ; i < pkg->filekeys.count ; i++) {
+ const char *s, *s2, *basefilename, *filekey = pkg->filekeys.values[i];
+ char *packagename, *packageversion;
+ struct package package;
+ struct strlist filekeys;
+ bool savedstaletracking;
+
+ if (pkg->refcounts[i] <= 0)
+ continue;
+ if (strncmp(filekey, "pool/", 5) != 0)
+ continue;
+ if (strncmp(filekey+5, component,
+ component_len) != 0)
+ continue;
+ if (filekey[5+component_len] != '/')
+ continue;
+ /* check this file could actuall be in this target */
+ if (pkg->filetypes[i] == ft_ALL_BINARY) {
+ if (target->packagetype == pt_dsc)
+ continue;
+ s = strrchr(filekey, '.');
+ if (s == NULL)
+ continue;
+ if (strcmp(s+1, packagetype) != 0)
+ continue;
+ } else if (pkg->filetypes[i] == ft_SOURCE) {
+ if (target->packagetype != pt_dsc)
+ continue;
+ s = strrchr(filekey, '.');
+ if (s == NULL)
+ continue;
+ if (strcmp(s+1, "dsc") != 0)
+ continue;
+ } else if (pkg->filetypes[i] == ft_ARCH_BINARY) {
+ if (target->packagetype == pt_dsc)
+ continue;
+ s = strrchr(filekey, '_');
+ if (s == NULL)
+ continue;
+ s++;
+ if (strncmp(s, architecture, arch_len) != 0
+ || s[arch_len] != '.'
+ || strcmp(s+arch_len+1, packagetype) != 0)
+ continue;
+ } else
+ continue;
+ /* get this package, check it has the right source and version,
+ * and if yes, remove... */
+ basefilename = strrchr(filekey, '/');
+ if (basefilename == NULL)
+ basefilename = filekey;
+ else
+ basefilename++;
+ s = strchr(basefilename, '_');
+ packagename = strndup(basefilename, s - basefilename);
+ if (FAILEDTOALLOC(packagename))
+ return RET_ERROR_OOM;
+ s2 = strrchr(s, '.');
+ packageversion = strndup(s + 1, s2 - s - 1);
+ if (FAILEDTOALLOC(packageversion))
+ return RET_ERROR_OOM;
+ r = package_get(target, packagename, packageversion, &package);
+ if (RET_WAS_ERROR(r)) {
+ free(packagename);
+ free(packageversion);
+ return r;
+ }
+ if (r == RET_NOTHING) {
+ if (pkg->filetypes[i] != ft_ALL_BINARY
+ && verbose >= -1) {
+ fprintf(stderr,
+"Warning: tracking data might be inconsistent:\n"
+"cannot find '%s=%s' in '%s', but '%s' should be there.\n",
+ packagename, packageversion, target->identifier,
+ filekey);
+ }
+ free(packagename);
+ free(packageversion);
+ continue;
+ }
+ // TODO: ugly
+ package.pkgname = packagename;
+ packagename = NULL;
+ free(packageversion);
+
+ r = package_getsource(&package);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ package_done(&package);
+ return r;
+ }
+ if (strcmp(package.source, pkg->sourcename) != 0) {
+ if (pkg->filetypes[i] != ft_ALL_BINARY
+ && verbose >= -1) {
+ fprintf(stderr,
+"Warning: tracking data might be inconsistent:\n"
+"'%s' has '%s' of source '%s', but source '%s' contains '%s'.\n",
+ target->identifier, package.name,
+ package.source, pkg->sourcename,
+ filekey);
+ }
+ package_done(&package);
+ continue;
+ }
+ if (strcmp(package.sourceversion, pkg->sourceversion) != 0) {
+ if (pkg->filetypes[i] != ft_ALL_BINARY
+ && verbose >= -1) {
+ fprintf(stderr,
+"Warning: tracking data might be inconsistent:\n"
+"'%s' has '%s' of source version '%s', but version '%s' contains '%s'.\n",
+ target->identifier, package.name,
+ package.sourceversion,
+ pkg->sourceversion,
+ filekey);
+ }
+ package_done(&package);
+ continue;
+ }
+ r = target->getfilekeys(package.control, &filekeys);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ package_done(&package);
+ return r;
+ }
+
+ /* we remove the tracking data outself, so this is not
+ * told to remove the tracking data, so it might mark things
+ * as stale, which we do not want.. */
+ savedstaletracking = target->staletracking;
+ r = package_remove(&package, distribution->logger, NULL);
+ target->staletracking = savedstaletracking;
+ package_done(&package);
+ assert (r != RET_NOTHING);
+ if (RET_WAS_ERROR(r)) {
+ strlist_done(&filekeys);
+ return r;
+ }
+ trackedpackage_removefilekeys(t, pkg, &filekeys);
+ strlist_done(&filekeys);
+ result = RET_OK;
+ }
+ return result;
+}
+
/* Try to remove all packages causing refcounts in this tracking record */
static retvalue removesourcepackage(trackingdb t, struct trackedpackage *pkg, struct distribution *distribution) {
	struct target *target;
	retvalue result, r;
	int i;

	if (verbose >= 15)
		fprintf(stderr, "trace: removesourcepackage(pkg={sourcename: %s, sourceversion: %s}, distribution={codename: %s}) called.\n",
				pkg->sourcename, pkg->sourceversion, distribution->codename);

	result = RET_NOTHING;
	/* remove matching packages from every target of the distribution */
	for (target = distribution->targets ; target != NULL ;
			target = target->next) {
		r = target_initpackagesdb(target, READWRITE);
		RET_ENDUPDATE(result, r);
		if (RET_IS_OK(r)) {
			r = targetremovesourcepackage(t, pkg,
					distribution, target);
			RET_UPDATE(result, r);
			RET_UPDATE(distribution->status, r);
			r = target_closepackagesdb(target);
			RET_ENDUPDATE(result, r);
			RET_ENDUPDATE(distribution->status, r);
			if (RET_WAS_ERROR(result))
				return result;
		}
	}
	/* anything still referenced now is an inconsistency: warn and
	 * clear the count so the record can be tidied away */
	for (i = 0 ; i < pkg->filekeys.count ; i++) {
		const char *filekey = pkg->filekeys.values[i];

		if (pkg->refcounts[i] <= 0)
			continue;
		if (pkg->filetypes[i] != ft_ALL_BINARY &&
				pkg->filetypes[i] != ft_SOURCE &&
				pkg->filetypes[i] != ft_ARCH_BINARY)
			continue;
		fprintf(stderr,
"There was an inconsistency in the tracking data of '%s':\n"
"'%s' has refcount > 0, but was nowhere found.\n",
			distribution->codename,
			filekey);
		pkg->refcounts[i] = 0;
	}
	return result;
}
+
+retvalue tracking_removepackages(trackingdb t, struct distribution *distribution, const char *sourcename, /*@null@*/const char *version) {
+ struct trackedpackage *pkg;
+ retvalue result, r;
+
+ if (verbose >= 15)
+ fprintf(stderr, "trace: tracking_removepackages(distribution={codename: %s}, sourcename=%s, version=%s) called.\n",
+ distribution->codename, sourcename, version);
+
+ if (version == NULL)
+ return tracking_foreachversion(t, distribution,
+ sourcename, removesourcepackage);
+ result = tracking_get(t, sourcename, version, &pkg);
+ if (RET_IS_OK(result)) {
+ result = removesourcepackage(t, pkg, distribution);
+ if (RET_IS_OK(result)) {
+ r = trackedpackage_tidy(t, pkg);
+ RET_ENDUPDATE(result, r);
+ r = tracking_save(t, pkg);
+ RET_ENDUPDATE(result, r);
+ } else
+ trackedpackage_free(pkg);
+ }
+ return result;
+}
+
+static retvalue package_retrack(struct package *package, void *data) {
+ trackingdb tracks = data;
+
+ return package->target->doretrack(package->name,
+ package->control, tracks);
+}
+
/* Rebuild the tracking reference counters of distribution d from the
 * packages actually present, if forced via needsretrack or if any target
 * marked its tracking data stale; afterwards tidy away what is no longer
 * needed. */
retvalue tracking_retrack(struct distribution *d, bool needsretrack) {
	struct target *t;
	trackingdb tracks;
	retvalue r, rr;

	if (d->tracking == dt_NONE)
		return RET_NOTHING;

	/* if not forced, only retrack when some target is stale */
	for (t = d->targets ; !needsretrack && t != NULL ; t = t->next) {
		if (t->staletracking)
			needsretrack = true;
	}
	if (!needsretrack)
		return RET_NOTHING;

	if (verbose > 0)
		printf("Retracking %s...\n", d->codename);

	r = tracking_initialize(&tracks, d, false);
	if (!RET_IS_OK(r))
		return r;
	/* first forget that any package is there*/
	r = tracking_reset(tracks);
	if (!RET_WAS_ERROR(r)) {
		/* add back information about actually used files */
		r = package_foreach(d,
				atom_unknown, atom_unknown, atom_unknown,
				package_retrack, NULL, tracks);
	}
	if (RET_IS_OK(r)) {
		/* everything recounted: targets are no longer stale */
		for (t = d->targets ; t != NULL ; t = t->next) {
			t->staletracking = false;
		}
	}
	if (!RET_WAS_ERROR(r)) {
		/* now remove everything no longer needed */
		r = tracking_tidyall(tracks);
	}
	rr = tracking_done(tracks, d);
	RET_ENDUPDATE(r, rr);
	return r;
}
diff --git a/tracking.h b/tracking.h
new file mode 100644
index 0000000..e44383e
--- /dev/null
+++ b/tracking.h
@@ -0,0 +1,53 @@
+#ifndef REPREPRO_TRACKING_H
+#define REPREPRO_TRACKING_H
+
+
+#ifndef REPREPRO_DATABASE_H
+#include "database.h"
+#endif
+#ifndef REPREPRO_TRACKINGT_H
+#include "trackingt.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+
+retvalue tracking_parse(struct distribution *, struct configiterator *);
+
+/* high-level retrack of the whole distribution */
+retvalue tracking_retrack(struct distribution *, bool /*evenifnotstale*/);
+
+retvalue tracking_initialize(/*@out@*/trackingdb *, struct distribution *, bool readonly);
+retvalue tracking_done(trackingdb, struct distribution *distribution);
+retvalue tracking_listdistributions(/*@out@*/struct strlist *);
+retvalue tracking_drop(const char *);
+
+retvalue tracking_reset(trackingdb);
+retvalue tracking_rereference(struct distribution *);
+
+retvalue trackedpackage_addfilekey(trackingdb, struct trackedpackage *, enum filetype, /*@only@*/char * /*filekey*/, bool /*used*/);
+retvalue trackedpackage_adddupfilekeys(trackingdb, struct trackedpackage *, enum filetype, const struct strlist * /*filekeys*/, bool /*used*/);
+retvalue trackedpackage_removefilekeys(trackingdb, struct trackedpackage *, const struct strlist *);
+void trackedpackage_free(struct trackedpackage *);
+
+retvalue tracking_get(trackingdb, const char * /*sourcename*/, const char * /*version*/, /*@out@*/struct trackedpackage **);
+retvalue tracking_getornew(trackingdb, const char * /*name*/, const char * /*version*/, /*@out@*/struct trackedpackage **);
+retvalue tracking_save(trackingdb, /*@only@*/struct trackedpackage *);
+retvalue tracking_remove(trackingdb, const char * /*sourcename*/, const char * /*version*/);
+retvalue tracking_printall(trackingdb);
+
+retvalue trackingdata_summon(trackingdb, const char *, const char *, struct trackingdata *);
+retvalue trackingdata_new(trackingdb, struct trackingdata *);
+retvalue trackingdata_switch(struct trackingdata *, const char *, const char *);
+struct package;
+retvalue trackingdata_insert(struct trackingdata *, enum filetype, const struct strlist * /*filekeys*/, /*@null@*/const struct package * /*oldpackage*/, /*@null@*/const struct strlist * /*oldfilekeys*/);
+retvalue trackingdata_remove(struct trackingdata *, const char */*oldsource*/, const char * /*oldversion*/, const struct strlist * /*filekeys*/);
+void trackingdata_done(struct trackingdata *);
+/* like _done but actually do something */
+retvalue trackingdata_finish(trackingdb, struct trackingdata *);
+
+/* look at all listed packages and remove everything not needed */
+retvalue tracking_tidyall(trackingdb);
+
+retvalue tracking_removepackages(trackingdb, struct distribution *, const char * /*sourcename*/, /*@null@*/const char * /*version*/);
+#endif /*REPREPRO_TRACKING_H*/
diff --git a/trackingt.h b/trackingt.h
new file mode 100644
index 0000000..6faf093
--- /dev/null
+++ b/trackingt.h
@@ -0,0 +1,38 @@
#ifndef REPREPRO_TRACKINGT_H
#define REPREPRO_TRACKINGT_H

/* classification of a file within a tracking record; the character
 * values are what gets stored in the tracking database */
enum filetype { ft_ALL_BINARY='a',
		ft_ARCH_BINARY='b',
		ft_CHANGES = 'c',
		ft_LOG='l',
		ft_BUILDINFO='i',
		ft_SOURCE='s',
		ft_XTRA_DATA='x'};

/* one tracking record: a source package (name, version) and the pool
 * files belonging to it */
struct trackedpackage {
	char *sourcename;
	char *sourceversion;
	/* parallel arrays: filekeys.values[i] has reference count
	 * refcounts[i] and type filetypes[i] */
	struct strlist filekeys;
	int *refcounts;
	enum filetype *filetypes;
	struct {
		bool isnew;
		bool deleted;
	} flags;
};
typedef struct s_tracking *trackingdb;

/* state carried while packages are added/removed, to keep the tracking
 * records of all touched source packages up to date */
struct trackingdata {
	/*@temp@*/trackingdb tracks;
	/* the record currently being modified, or NULL */
	struct trackedpackage *pkg;
	/* other source packages touched along the way, tidied later in
	 * trackingdata_finish */
	/*@null@*/ struct trackingdata_remember {
		/*@null@*/struct trackingdata_remember *next;
		char *name;
		char *version;
	} *remembered;
};

struct distribution;
typedef retvalue tracking_foreach_ro_action(struct distribution *, const struct trackedpackage *);
retvalue tracking_foreach_ro(struct distribution *, tracking_foreach_ro_action *);
#endif /*REPREPRO_TRACKINGT_H*/
diff --git a/uncompression.c b/uncompression.c
new file mode 100644
index 0000000..d738a70
--- /dev/null
+++ b/uncompression.c
@@ -0,0 +1,1794 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2008 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
+#include <config.h>
+
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <poll.h>
+#include <zlib.h>
+#ifdef HAVE_LIBBZ2
+#include <bzlib.h>
+#endif
+#ifdef HAVE_LIBLZMA
+#include <lzma.h>
+#endif
+
+#include "globals.h"
+#include "error.h"
+#include "mprintf.h"
+#include "filecntl.h"
+#include "uncompression.h"
+
+const char * const uncompression_suffix[c_COUNT] = {
+ "", ".gz", ".bz2", ".lzma", ".xz", ".lz", ".zst"};
+
+/* So help messages can hint what option to try */
+const char * const uncompression_option[c_COUNT] = {
+ NULL, NULL, "--bunzip2", "--unlzma", "--unxz", "--lunzip", "--unzstd" };
+/* how those are called in the config file */
+const char * const uncompression_config[c_COUNT] = {
+ ".", ".gz", ".bz2", ".lzma", ".xz", ".lz", ".zst" };
+
+
+/*@null@*/ char *extern_uncompressors[c_COUNT] = {
+ NULL, NULL, NULL, NULL, NULL, NULL};
+
+/*@null@*/ static struct uncompress_task {
+ struct uncompress_task *next;
+ enum compression compression;
+ char *compressedfilename;
+ char *uncompressedfilename;
+ /* when != NULL, call when finished */
+ /*@null@*/finishaction *callback;
+ /*@null@*/void *privdata;
+ /* if already started, the pid > 0 */
+ pid_t pid;
+} *tasks = NULL;
+
+static void uncompress_task_free(/*@only@*/struct uncompress_task *t) {
+ free(t->compressedfilename);
+ free(t->uncompressedfilename);
+ free(t);
+}
+
/* Fork and exec the external uncompressor for compression c, with
 * stdinfd as its stdin and stdoutfd as its stdout.  Both fds are closed
 * in the parent (also on fork error).  On success the child's pid is
 * stored in *pid_p.  A child failing before/at exec raises SIGUSR2, so
 * the parent can tell setup failures from the uncompressor failing. */
static retvalue startchild(enum compression c, int stdinfd, int stdoutfd, /*@out@*/pid_t *pid_p) {
	int e, i;
	pid_t pid;

	pid = fork();
	if (pid < 0) {
		e = errno;
		fprintf(stderr, "Error %d forking: %s\n", e, strerror(e));
		(void)close(stdinfd);
		(void)close(stdoutfd);
		return RET_ERRNO(e);
	}
	if (pid == 0) {
		/* setup child */
		i = dup2(stdoutfd, 1);
		if (i < 0) {
			e = errno;
			fprintf(stderr, "Error %d in dup(%d, 1): %s\n",
					e, stdoutfd, strerror(e));
			raise(SIGUSR2);
		}
		i = dup2(stdinfd, 0);
		if (i < 0) {
			e = errno;
			fprintf(stderr, "Error %d in dup(%d, 0): %s\n",
					e, stdinfd, strerror(e));
			raise(SIGUSR2);
		}
		/* do not leak any other descriptors into the child */
		closefrom(3);
		execlp(extern_uncompressors[c], extern_uncompressors[c],
				ENDOFARGUMENTS);
		e = errno;
		fprintf(stderr, "Error %d starting '%s': %s\n",
				e, extern_uncompressors[c], strerror(e));
		raise(SIGUSR2);
		exit(EXIT_FAILURE);
	}
	(void)close(stdinfd);
	(void)close(stdoutfd);
	*pid_p = pid;
	return RET_OK;
}
+
/* Like startchild, but create a pipe for the child's stdout; the read
 * end (marked close-on-exec) is returned in *pipefd.  fd becomes the
 * child's stdin and is closed in the parent. */
static retvalue startpipeoutchild(enum compression c, int fd, /*@out@*/int *pipefd, /*@out@*/pid_t *pid_p) {
	int i, e, filedes[2];
	retvalue r;

	i = pipe(filedes);
	if (i < 0) {
		e = errno;
		fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
		(void)close(fd);
		return RET_ERRNO(e);
	}
	markcloseonexec(filedes[0]);
	r = startchild(c, fd, filedes[1], pid_p);
	if (RET_WAS_ERROR(r))
		/* fd and filedes[1] are closed by startchild on error */
		(void)close(filedes[0]);
	else
		*pipefd = filedes[0];
	return r;
}
+
/* Start an uncompressor child with both stdin and stdout connected via
 * pipes: the write end for feeding it is returned in *infd, the read
 * end of its output in *outfd (parent-side ends marked close-on-exec). */
static retvalue startpipeinoutchild(enum compression c, /*@out@*/int *infd, /*@out@*/int *outfd, /*@out@*/pid_t *pid_p) {
	int i, e, infiledes[2];
	retvalue r;

	i = pipe(infiledes);
	if (i < 0) {
		e = errno;
		fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e));
		return RET_ERRNO(e);
	}
	markcloseonexec(infiledes[1]);
	r = startpipeoutchild(c, infiledes[0], outfd, pid_p);
	if (RET_WAS_ERROR(r))
		/* infiledes[0] is closed by startpipeoutchild on error */
		(void)close(infiledes[1]);
	else
		*infd = infiledes[1];
	return r;
}
+
/* If fewer than the maximum number of uncompressors (currently 1) is
 * running, open the input/output files of the first not-yet-started
 * queued task and spawn its external uncompressor.
 * Returns RET_NOTHING when there is nothing left to start. */
static retvalue uncompress_start_queued(void) {
	struct uncompress_task *t;
	int running_count = 0;
	int e, stdinfd, stdoutfd;

	for (t = tasks ; t != NULL ; t = t->next) {
		if (t->pid > 0)
			running_count++;
	}
	// TODO: make the maximum number configurable,
	// until that 1 is the best guess...
	if (running_count >= 1)
		return RET_OK;
	/* find the first task not yet started */
	t = tasks;
	while (t != NULL && t->pid > 0)
		t = t->next;
	if (t == NULL)
		/* nothing to do... */
		return RET_NOTHING;
	if (verbose > 1) {
		fprintf(stderr, "Uncompress '%s' into '%s' using '%s'...\n",
				t->compressedfilename,
				t->uncompressedfilename,
				extern_uncompressors[t->compression]);
	}
	stdinfd = open(t->compressedfilename, O_RDONLY|O_NOCTTY);
	if (stdinfd < 0) {
		e = errno;
		fprintf(stderr, "Error %d opening %s: %s\n",
				e, t->compressedfilename,
				strerror(e));
		// TODO: call callback
		return RET_ERRNO(e);
	}
	/* O_EXCL|O_NOFOLLOW: never overwrite an existing file or follow
	 * a symlink when creating the output */
	stdoutfd = open(t->uncompressedfilename,
			O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, 0666);
	if (stdoutfd < 0) {
		close(stdinfd);
		e = errno;
		fprintf(stderr, "Error %d creating %s: %s\n",
				e, t->uncompressedfilename,
				strerror(e));
		// TODO: call callback
		return RET_ERRNO(e);
	}
	return startchild(t->compression, stdinfd, stdoutfd, &t->pid);
}
+
+static inline retvalue builtin_uncompress(const char *compressed, const char *destination, enum compression compression);
+
/* we got a pid, check if it is an uncompressor we care for:
 * if so, report failures, delete partial output, optionally retry with
 * the builtin decompressor, run the task's callback, free the task and
 * start the next queued one.  Returns RET_NOTHING for foreign pids. */
retvalue uncompress_checkpid(pid_t pid, int status) {
	struct uncompress_task *t, **t_p;
	retvalue r, r2;
	bool error = false;

	if (pid <= 0)
		return RET_NOTHING;
	/* find the task for this pid, keeping the pointer to the link so
	 * the entry can be unchained below */
	t_p = &tasks;
	while ((t = (*t_p)) != NULL && t->pid != pid)
		t_p = &t->next;
	if (t == NULL) {
		/* not one we started */
		return RET_NOTHING;
	}
	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			fprintf(stderr,
"'%s' < %s > %s exited with errorcode %d!\n",
					extern_uncompressors[t->compression],
					t->compressedfilename,
					t->uncompressedfilename,
					(int)(WEXITSTATUS(status)));
			error = true;
		}
	} else if (WIFSIGNALED(status)) {
		/* SIGUSR2 is raised by our own child setup code, which
		 * already printed a message */
		if (WTERMSIG(status) != SIGUSR2)
			fprintf(stderr, "'%s' < %s > %s killed by signal %d!\n",
					extern_uncompressors[t->compression],
					t->compressedfilename,
					t->uncompressedfilename,
					(int)(WTERMSIG(status)));
		error = true;
	} else {
		fprintf(stderr, "'%s' < %s > %s terminated abnormally!\n",
				extern_uncompressors[t->compression],
				t->compressedfilename,
				t->uncompressedfilename);
		error = true;
	}
	if (error) {
		/* no need to leave partial stuff around */
		(void)unlink(t->uncompressedfilename);
	}
	if (!error && verbose > 10)
		printf("'%s' < %s > %s finished successfully!\n",
				extern_uncompressors[t->compression],
				t->compressedfilename,
				t->uncompressedfilename);
	if (error && uncompression_builtin(t->compression)) {
		/* try builtin method instead */
		r = builtin_uncompress(t->compressedfilename,
				t->uncompressedfilename, t->compression);
		if (RET_WAS_ERROR(r)) {
			(void)unlink(t->uncompressedfilename);
		} else if (RET_IS_OK(r)) {
			error = false;
		}
	}
	/* call the notification, if asked for */
	if (t->callback != NULL) {
		r = t->callback(t->privdata, t->compressedfilename, error);
		if (r == RET_NOTHING)
			r = RET_OK;
	} else if (error)
		r = RET_ERROR;
	else
		r = RET_OK;
	/* take out of the chain and free */
	*t_p = t->next;
	uncompress_task_free(t);
	r2 = uncompress_start_queued();
	RET_ENDUPDATE(r, r2);
	return r;
}
+
+bool uncompress_running(void) {
+ uncompress_start_queued();
+ return tasks != NULL;
+}
+
/* check if a program is available. This is needed because things like execlp
 * come too late (we want to know if downloading a Packages.bz2 makes sense
 * when compiled without libbz2 before actually calling the uncompressor) */
+
/* Locate an uncompression helper binary.
 * setting: configured name or path; NULL or "" selects default_program,
 *          the literal "NONE" disables the helper entirely.
 * program_p: receives a newly allocated path when something suitable is
 *            found; left untouched otherwise.
 * Only existence and the x-bit are checked: if someone puts files there
 * that we cannot actually execute, that is their fault (executability
 * by us specifically is hard to check here). */
static void search_binary(/*@null@*/const char *setting, const char *default_program, /*@out@*/char **program_p) {
	const char *dirlist, *end;
	char *found = NULL;

	/* not set or empty means default */
	if (setting == NULL || setting[0] == '\0')
		setting = default_program;
	/* all-caps NONE means no helper is wanted at all */
	if (strcmp(setting, "NONE") == 0)
		return;

	if (strchr(setting, '/') != NULL) {
		/* qualified path: check it directly */
		if (isregularfile(setting) && access(setting, X_OK) == 0)
			found = strdup(setting);
	} else {
		/* unqualified name: walk the components of $PATH */
		dirlist = getenv("PATH");
		if (dirlist == NULL)
			return;
		while (found == NULL && dirlist[0] != '\0') {
			/* skip empty path components */
			if (dirlist[0] == ':') {
				dirlist++;
				continue;
			}
			end = strchr(dirlist, ':');
			if (end == NULL)
				end = dirlist + strlen(dirlist);
			assert (end > dirlist);
			found = mprintf("%.*s/%s",
					(int)(end - dirlist), dirlist,
					setting);
			if (found == NULL)
				return;
			if (!isregularfile(found)
					|| access(found, X_OK) != 0) {
				free(found);
				found = NULL;
			}
			dirlist = (*end == ':') ? end + 1 : end;
		}
	}
	if (found != NULL)
		*program_p = found;
}
+
+/* check for existence of external programs */
+void uncompressions_check(const char *gunzip, const char *bunzip2, const char *unlzma, const char *unxz, const char *lunzip, const char *unzstd) {
+ search_binary(gunzip, "gunzip", &extern_uncompressors[c_gzip]);
+ search_binary(bunzip2, "bunzip2", &extern_uncompressors[c_bzip2]);
+ search_binary(unlzma, "unlzma", &extern_uncompressors[c_lzma]);
+ search_binary(unxz, "unxz", &extern_uncompressors[c_xz]);
+ search_binary(lunzip, "lunzip", &extern_uncompressors[c_lunzip]);
+ search_binary(unzstd, "unzstd", &extern_uncompressors[c_zstd]);
+}
+
+static inline retvalue builtin_uncompress(const char *compressed, const char *destination, enum compression compression) {
+ struct compressedfile *f;
+ char buffer[4096];
+ int bytes_read, bytes_written, written;
+ int destfd;
+ int e;
+ retvalue r;
+
+ r = uncompress_open(&f, compressed, compression);
+ if (!RET_IS_OK(r))
+ return r;
+ destfd = open(destination, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
+ if (destfd < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d creating '%s': %s\n",
+ e, destination, strerror(e));
+ uncompress_abort(f);
+ return RET_ERRNO(e);
+ }
+ do {
+ bytes_read = uncompress_read(f, buffer, 4096);
+ if (bytes_read <= 0)
+ break;
+
+ bytes_written = 0;
+ while (bytes_written < bytes_read) {
+ written = write(destfd, buffer + bytes_written,
+ bytes_read - bytes_written);
+ if (written < 0) {
+ e = errno;
+ fprintf(stderr,
+"Error %d writing to '%s': %s\n",
+ e, destination, strerror(e));
+ close(destfd);
+ uncompress_abort(f);
+ return RET_ERRNO(e);
+ }
+ bytes_written += written;
+ }
+ } while (true);
+ r = uncompress_close(f);
+ if (RET_WAS_ERROR(r)) {
+ (void)close(destfd);
+ return r;
+ }
+ if (close(destfd) != 0) {
+ e = errno;
+ fprintf(stderr, "Error %d writing to '%s': %s!\n",
+ e, destination, strerror(e));
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
+static retvalue uncompress_queue_external(enum compression compression, const char *compressed, const char *uncompressed, /*@null@*/finishaction *action, /*@null@*/void *privdata) {
+ struct uncompress_task *t, **t_p;
+ retvalue r;
+
+ t_p = &tasks;
+ while ((t = (*t_p)) != NULL)
+ t_p = &t->next;
+
+ t = zNEW(struct uncompress_task);
+ if (FAILEDTOALLOC(t))
+ return RET_ERROR_OOM;
+
+ t->compressedfilename = strdup(compressed);
+ t->uncompressedfilename = strdup(uncompressed);
+ if (FAILEDTOALLOC(t->compressedfilename) ||
+ FAILEDTOALLOC(t->uncompressedfilename)) {
+ uncompress_task_free(t);
+ return RET_ERROR_OOM;
+ }
+ t->compression = compression;
+ t->callback = action;
+ t->privdata = privdata;
+ *t_p = t;
+ r = uncompress_start_queued();
+ if (r == RET_NOTHING)
+ r = RET_ERROR_INTERNAL;
+ return r;
+}
+
/* Uncompress <compressed> into <destination>, preferring an external
 * uncompressor (asynchronous) and falling back to the builtin one
 * (synchronous).  <action> is called when the work is done; any
 * pre-existing destination file is removed first. */
retvalue uncompress_queue_file(const char *compressed, const char *destination, enum compression compression, finishaction *action, void *privdata) {
	retvalue r;

	(void)unlink(destination);
	if (extern_uncompressors[compression] != NULL) {
		r = uncompress_queue_external(compression, compressed,
				destination, action, privdata);
		if (r != RET_NOTHING) {
			return r;
		}
		/* NOTE(review): uncompress_queue_external maps RET_NOTHING
		 * to RET_ERROR_INTERNAL, so this fallback path looks
		 * unreachable — kept for safety. */
		if (!uncompression_builtin(compression))
			return RET_ERROR;
	}
	if (verbose > 1) {
		fprintf(stderr, "Uncompress '%s' into '%s'...\n",
				compressed, destination);
	}
	assert (uncompression_builtin(compression));
	/* the builtin path runs synchronously, so the callback can be
	 * invoked right away */
	r = builtin_uncompress(compressed, destination, compression);
	if (RET_WAS_ERROR(r)) {
		(void)unlink(destination);
		return r;
	}
	return action(privdata, compressed, false);
}
+
/* Synchronously uncompress <compressed> into <destination>.
 * Uses the builtin decompressor when available, otherwise queues the
 * external helper and waits for that single child to finish.
 * Must not be called while asynchronous tasks are pending. */
retvalue uncompress_file(const char *compressed, const char *destination, enum compression compression) {
	retvalue r;

	/* not allowed within a aptmethod session */
	assert (tasks == NULL);

	(void)unlink(destination);
	if (uncompression_builtin(compression)) {
		if (verbose > 1) {
			fprintf(stderr, "Uncompress '%s' into '%s'...\n",
					compressed, destination);
		}
		r = builtin_uncompress(compressed, destination, compression);
	} else if (extern_uncompressors[compression] != NULL) {
		r = uncompress_queue_external(compression,
				compressed, destination, NULL, NULL);
		if (r == RET_NOTHING)
			r = RET_ERROR;
		if (RET_IS_OK(r)) {
			/* wait for the child to finish... */
			assert (tasks != NULL && tasks->next == NULL);

			do {
				int status;
				pid_t pid;

				pid = wait(&status);

				if (pid < 0) {
					int e = errno;

					if (interrupted()) {
						r = RET_ERROR_INTERRUPTED;
						break;
					}
					if (e == EINTR)
						continue;
					fprintf(stderr,
"Error %d waiting for uncompression child: %s\n",
						e, strerror(e));
					r = RET_ERRNO(e);
				} else
					/* RET_NOTHING from checkpid means
					 * it was some other child: keep
					 * waiting for ours */
					r = uncompress_checkpid(pid, status);
			} while (r == RET_NOTHING);
		}
	} else {
		assert ("Impossible uncompress error" == NULL);
		r = RET_ERROR;
	}
	if (RET_WAS_ERROR(r)) {
		/* do not leave partial data behind */
		(void)unlink(destination);
		return r;
	}
	return RET_OK;
}
+
/* State of one file being read with transparent uncompression.
 * Either an external uncompressor child is attached (external == true;
 * pid/pipeinfd/intermediate are in use), or decompression happens
 * in-process (union member uncompress in use). */
struct compressedfile {
	char *filename;			/* NULL when wrapping a caller's fd */
	enum compression compression;
	bool external;			/* true: a child process decompresses */
	bool closefd;			/* set by uncompress_open, cleared by
					 * uncompress_fdopen; NOTE(review):
					 * presumably marks the fd as ours to
					 * close — confirm in the close path */
	int error;			/* first saved errno, 0 if none yet */
	pid_t pid;			/* the external child, if any */
	/* fd: uncompressed data is read from here,
	 * infd: raw compressed input (uncompress_fdopen case),
	 * pipeinfd: write end feeding the child's stdin (-1 if unused) */
	int fd, infd, pipeinfd;
	/* remaining bytes of raw input; -1 when the length is unknown */
	off_t len;
	union {
		/* used with an external decompressor if the input fd cannot
		 * be used as that programs stdin directly: */
		struct intermediate_buffer {
			char *buffer;
			int ofs;	/* first byte not yet written out */
			int ready;	/* buffered byte count, < 0 on error */
		} intermediate;
		/* used if an internal decompression != c_none is used: */
		struct uncompression {
			unsigned char *buffer;	/* raw input buffer */
			unsigned int available;	/* valid bytes in buffer */
			union {
				z_stream gz;
#ifdef HAVE_LIBBZ2
				bz_stream bz2;
#endif
#ifdef HAVE_LIBLZMA
				lzma_stream lzma;
#endif
			};
			enum uncompression_error {
				ue_NO_ERROR = 0,
				ue_TRAILING_GARBAGE,
				ue_WRONG_LENGTH,
				ue_UNCOMPRESSION_ERROR,
			} error;
			/* compression stream ended */
			bool hadeos;
		} uncompress;
	};
};
+
/* This function is called to refill the internal buffer in uncompress.buffer
 * with data initially or once everything of the previous run was consumed.
 * It will set uncompress.available to a value >0, unless there is a EOF
 * condition, then it can also be set to 0.
 */
#define UNCOMPRESSION_BUFSIZE 16*1024
static retvalue uncompression_read_internal_buffer(struct compressedfile *f) {
	size_t len;
	ssize_t r;
	assert (f->uncompress.available == 0);

	/* f->len == 0: all raw input already consumed (EOF seen) */
	if (f->len == 0) {
		f->uncompress.available = 0;
		return RET_OK;
	}

	/* the buffer is allocated lazily on first use */
	if (f->uncompress.buffer == NULL) {
		f->uncompress.buffer = malloc(UNCOMPRESSION_BUFSIZE);
		if (FAILEDTOALLOC(f->uncompress.buffer)) {
			f->error = ENOMEM;
			return RET_ERROR_OOM;
		}
	}

	/* never read past the announced length (f->len < 0: unknown) */
	len = UNCOMPRESSION_BUFSIZE;
	if (f->len >= 0 && len > (size_t)f->len)
		len = f->len;

	if (len == 0)
		return RET_OK;

	/* retry reads interrupted by signals, but honour interruption
	 * requested by the user */
	do {
		if (interrupted()) {
			f->error = EINTR;
			return RET_ERROR_INTERRUPTED;
		}
		r = read(f->fd, f->uncompress.buffer, len);
	} while (r < 0 && errno == EINTR);
	if (r < 0) {
		f->error = errno;
		return RET_ERRNO(errno);
	}
	assert ((size_t)r <= len);
	if (f->len >= 0) {
		assert (r <= f->len);
		f->len -= r;
	} else {
		if (r == 0) {
			/* remember EOF
			 * (so it can be checked for to detect
			 * checksum circumventing trailing garbage) */
			f->len = 0;
		}
	}
	f->uncompress.available = r;
	return RET_OK;
}
+
+static inline retvalue start_gz(struct compressedfile *f, int *errno_p, const char **msg_p) {
+ int ret;
+
+ memset(&f->uncompress.gz, 0, sizeof(f->uncompress.gz));
+ f->uncompress.gz.zalloc = Z_NULL; /* use default */
+ f->uncompress.gz.zfree = Z_NULL; /* use default */
+ f->uncompress.gz.next_in = f->uncompress.buffer;
+ f->uncompress.gz.avail_in = f->uncompress.available;
+ /* 32 means accept zlib and gz header
+ * 15 means accept any windowSize */
+ ret = inflateInit2(&f->uncompress.gz, 32 + 15);
+ if (ret != Z_OK) {
+ if (ret == Z_MEM_ERROR) {
+ *errno_p = ENOMEM;
+ *msg_p = "Out of Memory";
+ return RET_ERROR_OOM;
+ }
+ *errno_p = -1;
+ /* f->uncompress.gz.msg will be free'd soon */
+ fprintf(stderr, "zlib error %d: %s", ret, f->uncompress.gz.msg);
+ *msg_p = "Error starting internal gz uncompression using zlib";
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
#ifdef HAVE_LIBBZ2
/* Initialize the libbz2 decompressor for f.
 * The input buffer must already be filled (done by start_builtin).
 * On failure *errno_p/*msg_p describe the error (-EINVAL: no errno). */
static inline retvalue start_bz2(struct compressedfile *f, int *errno_p, const char **msg_p) {
	int ret;

	memset(&f->uncompress.bz2, 0, sizeof(f->uncompress.bz2));

	/* not used by bzDecompressInit, but not set before next call: */
	f->uncompress.bz2.next_in = (char*)f->uncompress.buffer;
	f->uncompress.bz2.avail_in = f->uncompress.available;

	ret = BZ2_bzDecompressInit(&f->uncompress.bz2, 0, 0);
	if (ret != BZ_OK) {
		if (ret == BZ_MEM_ERROR) {
			*errno_p = ENOMEM;
			*msg_p = "Out of Memory";
			return RET_ERROR_OOM;
		}
		*errno_p = -EINVAL;
		*msg_p = "libbz2 not working";
		return RET_ERROR;
	}
	return RET_OK;
}
#endif
+
#ifdef HAVE_LIBLZMA
/* Initialize liblzma in lzma-alone (.lzma) mode for f.
 * The input buffer must already be filled (done by start_builtin).
 * On failure *errno_p/*msg_p describe the error (-EINVAL: no errno). */
static inline retvalue start_lzma(struct compressedfile *f, int *errno_p, const char **msg_p) {
	int ret;
	/* as the API requests: */
	lzma_stream tmpstream = LZMA_STREAM_INIT;

	memset(&f->uncompress.lzma, 0, sizeof(f->uncompress.lzma));
	f->uncompress.lzma = tmpstream;

	/* not used here, but needed by uncompression_read_* logic */
	f->uncompress.lzma.next_in = f->uncompress.buffer;
	f->uncompress.lzma.avail_in = f->uncompress.available;

	ret = lzma_alone_decoder(&f->uncompress.lzma, UINT64_MAX);
	if (ret != LZMA_OK) {
		if (ret == LZMA_MEM_ERROR) {
			*errno_p = ENOMEM;
			*msg_p = "Out of Memory";
			return RET_ERROR_OOM;
		}
		*errno_p = -EINVAL;
		*msg_p = "liblzma not working";
		return RET_ERROR;
	}
	return RET_OK;
}
+
/* Initialize liblzma in .xz mode for f.  LZMA_CONCATENATED makes
 * liblzma itself handle concatenated xz streams.
 * On failure *errno_p/*msg_p describe the error (-EINVAL: no errno). */
static inline retvalue start_xz(struct compressedfile *f, int *errno_p, const char **msg_p) {
	int ret;
	/* as the API requests: */
	lzma_stream tmpstream = LZMA_STREAM_INIT;

	memset(&f->uncompress.lzma, 0, sizeof(f->uncompress.lzma));
	f->uncompress.lzma = tmpstream;

	/* not used here, but needed by uncompression_read_* logic */
	f->uncompress.lzma.next_in = f->uncompress.buffer;
	f->uncompress.lzma.avail_in = f->uncompress.available;

	ret = lzma_stream_decoder(&f->uncompress.lzma, UINT64_MAX, LZMA_CONCATENATED);
	if (ret != LZMA_OK) {
		if (ret == LZMA_MEM_ERROR) {
			*errno_p = ENOMEM;
			*msg_p = "Out of Memory";
			return RET_ERROR_OOM;
		}
		*errno_p = -EINVAL;
		*msg_p = "liblzma not working";
		return RET_ERROR;
	}
	return RET_OK;
}
#endif
+
/* Common start-up for the in-process decompressors: read the first
 * chunk of raw input, then dispatch to the format-specific init.
 * Requires a builtin-supported compression != c_none.
 * On failure *errno_p/*msg_p describe the error. */
static retvalue start_builtin(struct compressedfile *f, int *errno_p, const char **msg_p) {
	retvalue r;

	assert (f->compression != c_none);
	assert (uncompression_builtin(f->compression));

	r = uncompression_read_internal_buffer(f);
	if (RET_WAS_ERROR(r)) {
		free(f->uncompress.buffer);
		f->uncompress.buffer = NULL;
		*errno_p = f->error;
		*msg_p = strerror(f->error);
		return r;
	}
	/* an empty file cannot be a valid compressed file */
	if (f->uncompress.available == 0) {
		*errno_p = -EINVAL;
		*msg_p = "File supposed to be compressed file is empty instead";
		return RET_ERROR;
	}

	switch (f->compression) {
		case c_gzip:
			return start_gz(f, errno_p, msg_p);
#ifdef HAVE_LIBBZ2
		case c_bzip2:
			return start_bz2(f, errno_p, msg_p);
#endif
#ifdef HAVE_LIBLZMA
		case c_lzma:
			return start_lzma(f, errno_p, msg_p);
		case c_xz:
			return start_xz(f, errno_p, msg_p);
#endif
		default:
			assert (false);
			return RET_ERROR_INTERNAL;
	}
	/* not reached */
}
+
+retvalue uncompress_open(/*@out@*/struct compressedfile **file_p, const char *filename, enum compression compression) {
+ struct compressedfile *f;
+ int fd, e;
+ retvalue r;
+ const char *msg;
+
+ f = zNEW(struct compressedfile);
+ if (FAILEDTOALLOC(f))
+ return RET_ERROR_OOM;
+ f->filename = strdup(filename);
+ if (FAILEDTOALLOC(f->filename)) {
+ free(f);
+ return RET_ERROR_OOM;
+ }
+ f->compression = compression;
+ f->fd = -1;
+ f->infd = -1;
+ f->pipeinfd = -1;
+ f->len = -1;
+ f->external = false;
+ f->closefd = true;
+
+ if (compression == c_none || uncompression_builtin(compression)) {
+ f->fd = open(filename, O_RDONLY|O_NOCTTY);
+ if (f->fd < 0) {
+ e = errno;
+ free(f->filename);
+ free(f);
+ // if (e == || e ==)
+ // return RET_NOTHING;
+ fprintf(stderr, "Error %d opening '%s': %s!\n",
+ e, filename, strerror(e));
+ return RET_ERRNO(e);
+ }
+ if (f->compression != c_none) {
+ r = start_builtin(f, &e, &msg);
+ if (RET_WAS_ERROR(r)) {
+ (void)close(f->fd);
+ if (e != -EINVAL && e != 0)
+ fprintf(stderr,
+"Error %d stating unpacking '%s': %s\n",
+ e, f->filename, msg);
+ else
+ fprintf(stderr,
+"Error starting unpacking '%s': %s\n",
+ f->filename, msg);
+ free(f->filename);
+ free(f);
+ return r;
+ }
+ }
+ } else {
+ assert (extern_uncompressors[compression] != NULL);
+ /* call external helper instead */
+ fd = open(f->filename, O_RDONLY|O_NOCTTY);
+ if (fd < 0) {
+ e = errno;
+ fprintf(stderr, "Error %d opening '%s': %s\n", e,
+ f->filename, strerror(e));
+ free(f->filename);
+ free(f);
+ return RET_ERRNO(e);
+ }
+ /* startpipeoutchild closes fd on error: */
+ r = startpipeoutchild(compression, fd, &f->fd, &f->pid);
+ if (RET_WAS_ERROR(r)) {
+ free(f->filename);
+ free(f);
+ return r;
+ }
+ assert (f->pid > 0);
+ f->external = true;
+ }
+ *file_p = f;
+ return RET_OK;
+}
+
+static int intermediate_size = 0;
+
/* Wrap an already-open <fd> carrying <len> bytes (-1: unknown) of data
 * compressed with <compression> for transparent reading.
 * The fd stays owned by the caller (closefd = false).
 * On failure *errno_p/*msg_p are set (-EINVAL: no meaningful errno). */
retvalue uncompress_fdopen(struct compressedfile **file_p, int fd, off_t len, enum compression compression, int *errno_p, const char **msg_p) {
	struct compressedfile *f;
	retvalue r;

	f = zNEW(struct compressedfile);
	if (FAILEDTOALLOC(f)) {
		*errno_p = ENOMEM;
		*msg_p = "Out of memory";
		return RET_ERROR_OOM;
	}
	f->filename = NULL;
	f->compression = compression;
	f->infd = fd;
	f->fd = -1;
	f->pipeinfd = -1;
	f->len = len;
	f->external = false;
	f->closefd = false;

	if (compression == c_none) {
		/* nothing to decompress: read from fd directly */
		f->fd = fd;
		f->infd = -1;
	} else if (uncompression_builtin(compression)) {
		/* the in-process decompressor reads from fd directly, too */
		f->fd = fd;
		f->infd = -1;
		r = start_builtin(f, errno_p, msg_p);
		if (RET_WAS_ERROR(r)) {
			free(f);
			return r;
		}
	} else {
		assert (extern_uncompressors[compression] != NULL);

		f->external = true;
		if (intermediate_size == 0) {
			/* pipes are guaranteed to swallow a full
			 * page without blocking if poll
			 * tells you can write */
			long l = sysconf(_SC_PAGESIZE);
			if (l <= 0)
				intermediate_size = 512;
			else if (l > 4096)
				intermediate_size = 4096;
			else
				intermediate_size = l;
		}
		f->intermediate.buffer = malloc(intermediate_size);
		f->intermediate.ready = 0;
		f->intermediate.ofs = 0;
		if (FAILEDTOALLOC(f->intermediate.buffer)) {
			*errno_p = ENOMEM;
			*msg_p = "Out of memory";
			free(f);
			return RET_ERROR_OOM;
		}
		/* the child reads from pipeinfd (fed by us from infd in
		 * pipebackforth) and writes its output to f->fd */
		r = startpipeinoutchild(f->compression,
				&f->pipeinfd, &f->fd, &f->pid);
		if (RET_WAS_ERROR(r)) {
			*errno_p = -EINVAL;
			*msg_p = "Error starting external uncompressor";
			free(f->intermediate.buffer);
			free(f);
			return r;
		}
	}
	*file_p = f;
	return RET_OK;
}
+
/* Shuffle data between us and an external uncompressor child: feed raw
 * input into its stdin while reading uncompressed output from its
 * stdout.  Returns like read(): bytes read, 0 on EOF, -1 on error
 * (errno saved in file->error). */
static inline int pipebackforth(struct compressedfile *file, void *buffer, int size) {
	/* we have to make sure we only read when things are available and only
	 * write when there is still space in the pipe, otherwise we can end up
	 * in a deadlock because we are waiting for the output of a program
	 * that cannot generate output because it needs more input from us
	 * first or because we wait for a program to accept input that waits
	 * for us to consume the output... */
	struct pollfd p[2];
	ssize_t written;
	int i;

	assert (file->external);

	do {

		p[0].fd = file->pipeinfd;
		p[0].events = POLLOUT;
		p[1].fd = file->fd;
		p[1].events = POLLIN;

		/* wait till there is something to do */
		i = poll(p, 2, -1);
		if (i < 0) {
			if (errno == EINTR)
				continue;
			file->error = errno;
			return -1;
		}
		if ((p[0].revents & POLLERR) != 0) {
			file->error = EIO;
			return -1;
		}
		if ((p[0].revents & POLLHUP) != 0) {
			/* not being able to send when we have something
			 * is an error */
			if (file->len > 0 || file->intermediate.ready > 0) {
				file->error = EIO;
				return -1;
			}
			(void)close(file->pipeinfd);
			file->pipeinfd = -1;
			/* wait for the rest */
			return read(file->fd, buffer, size);

		}
		if ((p[0].revents & POLLOUT) != 0) {
			struct intermediate_buffer *im = &file->intermediate;

			if (im->ready < 0)
				return -1;

			if (im->ready == 0) {
				// TODO: check if splice is safe or will create
				// dead-locks...
				int isize = intermediate_size;
				im->ofs = 0;

				/* never read past the announced length */
				if (file->len >= 0 && isize > file->len)
					isize = file->len;
				if (isize == 0)
					im->ready = 0;
				else
					im->ready = read(file->infd,
							im->buffer + im->ofs,
							isize);
				if (im->ready < 0) {
					file->error = errno;
					return -1;
				}
				if (im->ready == 0) {
					/* raw input exhausted: close the
					 * child's stdin and only collect
					 * its remaining output */
					(void)close(file->pipeinfd);
					file->pipeinfd = -1;
					/* wait for the rest */
					return read(file->fd, buffer, size);
				}
				file->len -= im->ready;
			}
			written = write(file->pipeinfd, im->buffer + im->ofs,
					im->ready);
			if (written < 0) {
				file->error = errno;
				return -1;
			}
			im->ofs += written;
			im->ready -= written;
		}

		if ((p[1].revents & POLLIN) != 0)
			return read(file->fd, buffer, size);
	} while (true);
}
+
+static inline int restart_gz_if_needed(struct compressedfile *f) {
+ retvalue r;
+ int ret;
+
+ /* first mark end of stream, will be reset if restarted */
+ f->uncompress.hadeos = true;
+
+ if (f->uncompress.gz.avail_in == 0 && f->len != 0) {
+ /* Input buffer consumed and (possibly) more data, so
+ * read more data to check: */
+ f->uncompress.available = 0;
+ r = uncompression_read_internal_buffer(f);
+ if (RET_WAS_ERROR(r))
+ return false;
+ f->uncompress.gz.next_in = f->uncompress.buffer;
+ f->uncompress.gz.avail_in = f->uncompress.available;
+ if (f->uncompress.available == 0 && f->len > 0) {
+ /* stream ends, file ends, but we are
+ * still expecting data? */
+ f->uncompress.error = ue_WRONG_LENGTH;
+ return false;
+ }
+ assert (f->uncompress.gz.avail_in > 0 || f->len == 0);
+ }
+ if (f->uncompress.gz.avail_in > 0 &&
+ f->uncompress.gz.next_in[0] == 0x1F) {
+ /* might be concatenated files, so try to restart */
+ ret = inflateEnd(&f->uncompress.gz);
+ if (ret != Z_OK) {
+ f->uncompress.error = ue_UNCOMPRESSION_ERROR;
+ return false;
+ }
+
+ unsigned int avail_in = f->uncompress.gz.avail_in;
+ unsigned char *next_in = f->uncompress.gz.next_in;
+ memset(&f->uncompress.gz, 0, sizeof(f->uncompress.gz));
+ f->uncompress.gz.zalloc = Z_NULL; /* use default */
+ f->uncompress.gz.zfree = Z_NULL; /* use default */
+ f->uncompress.gz.next_in = next_in;
+ f->uncompress.gz.avail_in = avail_in;
+ /* 32 means accept zlib and gz header
+ * 15 means accept any windowSize */
+ ret = inflateInit2(&f->uncompress.gz, 32 + 15);
+ if (ret != BZ_OK) {
+ if (ret == BZ_MEM_ERROR) {
+ f->error = ENOMEM;
+ return false;
+ }
+ f->uncompress.error = ue_UNCOMPRESSION_ERROR;
+ return false;
+ }
+ if (ret != Z_OK) {
+ if (ret == Z_MEM_ERROR) {
+ f->error = ENOMEM;
+ return false;
+ }
+ f->uncompress.error = ue_TRAILING_GARBAGE;
+ return false;
+ }
+ f->uncompress.hadeos = false;
+ /* successful restarted */
+ return true;
+ } else {
+ /* mark End Of Stream, so bzDecompress is not called again */
+ f->uncompress.hadeos = true;
+ if (f->uncompress.gz.avail_in > 0) {
+ /* trailing garbage */
+ f->uncompress.error = ue_TRAILING_GARBAGE;
+ return false;
+ } else
+ /* normal end of stream, no error and
+ * no restart necessary: */
+ return true;
+ }
+}
+
/* Decompress up to <size> bytes of gzip data into buffer.
 * Returns the number of bytes produced, 0 only at end of stream,
 * -1 on error (recorded in f->error / f->uncompress.error). */
static inline int read_gz(struct compressedfile *f, void *buffer, int size) {
	int ret;
	int flush = Z_SYNC_FLUSH;
	retvalue r;

	assert (f->compression == c_gzip);
	assert (size >= 0);

	if (size == 0)
		return 0;

	f->uncompress.gz.next_out = buffer;
	f->uncompress.gz.avail_out = size;
	do {

		/* refill the raw input buffer when exhausted */
		if (f->uncompress.gz.avail_in == 0) {
			f->uncompress.available = 0;
			r = uncompression_read_internal_buffer(f);
			if (RET_WAS_ERROR(r)) {
				/* NOTE(review): f->error was already set by
				 * the helper; errno may be stale here */
				f->error = errno;
				return -1;
			}
			f->uncompress.gz.next_in = f->uncompress.buffer;
			f->uncompress.gz.avail_in = f->uncompress.available;
		}

		/* as long as there is new data, never do Z_FINISH */
		if (f->uncompress.gz.avail_in != 0)
			flush = Z_SYNC_FLUSH;

		ret = inflate(&f->uncompress.gz, flush);

		if (ret == Z_STREAM_END) {
			size_t gotdata = size - f->uncompress.gz.avail_out;

			f->uncompress.gz.next_out = NULL;
			f->uncompress.gz.avail_out = 0;

			/* clean EOF, trailing garbage, or a concatenated
			 * stream (which gets restarted)? */
			if (!restart_gz_if_needed(f))
				return -1;
			if (gotdata > 0 || f->uncompress.hadeos)
				return gotdata;

			/* read the restarted stream for data */
			ret = Z_OK;
			flush = Z_SYNC_FLUSH;
			f->uncompress.gz.next_out = buffer;
			f->uncompress.gz.avail_out = size;
		} else {
			/* use Z_FINISH on second try, unless there is new data */
			flush = Z_FINISH;
		}

		/* repeat if no output was produced,
		 * assuming zlib will consume all input otherwise,
		 * as the documentation says: */
	} while (ret == Z_OK && f->uncompress.gz.avail_out == (size_t)size);
	if (ret == Z_OK ||
	    (ret == Z_BUF_ERROR && f->uncompress.gz.avail_out != (size_t)size)) {
		return size - f->uncompress.gz.avail_out;
	} else if (ret == Z_MEM_ERROR) {
		fputs("Out of memory!", stderr);
		f->error = ENOMEM;
		return -1;
	} else {
		// TODO: more information about what is decompressed?
		fprintf(stderr, "Error decompressing gz data: %s %d\n",
				f->uncompress.gz.msg, ret);
		f->uncompress.error = ue_UNCOMPRESSION_ERROR;
		return -1;
	}
	/* not reached */
}
+
#ifdef HAVE_LIBBZ2
/* Called when libbz2 reports BZ_STREAM_END: distinguish clean end of
 * file, trailing garbage, and a concatenated bzip2 stream (restarted
 * transparently).  Returns true when reading may continue (or ended
 * cleanly), false on error (recorded in f->error/f->uncompress.error). */
static inline int restart_bz2_if_needed(struct compressedfile *f) {
	retvalue r;
	int ret;

	/* first mark end of stream, will be reset if restarted */
	f->uncompress.hadeos = true;

	if (f->uncompress.bz2.avail_in == 0 && f->len != 0) {
		/* Input buffer consumed and (possibly) more data, so
		 * read more data to check: */
		f->uncompress.available = 0;
		r = uncompression_read_internal_buffer(f);
		if (RET_WAS_ERROR(r))
			return false;
		f->uncompress.bz2.next_in = (char*)f->uncompress.buffer;
		f->uncompress.bz2.avail_in = f->uncompress.available;
		if (f->uncompress.available == 0 && f->len > 0) {
			/* stream ends, file ends, but we are
			 * still expecting data? */
			f->uncompress.error = ue_WRONG_LENGTH;
			return false;
		}
		assert (f->uncompress.bz2.avail_in > 0 || f->len == 0);
	}
	if (f->uncompress.bz2.avail_in > 0 &&
	    f->uncompress.bz2.next_in[0] == 'B') {

		/* looks like a bzip2 magic byte:
		 * might be concatenated files, so restart */
		ret = BZ2_bzDecompressEnd(&f->uncompress.bz2);
		if (ret != BZ_OK) {
			f->uncompress.error = ue_UNCOMPRESSION_ERROR;
			return false;
		}
		ret = BZ2_bzDecompressInit(&f->uncompress.bz2, 0, 0);
		if (ret != BZ_OK) {
			if (ret == BZ_MEM_ERROR) {
				f->error = ENOMEM;
				return false;
			}
			f->uncompress.error = ue_TRAILING_GARBAGE;
			f->uncompress.hadeos = true; /* (already set above) */
			return false;
		}
		f->uncompress.hadeos = false;
		/* successful restarted */
		return true;
	} else {
		/* mark End Of Stream, so bzDecompress is not called again */
		f->uncompress.hadeos = true;
		if (f->uncompress.bz2.avail_in > 0) {
			/* trailing garbage */
			f->uncompress.error = ue_TRAILING_GARBAGE;
			return false;
		} else
			/* normal end of stream, no error and
			 * no restart necessary: */
			return true;
	}
}
+
+
/* Decompress up to <size> bytes of bzip2 data into buffer.
 * Returns the number of bytes produced, 0 only at end of stream,
 * -1 on error (recorded in f->error / f->uncompress.error). */
static inline int read_bz2(struct compressedfile *f, void *buffer, int size) {
	int ret;
	retvalue r;
	bool eoi;

	assert (f->compression == c_bzip2);
	assert (size >= 0);

	if (size == 0)
		return 0;

	f->uncompress.bz2.next_out = buffer;
	f->uncompress.bz2.avail_out = size;
	do {

		/* refill the raw input buffer when exhausted */
		if (f->uncompress.bz2.avail_in == 0) {
			f->uncompress.available = 0;
			r = uncompression_read_internal_buffer(f);
			if (RET_WAS_ERROR(r)) {
				/* NOTE(review): f->error was already set by
				 * the helper; errno may be stale here */
				f->error = errno;
				return -1;
			}
			f->uncompress.bz2.next_in = (char*)f->uncompress.buffer;
			f->uncompress.bz2.avail_in = f->uncompress.available;
		}
		/* eoi: no more raw input available at all */
		eoi = f->uncompress.bz2.avail_in == 0;

		ret = BZ2_bzDecompress(&f->uncompress.bz2);

		if (eoi && ret == BZ_OK &&
				f->uncompress.bz2.avail_out == (size_t)size) {
			/* if libbz2 does not detect an EndOfStream at the
			 * end of the file, let's fake an error: */
			ret = BZ_UNEXPECTED_EOF;
		}

		if (ret == BZ_STREAM_END) {
			size_t gotdata = size - f->uncompress.bz2.avail_out;

			f->uncompress.bz2.next_out = NULL;
			f->uncompress.bz2.avail_out = 0;

			/* clean EOF, trailing garbage, or a concatenated
			 * stream (which gets restarted)? */
			if (!restart_bz2_if_needed(f))
				return -1;
			if (gotdata > 0 || f->uncompress.hadeos)
				return gotdata;

			/* read the restarted stream for data */
			ret = BZ_OK;
			f->uncompress.bz2.next_out = buffer;
			f->uncompress.bz2.avail_out = size;
		}

		/* repeat if no output was produced: */
	} while (ret == BZ_OK && f->uncompress.bz2.avail_out == (size_t)size);

	if (ret == BZ_OK) {
		return size - f->uncompress.bz2.avail_out;
	} else if (ret == BZ_MEM_ERROR) {
		fputs("Out of memory!", stderr);
		f->error = ENOMEM;
		return -1;
	} else {
		fprintf(stderr, "Error %d decompressing bz2 data\n", ret);
		f->uncompress.error = ue_UNCOMPRESSION_ERROR;
		return -1;
	}
	/* not reached */
}
#endif
+
#ifdef HAVE_LIBLZMA
/* Decompress up to <size> bytes of lzma/xz data into buffer.
 * Returns the number of bytes produced, 0 only at end of stream,
 * -1 on error.  No explicit restart for concatenated streams here:
 * the xz decoder is set up with LZMA_CONCATENATED in start_xz. */
static inline int read_lzma(struct compressedfile *f, void *buffer, int size) {
	int ret;
	retvalue r;
	bool eoi;

	assert (f->compression == c_lzma || f->compression == c_xz);
	assert (size >= 0);

	if (size == 0)
		return 0;

	f->uncompress.lzma.next_out = buffer;
	f->uncompress.lzma.avail_out = size;
	do {

		/* refill the raw input buffer when exhausted */
		if (f->uncompress.lzma.avail_in == 0) {
			f->uncompress.available = 0;
			r = uncompression_read_internal_buffer(f);
			if (RET_WAS_ERROR(r)) {
				/* NOTE(review): f->error was already set by
				 * the helper; errno may be stale here */
				f->error = errno;
				return -1;
			}
			f->uncompress.lzma.next_in = f->uncompress.buffer;
			f->uncompress.lzma.avail_in = f->uncompress.available;
		}
		/* eoi: no more raw input available at all */
		eoi = f->uncompress.lzma.avail_in == 0;

		ret = lzma_code(&f->uncompress.lzma, eoi?LZMA_FINISH:LZMA_RUN);

		if (eoi && ret == LZMA_OK &&
				f->uncompress.lzma.avail_out == (size_t)size) {
			/* not seen with liblzma, but still make sure this
			 * is treated as error (as with libbz2): */
			ret = LZMA_BUF_ERROR;
		}

		/* repeat if no output was produced: */
	} while (ret == LZMA_OK && f->uncompress.lzma.avail_out == (size_t)size);
	if (ret == LZMA_STREAM_END) {
		if (f->uncompress.lzma.avail_in > 0) {
			f->uncompress.error = ue_TRAILING_GARBAGE;
			return -1;
		} else if (f->len > 0) {
			f->uncompress.error = ue_WRONG_LENGTH;
			return -1;
		} else {
			/* check if this is the end of the file: */
			f->uncompress.available = 0;
			r = uncompression_read_internal_buffer(f);
			if (RET_WAS_ERROR(r)) {
				assert (f->error != 0);
				return -1;
			} else if (f->len != 0) {
				f->uncompress.error =
					(f->uncompress.available == 0)
					? ue_WRONG_LENGTH
					: ue_TRAILING_GARBAGE;
				return -1;
			}
		}
		f->uncompress.hadeos = true;
		return size - f->uncompress.lzma.avail_out;
	} else if (ret == LZMA_OK) {
		assert (size - f->uncompress.lzma.avail_out > 0);
		return size - f->uncompress.lzma.avail_out;
	} else if (ret == LZMA_MEM_ERROR) {
		fputs("Out of memory!", stderr);
		f->error = ENOMEM;
		return -1;
	} else {
		fprintf(stderr, "Error %d decompressing lzma data\n", ret);
		f->uncompress.error = ue_UNCOMPRESSION_ERROR;
		return -1;
	}
	/* not reached */
}
#endif
+
+int uncompress_read(struct compressedfile *file, void *buffer, int size) {
+ ssize_t s;
+
+ if (file->external) {
+ if (file->pipeinfd != -1) {
+ /* things more complicated, as perhaps
+ something needs writing first... */
+ return pipebackforth(file, buffer, size);
+ }
+ s = read(file->fd, buffer, size);
+ if (s < 0)
+ file->error = errno;
+ return s;
+ }
+
+ assert (!file->external);
+
+ if (file->error != 0 || file->uncompress.error != ue_NO_ERROR)
+ return -1;
+
+ /* libbz2 does not like being called after returning end of stream,
+ * so cache that: */
+ if (file->uncompress.hadeos)
+ return 0;
+
+ switch (file->compression) {
+ case c_none:
+ if (file->len == 0)
+ return 0;
+ if (file->len > 0 && size > file->len)
+ size = file->len;
+ s = read(file->fd, buffer, size);
+ if (s < 0)
+ file->error = errno;
+ file->len -= s;
+ return s;
+ case c_gzip:
+ return read_gz(file, buffer, size);
+#ifdef HAVE_LIBBZ2
+ case c_bzip2:
+ return read_bz2(file, buffer, size);
+#endif
+#ifdef HAVE_LIBLZMA
+ case c_xz:
+ case c_lzma:
+ return read_lzma(file, buffer, size);
+#endif
+ default:
+ assert (false);
+ return -1;
+ }
+}
+
+static inline retvalue drain_pipe_fd(struct compressedfile *file, int *errno_p, const char **msg_p) {
+ int e = 0;
+ struct pollfd pollfd = {
+ file->fd,
+ POLLIN,
+ 0
+ };
+ unsigned char buffer[4096] = {};
+ while ((e = poll(&pollfd, 1, -1)) > 0) {
+ e = read(file->fd, buffer, 4096);
+ if (e <= 0)
+ break;
+ }
+ if (e < 0) {
+ *errno_p = e;
+ *msg_p = strerror(file->error);
+ return RET_ERRNO(e);
+ }
+ return RET_OK;
+}
+
/* Common part of closing a compressedfile: tear down the in-process
 * decompressor, or drain and reap the external child, and report the
 * first error recorded during reading.
 * Returns RET_OK on clean end; otherwise sets *errno_p/*msg_p
 * (-EINVAL meaning no meaningful errno value is available).
 * NOTE(review): *msg_p may point into a static buffer, so this is not
 * reentrant. */
static retvalue uncompress_commonclose(struct compressedfile *file, int *errno_p, const char **msg_p) {
	retvalue result;
	int ret;
	int e;
	pid_t pid;
	int status;
	int output_fd;
#define ERRORBUFFERSIZE 100
	static char errorbuffer[ERRORBUFFERSIZE];

	if (file == NULL)
		return RET_OK;

	if (file->external) {
		free(file->intermediate.buffer);
		if (file->pipeinfd != -1)
			(void)close(file->pipeinfd);
		// Drain the child's stdout in the unlikely case it's blocking on it
		e = drain_pipe_fd(file, errno_p, msg_p);
		if (e != RET_OK)
			return e;
		output_fd = file->fd;
		file->fd = file->infd;
		result = RET_OK;
		if (file->pid <= 0) {
			/* no child (any more): nothing to reap */
			(void)close(output_fd);
			return RET_OK;
		}
		/* reap the child, retrying on signal interruption */
		do {
			if (interrupted()) {
				*errno_p = EINTR;
				*msg_p = "Interrupted";
				result = RET_ERROR_INTERRUPTED;
			}
			pid = waitpid(file->pid, &status, 0);
			e = errno;
		} while (pid == -1 && (e == EINTR || e == EAGAIN));
		(void)close(output_fd);
		if (pid == -1) {
			*errno_p = e;
			/* NOTE(review): looks like this should be
			 * strerror(e) rather than file->error — confirm */
			*msg_p = strerror(file->error);
			return RET_ERRNO(e);
		}
		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) == 0)
				return result;
			else {
				*errno_p = -EINVAL;
				snprintf(errorbuffer, ERRORBUFFERSIZE,
					"%s exited with code %d",
					extern_uncompressors[file->compression],
					(int)(WEXITSTATUS(status)));
				*msg_p = errorbuffer;
				return RET_ERROR;
			}
		} else if (WIFSIGNALED(status) && WTERMSIG(status) != SIGUSR2) {
			*errno_p = -EINVAL;
			snprintf(errorbuffer, ERRORBUFFERSIZE,
					"%s killed by signal %d",
					extern_uncompressors[file->compression],
					(int)(WTERMSIG(status)));
			*msg_p = errorbuffer;
			return RET_ERROR;
		} else {
			*errno_p = -EINVAL;
			snprintf(errorbuffer, ERRORBUFFERSIZE,
					"%s failed",
					extern_uncompressors[file->compression]);
			*msg_p = errorbuffer;
			return RET_ERROR;
		}
		return result;
	}
	assert (!file->external);

	/* report the first error recorded while reading, if any */
	if (file->error != 0) {
		*errno_p = file->error;
		*msg_p = strerror(file->error);
		result = RET_ERRNO(file->error);
	} else if (file->uncompress.error != ue_NO_ERROR) {
		*errno_p = -EINVAL;
		if (file->uncompress.error == ue_TRAILING_GARBAGE)
			*msg_p = "Trailing garbage after compressed data";
		else if (file->uncompress.error == ue_WRONG_LENGTH)
			*msg_p = "Compressed data of unexpected length";
		else
			*msg_p = "Uncompression error";
		result = RET_ERROR;
	} else
		result = RET_OK;

	free(file->uncompress.buffer);
	file->uncompress.buffer = NULL;

	/* tear down the library decompressor; an error during reading
	 * takes precedence over one from the teardown */
	switch (file->compression) {
		case c_none:
			return result;
		case c_gzip:
			ret = inflateEnd(&file->uncompress.gz);
			if (RET_WAS_ERROR(result))
				return result;
			if (ret != Z_OK) {
				*errno_p = -EINVAL;
				if (file->uncompress.gz.msg) {
					/* static string if set: */
					*msg_p = file->uncompress.gz.msg;
				} else {
					*msg_p =
"zlib status in inconsistent state at inflateEnd";
				}
				return RET_ERROR_Z;
			}
			return RET_OK;
#ifdef HAVE_LIBBZ2
		case c_bzip2:
			ret = BZ2_bzDecompressEnd(&file->uncompress.bz2);
			if (RET_WAS_ERROR(result))
				return result;
			if (ret != BZ_OK) {
				*errno_p = -EINVAL;
				*msg_p = "Uncompression error";
				return RET_ERROR_BZ2;
			}
			return RET_OK;
#endif
#ifdef HAVE_LIBLZMA
		case c_lzma:
		case c_xz:
			lzma_end(&file->uncompress.lzma);
			if (RET_WAS_ERROR(result))
				return result;
			return RET_OK;
#endif
		default:
			assert (file->external);
			assert (false);
			return RET_ERROR_INTERNAL;
	}
	/* not reached */
}
+
+/* check if there has been an error yet for this stream */
+retvalue uncompress_error(struct compressedfile *file) {
+ int e, status;
+ pid_t pid;
+
+ if (file == NULL)
+ return RET_NOTHING;
+
+ if (file->error != 0) {
+ fprintf(stderr, "Error %d uncompressing file '%s': %s\n",
+ file->error, file->filename,
+ strerror(file->error));
+ return RET_ERRNO(file->error);
+ }
+
+ if (file->external) {
+ if (file->pid <= 0)
+ /* nothing running any more: no new errors possible */
+ return RET_OK;
+ pid = waitpid(file->pid, &status, WNOHANG);
+ if (pid < 0) {
+ e = errno;
+ fprintf(stderr,
+"Error %d looking for child %lu (a '%s'): %s\n", e,
+ (long unsigned)file->pid,
+ extern_uncompressors[file->compression],
+ strerror(e));
+ return RET_ERRNO(e);
+ }
+ if (pid != file->pid) {
+ /* still running */
+ return RET_OK;
+ }
+ file->pid = -1;
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) == 0)
+ return RET_OK;
+ else {
+ fprintf(stderr,
+ "%s exited with code %d\n",
+ extern_uncompressors[file->compression],
+ (int)(WEXITSTATUS(status)));
+ return RET_ERROR;
+ }
+ } else if (WIFSIGNALED(status) && WTERMSIG(status) != SIGUSR2) {
+ fprintf(stderr,
+ "%s killed by signal %d\n",
+ extern_uncompressors[file->compression],
+ (int)(WTERMSIG(status)));
+ return RET_ERROR;
+ } else {
+ fprintf(stderr,
+ "%s failed\n",
+ extern_uncompressors[file->compression]);
+ return RET_ERROR;
+ }
+ }
+ assert (!file->external);
+ if (file->uncompress.error != ue_NO_ERROR) {
+ if (file->uncompress.error == ue_TRAILING_GARBAGE)
+ fprintf(stderr,
+"Trailing garbage after compressed data in %s",
+ file->filename);
+ else if (file->uncompress.error == ue_WRONG_LENGTH)
+ fprintf(stderr,
+"Compressed data of different length than expected in %s",
+ file->filename);
+ return RET_ERROR;
+ }
+ return RET_OK;
+}
+
+
+
/* Discard a compressed file without caring about pending data or errors:
 * terminate a possible external uncompressor, release the built-in
 * decompressor state, close owned descriptors and free the structure.
 * No error is returned; only an unexpected death by signal is reported. */
void uncompress_abort(struct compressedfile *file) {
	pid_t pid;
	int e, status;

	if (file == NULL)
		return;

	if (file->external) {
		/* kill before closing, to avoid it getting
		 * a sigpipe */
		if (file->pid > 0)
			kill(file->pid, SIGTERM);
		if (file->infd >= 0)
			(void)close(file->infd);
		if (file->pipeinfd != -1)
			(void)close(file->pipeinfd);
		pid = -1;
		/* NOTE(review): if file->pid were <= 0 here, waitpid(-1/0,...)
		 * could reap an unrelated child; this presumably never happens
		 * for external files — confirm against the open paths. */
		do {
			if (interrupted())
				break;
			pid = waitpid(file->pid, &status, 0);
			e = errno;
		} while (pid == -1 && (e == EINTR || e == EAGAIN));
		if (file->fd >= 0)
			(void)close(file->fd);
		/* SIGTERM was sent by us and SIGUSR2 marks an already
		 * reported problem; anything else is worth a message: */
		if (pid != -1 && !(WIFEXITED(status)) && WIFSIGNALED(status)
				&& WTERMSIG(status) != SIGTERM
				&& WTERMSIG(status) != SIGUSR2) {
			fprintf(stderr, "%s killed by signal %d\n",
					extern_uncompressors[file->compression],
					(int)(WTERMSIG(status)));
		}
	} else {
		if (file->closefd && file->fd >= 0)
			(void)close(file->fd);
		/* release the state of the linked-in decompressor: */
		switch (file->compression) {
			case c_none:
				break;
			case c_gzip:
				(void)inflateEnd(&file->uncompress.gz);
				memset(&file->uncompress.gz, 0,
						sizeof(file->uncompress.gz));
				break;
#ifdef HAVE_LIBBZ2
			case c_bzip2:
				(void)BZ2_bzDecompressEnd(&file->uncompress.bz2);
				memset(&file->uncompress.bz2, 0,
						sizeof(file->uncompress.bz2));
				break;
#endif
#ifdef HAVE_LIBLZMA
			case c_xz:
			case c_lzma:
				lzma_end(&file->uncompress.lzma);
				memset(&file->uncompress.lzma, 0,
						sizeof(file->uncompress.lzma));
				break;
#endif
			default:
				assert (file->external);
				break;
		}
		free(file->uncompress.buffer);
		file->uncompress.buffer = NULL;
	}
	free(file->filename);
	free(file);
}
+
+retvalue uncompress_fdclose(struct compressedfile *file, int *errno_p, const char **msg_p) {
+ retvalue r;
+
+ assert(file->closefd == false);
+ r = uncompress_commonclose(file, errno_p, msg_p);
+ free(file);
+ return r;
+}
+
/* Close a stream created with uncompress_open (which owns filename and
 * file descriptor): finish decompression, close the descriptor, report
 * any error to stderr, free everything.  Returns RET_OK or the error. */
retvalue uncompress_close(struct compressedfile *file) {
	const char *msg;
	retvalue r;
	int e;

	if (file == NULL)
		return RET_OK;

	/* uncompress_open sets both closefd and filename,
	 * uncompress_fdopen sets neither (and uses uncompress_fdclose): */
	if (file->closefd)
		assert (file->filename != NULL);
	else
		assert (file->filename == NULL);

	r = uncompress_commonclose(file, &e, &msg);
	if (RET_IS_OK(r)) {
		/* decompression finished cleanly; a failing close still
		 * turns the result into an error: */
		if (file->closefd && file->fd >= 0 && close(file->fd) != 0) {
			e = errno;
			fprintf(stderr,
"Error %d reading from %s: %s!\n", e, file->filename, strerror(e));
			r = RET_ERRNO(e);
		}
		free(file->filename);
		free(file);
		return r;
	}
	if (file->closefd && file->fd >= 0)
		(void)close(file->fd);
	/* -EINVAL marks a decompression error (msg already descriptive),
	 * everything else is a plain errno value: */
	if (e == -EINVAL) {
		fprintf(stderr,
"Error reading from %s: %s!\n", file->filename, msg);
	} else {
		fprintf(stderr,
"Error %d reading from %s: %s!\n", e, file->filename, msg);
	}
	free(file->filename);
	free(file);
	return r;
}
+
+enum compression compression_by_suffix(const char *name, size_t *len_p) {
+ enum compression c;
+ size_t len = *len_p;
+
+ for (c = c_COUNT - 1 ; c > c_none ; c--) {
+ size_t l = strlen(uncompression_suffix[c]);
+
+ if (len <= l)
+ continue;
+ if (strncmp(name + len - l, uncompression_suffix[c], l) == 0) {
+ *len_p -= l;
+ return c;
+ }
+ }
+ return c_none;
+}
+
diff --git a/uncompression.h b/uncompression.h
new file mode 100644
index 0000000..ae0e2e4
--- /dev/null
+++ b/uncompression.h
@@ -0,0 +1,69 @@
#ifndef REPREPRO_UNCOMPRESS_H
#define REPREPRO_UNCOMPRESS_H

/* "", ".gz", ... */
extern const char * const uncompression_suffix[c_COUNT];
/* external uncompressor program per method, NULL if none configured/found */
extern /*@null@*/ char *extern_uncompressors[c_COUNT];
/* so help messages know which option to cite: */
extern const char * const uncompression_option[c_COUNT];
extern const char * const uncompression_config[c_COUNT];

/* there are two different modes: uncompress a file to memory,
 * or uncompress (possibly multiple files) on the filesystem,
 * controlled by aptmethods */

/* uncompression_builtin(c): method handled by linked-in libraries */
#ifdef HAVE_LIBLZMA
# ifdef HAVE_LIBBZ2
#define uncompression_builtin(c) ((c) == c_xz || (c) == c_lzma || (c) == c_bzip2 || (c) == c_gzip)
# else
#define uncompression_builtin(c) ((c) == c_xz || (c) == c_lzma || (c) == c_gzip)
# endif
#else
# ifdef HAVE_LIBBZ2
#define uncompression_builtin(c) ((c) == c_bzip2 || (c) == c_gzip)
# else
#define uncompression_builtin(c) ((c) == c_gzip)
# endif
#endif
/* supported: no compression, built-in, or an external helper is known */
#define uncompression_supported(c) ((c) == c_none || \
		uncompression_builtin(c) || \
		extern_uncompressors[c] != NULL)

/* detect compression from a filename suffix; shortens *len_p on a match */
enum compression compression_by_suffix(const char *, size_t *);

/**** functions for aptmethod.c ****/

/* we got a pid, check if it is an uncompressor we care for */
retvalue uncompress_checkpid(pid_t, int);
/* still waiting for a client to exit */
bool uncompress_running(void);

typedef retvalue finishaction(void *, const char *, bool /*failed*/);
/* uncompress and call action when finished */
retvalue uncompress_queue_file(const char *, const char *, enum compression, finishaction *, void *);

/**** functions for update.c (uncompressing an earlier downloaded file) ****/

retvalue uncompress_file(const char *, const char *, enum compression);

/**** functions for indexfile.c (uncompressing to memory) and ar.c ****/
// and perhaps also sourceextraction.c

struct compressedfile;

retvalue uncompress_open(/*@out@*/struct compressedfile **, const char *, enum compression);
int uncompress_read(struct compressedfile *, void *buffer, int);
retvalue uncompress_error(/*@const@*/struct compressedfile *);
void uncompress_abort(/*@only@*/struct compressedfile *);
retvalue uncompress_close(/*@only@*/struct compressedfile *);
retvalue uncompress_fdclose(/*@only@*/struct compressedfile *, int *, const char **);

retvalue uncompress_fdopen(/*@out@*/struct compressedfile **, int, off_t, enum compression, int *, const char **);

/**** general initialisation ****/

/* check for existence of external programs */
void uncompressions_check(const char *gunzip, const char *bunzip2, const char *unlzma, const char *unxz, const char *lunzip, const char *unzstd);

#endif
+
diff --git a/updates.c b/updates.c
new file mode 100644
index 0000000..8d81ddf
--- /dev/null
+++ b/updates.c
@@ -0,0 +1,2702 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+
/* This module handles the updating of distributions from remote repositories.
 * It's using apt's methods (the files in /usr/lib/apt/methods) for the
 * actual getting of needed lists and package files.
 *
 * Its only task is to request the right actions in the right order,
 * almost everything is done in other modules:
+ *
+ * aptmethod.c start, feed and take care of the apt methods
+ * downloadcache.c keep track of what is downloaded to avoid duplicates
+ * signature.c verify Release.gpg files, if requested
+ * remoterepository.c cache remote index files and decide which to download
+ * upgradelist.c decide which packages (and version) should be installed
+ *
+ * An update run consists of the following steps, in between done some
+ * downloading, checking and so on:
+ *
+ * Step 1: parsing the conf/updates file with the patterns
+ * Step 2: create rules for some distribution based on those patterns
+ * Step 3: calculate which remote indices are to be retrieved and processed
+ * Step 4: <removed>
 * Step 5: preparations for actually doing anything
+ * Step 6: queue downloading of list of lists (Release, Release.gpg, ...)
+ * Step 7: queue downloading of lists (Packages.gz, Sources.gz, ...)
+ * Step 8: call possible list hooks allowing them to modify the lists
+ * Step 9: search for missing packages i.e. needing to be added or upgraded
+ * Step 10: enqueue downloading of missing packages
+ * Step 11: install the missing packages
+ * Step 12: remember processed index files as processed
+ *
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include "error.h"
+#include "ignore.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "atoms.h"
+#include "dirs.h"
+#include "names.h"
+#include "signature.h"
+#include "aptmethod.h"
+#include "downloadcache.h"
+#include "updates.h"
+#include "upgradelist.h"
+#include "distribution.h"
+#include "tracking.h"
+#include "termdecide.h"
+#include "chunks.h"
+#include "filterlist.h"
+#include "log.h"
+#include "donefile.h"
+#include "freespace.h"
+#include "configparser.h"
+#include "filecntl.h"
+#include "remoterepository.h"
+#include "uncompression.h"
+#include "package.h"
+
+/* The data structures of this one: ("u_" is short for "update_")
+
+updates_getpatterns read a list of patterns from <confdir>/updates:
+
+ u_pattern --> u_pattern --> u_pattern --> NULL
+ / \ / \ / \ / \
+ | \ | |
+ \ ----\ | |
+ ------------ | | |
+ \ . | .
+ | |
+updates_getupstreams instances them for a given distribution:
+ | |
+ u_distribution --> u_origin -> u_origin --> NULL
+ | | / \ / \ / \ / \
+ | \ / | | | |
+ | u_target -> u_index -> u_index -> NULL
+ | | | |
+ | \ / | |
+ | u_target -> u_index -> u_index -> NULL
+ | |
+ | \ /
+ | NULL . .
+ \ / | |
+ u_distribution ---> u_origin -> u_origin -> NULL
+ | | / \ / \
+ | \ / | |
+ | u_target --> u_index ---> u_index -> NULL
+ | |
+ | \ /
+ | NULL omitted in this image:
+ | not every target must have an index in each
+ \ / origin. (Some origin might only support a
+ NULL limited number of architectures or components)
+
+ also omitted are delete rules, i.e. markers
+ that all versions previously found are not to
+ be kept or even installed, unless a later
+ index again adds them.
+*/
+
/* the data for some upstream part to get updates from;
 * some fields can be NULL or empty */
struct update_pattern {
	struct update_pattern *next;
	//e.g. "Name: woody"
	char *name;
	/* another pattern to take value from */
	char *from;
	/*@dependent@*/struct update_pattern *pattern_from;
	//e.g. "Method: ftp://ftp.uni-freiburg.de/pub/linux/debian"
	/*@null@*/ char *method;
	//e.g. "Fallback: ftp://ftp.debian.org/pub/linux/debian"
	/*@null@*/ char *fallback; // can be other server or dir, but must be same method
	//e.g. "Config: Dir=/"
	struct strlist config;
	//e.g. "Suite: woody" or "Suite: <asterix>/updates" (NULL means "*")
	/*@null@*/char *suite_from;
	//e.g. "VerifyRelease: B629A24C38C6029A" (NULL means not check)
	/*@null@*/char *verifyrelease;
	//e.g. "Architectures: i386 sparc mips" (not set means all)
	struct strlist architectures_from;
	struct strlist architectures_into;
	//e.g. "Components: main>main non-free>non-free contrib>contrib"
	// (empty means all)
	struct strlist components_from;
	struct strlist components_into;
	//e.g. "UDebComponents: main>main"
	// (empty means all)
	struct strlist udebcomponents_from;
	struct strlist udebcomponents_into;
	// There's no ddeb support here yet, since we don't know what the
	// Debian archive layout is going to look like.

	// NULL means no condition
	/*@null@*/term *includecondition;
	struct filterlist filterlist;
	struct filterlist filtersrclist;
	// NULL means nothing to execute after lists are downloaded...
	/*@null@*/char *listhook;
	/*@null@*/char *shellhook;
	/* checksums to not check in the Release file: */
	bool ignorehashes[cs_hashCOUNT];
	/* the name of the flat component, causing flat mode if non-NULL*/
	component_t flat;
	//e.g. "IgnoreRelease: Yes" for 1 (default is 0)
	bool ignorerelease;
	//e.g. "GetInRelease: No" for 0 (default is 1)
	bool getinrelease;
	/* the form in which index files are preferably downloaded */
	struct encoding_preferences downloadlistsas;
	/* if true ignore sources with Extra-Source-Only */
	bool omitextrasource;
	/* if the specific field is there (to distinguish from an empty one) */
	bool omitextrasource_set;
	bool ignorehashes_set;
	bool ignorerelease_set;
	bool getinrelease_set;
	bool architectures_set;
	bool components_set;
	bool udebcomponents_set;
	bool includecondition_set;
	bool config_set;
	bool downloadlistsas_set;
	/* used by updates_getpatterns to detect circular From: references */
	bool visited;

	/* set once a remote repository was prepared for this pattern */
	bool used;
	struct remote_repository *repository;
};
+
/* one update rule instantiated for a specific distribution,
 * created from an update_pattern (or representing a delete rule) */
struct update_origin {
	struct update_origin *next;
	/* all following are NULL when this is a delete rule */
	/*@null@*/const struct update_pattern *pattern;
	/*@null@*/char *suite_from;
	/*@null@*/const struct distribution *distribution;
	/*@null@*/struct remote_distribution *from;
	/* cache for flat mode */
	bool flat;
	/* set when there was an error and it should no longer be used */
	bool failed;
};
+
/* links one remote index file (or a delete rule) into the list of
 * sources an update_target pulls packages from */
struct update_index_connector {
	struct update_index_connector *next;

	/* NULL when this is a delete rule */
	/*@null@*/ struct remote_index *remote;
	/*@null@*/ struct update_origin *origin;

	/* replacement list file produced by a list hook, if any */
	/*@null@*/char *afterhookfilename;

	/* ignore wrong architecture packages (arch1>arch2 or flat) */
	bool ignorewrongarchitecture;
	/* if newly downloaded or not in done file */
	bool new;
	/* content needed (i.e. listhooks have to be run) */
	bool needed;
	/* there was something missed here */
	bool failed;
	/* do not generate 'done' file */
	bool incomplete;
};
+
/* the per-target state of an update run: the indices to read
 * and the resulting list of planned package upgrades */
struct update_target {
	/*@null@*/struct update_target *next;
	/*@null@*/struct update_index_connector *indices;
	/*@dependent@*/struct target *target;
	/*@null@*/struct upgradelist *upgradelist;
	/* Ignore delete marks (as some lists were missing) */
	bool ignoredelete;
	/* don't do anything because of --skipold */
	bool nothingnew;
	/* if true do not generate donefiles */
	bool incomplete;
};
+
/* everything belonging to one distribution within an update run:
 * the patterns it uses, its origins and its targets */
struct update_distribution {
	struct update_distribution *next;
	struct distribution *distribution;
	struct update_pattern **patterns;
	struct update_origin *origins;
	struct update_target *targets;
};
+
/* free a single update pattern and everything it owns, including a
 * possibly prepared remote repository handle; NULL is accepted */
static void update_pattern_free(/*@only@*/struct update_pattern *update) {
	if (update == NULL)
		return;
	free(update->name);
	free(update->from);
	free(update->method);
	free(update->fallback);
	free(update->suite_from);
	free(update->verifyrelease);
	strlist_done(&update->config);
	strlist_done(&update->architectures_from);
	strlist_done(&update->architectures_into);
	strlist_done(&update->components_from);
	strlist_done(&update->components_into);
	strlist_done(&update->udebcomponents_from);
	strlist_done(&update->udebcomponents_into);
	term_free(update->includecondition);
	filterlist_release(&update->filterlist);
	filterlist_release(&update->filtersrclist);
	free(update->listhook);
	free(update->shellhook);
	remote_repository_free(update->repository);
	free(update);
}
+
+void updates_freepatterns(struct update_pattern *p) {
+ while (p != NULL) {
+ struct update_pattern *pattern;
+
+ pattern = p;
+ p = pattern->next;
+ update_pattern_free(pattern);
+ }
+}
+
+static void updates_freeorigins(/*@only@*/struct update_origin *o) {
+ while (o != NULL) {
+ struct update_origin *origin;
+
+ origin = o;
+ o = origin->next;
+ free(origin->suite_from);
+ free(origin);
+ }
+}
+
+static void updates_freetargets(/*@only@*/struct update_target *t) {
+ while (t != NULL) {
+ struct update_target *ut;
+
+ ut = t;
+ t = ut->next;
+ while (ut->indices != NULL) {
+ struct update_index_connector *ui;
+
+ ui = ut->indices;
+ ut->indices = ui->next;
+ free(ui->afterhookfilename);
+ free(ui);
+ }
+ free(ut);
+ }
+}
+
+void updates_freeupdatedistributions(struct update_distribution *d) {
+ while (d != NULL) {
+ struct update_distribution *next;
+
+ next = d->next;
+ free(d->patterns);
+ updates_freetargets(d->targets);
+ updates_freeorigins(d->origins);
+ free(d);
+ d = next;
+ }
+}
+
+static inline retvalue newupdatetarget(struct update_target **ts, /*@dependent@*/struct target *target) {
+ struct update_target *ut;
+
+ ut = malloc(sizeof(struct update_target));
+ if (FAILEDTOALLOC(ut))
+ return RET_ERROR_OOM;
+ ut->target = target;
+ ut->next = *ts;
+ ut->indices = NULL;
+ ut->upgradelist = NULL;
+ ut->ignoredelete = false;
+ ut->nothingnew = false;
+ ut->incomplete = false;
+ *ts = ut;
+ return RET_OK;
+}
+
+/****************************************************************************
+ * Step 1: parsing the conf/updates file with the patterns *
+ ****************************************************************************/
+
/* setters for the simple fields of an update rule stanza; the CF*
 * macros from configparser.h expand into the actual parse functions */
CFlinkedlistinit(update_pattern)
CFvalueSETPROC(update_pattern, name)
CFvalueSETPROC(update_pattern, suite_from)
CFatomSETPROC(update_pattern, flat, at_component)
CFvalueSETPROC(update_pattern, from)
CFurlSETPROC(update_pattern, method)
CFurlSETPROC(update_pattern, fallback)
/* what here? */
CFallSETPROC(update_pattern, verifyrelease)
CFlinelistSETPROC(update_pattern, config)
CFtruthSETPROC(update_pattern, ignorerelease)
CFtruthSETPROC(update_pattern, getinrelease)
CFscriptSETPROC(update_pattern, listhook)
CFallSETPROC(update_pattern, shellhook)
CFfilterlistSETPROC(update_pattern, filterlist)
CFfilterlistSETPROC(update_pattern, filtersrclist)
CFtermSSETPROC(update_pattern, includecondition)
CFtruthSETPROC(update_pattern, omitextrasource)
+
/* parse "DownloadListsAs: .gz force.xz .diff ..." into the ordered
 * list of preferred index-file encodings of this pattern */
CFUSETPROC(update_pattern, downloadlistsas) {
	CFSETPROCVAR(update_pattern, this);
	char *word;
	const char *u;
	retvalue r;
	unsigned int e = 0;
	enum compression c;

	this->downloadlistsas_set = true;
	r = config_getword(iter, &word);
	while (RET_IS_OK(r)) {
		bool force;
		/* silently cap at the compiled-in maximum of entries */
		if (e >= ARRAYCOUNT(this->downloadlistsas.requested)) {
			fprintf(stderr,
"%s:%d:%d: Ignoring all but first %d entries...\n",
					config_filename(iter),
					config_markerline(iter),
					config_markercolumn(iter),
					(int)(ARRAYCOUNT(
						this->downloadlistsas.requested)));
			free(word);
			break;
		}
		if (strncmp(word, "force.", 6) == 0) {
			/* word+5 keeps the '.', so "force.gz" yields ".gz" */
			u = word + 5;
			force = true;
		} else {
			u = word;
			force = false;
		}
		/* each method is accepted with and without leading dot */
		for (c = 0 ; c < c_COUNT ; c++) {
			if (strcmp(uncompression_config[c], u) == 0 ||
			    strcmp(uncompression_config[c]+1, u) == 0) {
				break;
			}
		}
		if (c < c_COUNT) {
			this->downloadlistsas.requested[e].compression = c;
			this->downloadlistsas.requested[e].diff = false;
			this->downloadlistsas.requested[e].force = force;
			e++;
			free(word);
			r = config_getword(iter, &word);
			continue;
		}
		/* ".diff" requests the Packages.diff (patch) mechanism */
		if (strcmp(u, ".diff") == 0 || strcmp(u, "diff") == 0) {
			this->downloadlistsas.requested[e].compression = c_gzip;
			this->downloadlistsas.requested[e].diff = true;
			this->downloadlistsas.requested[e].force = force;
			e++;
			free(word);
			r = config_getword(iter, &word);
			continue;
		}
		fprintf(stderr,
"%s:%d:%d: Error: unknown list download mode '%s'!\n",
				config_filename(iter),
				config_markerline(iter),
				config_markercolumn(iter),
				u);
		free(word);
		return RET_ERROR;
	}
	if (RET_WAS_ERROR(r))
		return r;
	this->downloadlistsas.count = e;
	return RET_OK;
}
+
/* parse "Components: from>into ..." pairs; unknown target components
 * only warn here and are skipped later when instantiating the pattern */
CFUSETPROC(update_pattern, components) {
	CFSETPROCVAR(update_pattern, this);
	retvalue r;
	int i;

	this->components_set = true;
	r = config_getsplitwords(iter, "Components",
			&this->components_from,
			&this->components_into);
	if (RET_IS_OK(r)) {
		// TODO: instead of this save numbers directly...
		for (i = 0 ; i < this->components_into.count ; i++) {
			component_t c;
			c = component_find(this->components_into.values[i]);
			if (c == atom_unknown) {
				fprintf(stderr,
"Warning parsing %s, line %u: unknown component '%s' will be ignored!\n",
					config_filename(iter),
					config_markerline(iter),
					this->components_into.values[i]);
			}
		}
	}
	return r;
}
+
/* parse "UDebComponents: from>into ..." pairs, analogous to Components
 * but for the installer (.udeb) package indices */
CFUSETPROC(update_pattern, udebcomponents) {
	CFSETPROCVAR(update_pattern, this);
	retvalue r;
	int i;

	this->udebcomponents_set = true;
	r = config_getsplitwords(iter, "UdebComponents",
			&this->udebcomponents_from,
			&this->udebcomponents_into);
	if (RET_IS_OK(r)) {
		// TODO: instead of this save numbers directly...
		for (i = 0 ; i < this->udebcomponents_into.count ; i++) {
			component_t c;
			c = component_find(this->udebcomponents_into.values[i]);
			if (c == atom_unknown) {
				fprintf(stderr,
"Warning parsing %s, line %u: unknown udeb component '%s' will be ignored!\n",
					config_filename(iter),
					config_markerline(iter),
					this->udebcomponents_into.values[i]);
			}
		}
	}
	return r;
}
+
/* parse "Architectures: from>into ..." pairs; an empty field is legal
 * but makes the pattern a no-op, which is worth a warning */
CFUSETPROC(update_pattern, architectures) {
	CFSETPROCVAR(update_pattern, this);
	retvalue r;
	int i;

	this->architectures_set = true;
	r = config_getsplitwords(iter, "Architectures",
			&this->architectures_from,
			&this->architectures_into);
	if (r == RET_NOTHING) {
		/* normalize "field present but empty" to empty lists */
		strlist_init(&this->architectures_from);
		strlist_init(&this->architectures_into);
		fprintf(stderr,
"Warning parsing %s, line %u: an empty Architectures field\n"
"causes the whole pattern to do nothing.\n",
				config_filename(iter),
				config_markerline(iter));
	}
	if (RET_IS_OK(r)) {
		// TODO: instead of this save numbers directly...
		for (i = 0 ; i < this->architectures_into.count ; i++) {
			architecture_t a;
			a = architecture_find(this->architectures_into.values[i]);
			if (a == atom_unknown) {
				fprintf(stderr,
"Warning parsing %s, line %u: unknown architecture '%s' will be ignored!\n",
					config_filename(iter),
					config_markerline(iter),
					this->architectures_into.values[i]);
			}
		}
	}
	return r;
}
+CFhashesSETPROC(update_pattern, ignorehashes);
+
/* all fields recognized in an update rule in conf/updates;
 * CFr marks the only mandatory field (Name) */
static const struct configfield updateconfigfields[] = {
	CFr("Name", update_pattern, name),
	CF("From", update_pattern, from),
	CF("Method", update_pattern, method),
	CF("Fallback", update_pattern, fallback),
	CF("Config", update_pattern, config),
	CF("Suite", update_pattern, suite_from),
	CF("Architectures", update_pattern, architectures),
	CF("Components", update_pattern, components),
	CF("Flat", update_pattern, flat),
	CF("UDebComponents", update_pattern, udebcomponents),
	CF("GetInRelease", update_pattern, getinrelease),
	CF("IgnoreRelease", update_pattern, ignorerelease),
	CF("IgnoreHashes", update_pattern, ignorehashes),
	CF("VerifyRelease", update_pattern, verifyrelease),
	CF("ListHook", update_pattern, listhook),
	CF("ListShellHook", update_pattern, shellhook),
	CF("FilterFormula", update_pattern, includecondition),
	CF("OmitExtraSourceOnly", update_pattern, omitextrasource),
	CF("FilterList", update_pattern, filterlist),
	CF("FilterSrcList", update_pattern, filtersrclist),
	CF("DownloadListsAs", update_pattern, downloadlistsas)
};
+
+CFfinishparse(update_pattern) {
+ CFUfinishparseVARS(update_pattern, n, last_p, mydata);
+
+ if (complete) {
+ if (n->components_set && atom_defined(n->flat)) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern may not contain Components and Flat fields ad the same time.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->udebcomponents_set && atom_defined(n->flat)) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern may not contain UDebComponents and Flat fields ad the same time.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->from != NULL && n->method != NULL) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern may not contain From: and Method: fields ad the same time.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->from == NULL && n->method == NULL) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern must either contain a Methods: field or reference another one with a From: field.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->from != NULL && n->fallback != NULL) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern may not contain From: and Fallback: fields ad the same time.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->from != NULL && n->config_set) {
+ fprintf(stderr,
+"%s:%u to %u: Update pattern may not contain From: and Config: fields ad the same time.\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ if (n->suite_from != NULL && strcmp(n->suite_from, "*") != 0 &&
+ strncmp(n->suite_from, "*/", 2) != 0 &&
+ strchr(n->suite_from, '*') != NULL) {
+ fprintf(stderr,
+"%s:%u to %u: Unsupported suite pattern '%s'\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter), n->suite_from);
+ return RET_ERROR;
+ }
+ if (n->listhook != NULL && n->shellhook != NULL) {
+ fprintf(stderr,
+"%s:%u to %u: Only one of ListHook and ListShellHook allowed per update rule\n",
+ config_filename(iter), config_firstline(iter),
+ config_line(iter));
+ return RET_ERROR;
+ }
+ }
+ return linkedlistfinish(privdata_update_pattern,
+ thisdata_update_pattern,
+ lastdata_p_update_pattern, complete, iter);
+}
+
+
/* Step 1: read conf/updates and build the list of update patterns,
 * rejecting duplicate names and resolving/checking From: references */
retvalue updates_getpatterns(struct update_pattern **patterns) {
	struct update_pattern *update = NULL, *u, *v;
	bool progress;
	int i;
	retvalue r;

	r = configfile_parse("updates", IGNORABLE(unknownfield),
			configparser_update_pattern_init,
			finishparseupdate_pattern,
			"update rule",
			updateconfigfields, ARRAYCOUNT(updateconfigfields),
			&update);
	if (RET_IS_OK(r)) {
		for (u = update ; u != NULL ; u = u->next) {
			/* reject a second pattern with the same name: */
			v = update;
			while (v != NULL &&
					(v == u || strcmp(v->name, u->name) != 0))
				v = v->next;
			if (v != NULL) {
				// TODO: store line information...
				fprintf(stderr,
"%s/updates: Multiple update patterns named '%s'!\n",
					global.confdir, u->name);
				updates_freepatterns(update);
				return RET_ERROR;
			}
			if (u->from == NULL)
				continue;
			/* resolve the From: reference by name: */
			v = update;
			while (v != NULL && strcmp(v->name, u->from) != 0)
				v = v->next;
			if (v == NULL) {
				fprintf(stderr,
"%s/updates: Update pattern '%s' references unknown pattern '%s' via From!\n",
					global.confdir, u->name, u->from);
				updates_freepatterns(update);
				return RET_ERROR;
			}
			u->pattern_from = v;
		}
		/* check for circular references: repeatedly mark every
		 * pattern whose whole From-chain is already marked;
		 * whatever stays unmarked is part of a cycle */
		do {
			progress = false;
			for (u = update ; u != NULL ; u = u->next) {
				if (u->visited)
					continue;
				if (u->pattern_from == NULL ||
						u->pattern_from->visited) {
					u->visited = true;
					progress = true;
				}
			}
		} while (progress);
		u = update;
		while (u != NULL && u->visited)
			u = u->next;
		if (u != NULL) {
			/* The actual error is more likely found later.
			 * If someone creates a cycle and a chain into that
			 * more than 1000 rules long, having a slightly
			 * misleading error message will be the last of
			 * their problems... */
			for (i = 0 ; i < 1000 ; i++) {
				u = u->pattern_from;
				assert (u != NULL && !u->visited);
			}
			fprintf(stderr,
"Error: Update rule '%s' part of circular From-referencing.\n",
				u->name);
			updates_freepatterns(update);
			return RET_ERROR;
		}
		*patterns = update;
	} else if (r == RET_NOTHING) {
		/* a missing conf/updates file simply means no patterns */
		assert (update == NULL);
		*patterns = NULL;
		r = RET_OK;
	} else {
		if (r == RET_ERROR_UNKNOWNFIELD)
			(void)fputs(
"To ignore unknown fields use --ignore=unknownfield\n",
				stderr);
		updates_freepatterns(update);
	}
	return r;
}
+
/* Mark in found[] every entry of 'searched' that also occurs in 'have',
 * but only when 'lookfor' is the pattern actually supplying the
 * attribute (determined by walking each pattern's From-chain until
 * hasattribute() is true).  Helper to warn about entries no origin
 * provides. */
static inline void markfound(int count, struct update_pattern * const *patterns, const struct update_pattern *lookfor, const struct strlist *searched, const struct strlist *have, bool *found, bool (*hasattribute)(const struct update_pattern*)) {
	int i, j, o;

	for (i = 0 ; i < count ; i++) {
		const struct update_pattern *p = patterns[i];

		/* check if this uses this attribute */
		while (p != NULL && !hasattribute(p))
			p = p->pattern_from;
		if (p != lookfor)
			continue;

		for (j = 0 ; j < have->count ; j++) {
			o = strlist_ofs(searched, have->values[j]);
			if (o >= 0)
				found[o] = true;
		}
		break;
	}
}
+
/* predicates for markfound(): does this pattern itself set the field
 * (as opposed to inheriting it via From:)? */
static inline bool hasarchitectures(const struct update_pattern *p) {
	return p->architectures_set;
}
static inline bool hascomponents(const struct update_pattern *p) {
	return p->components_set;
}
static inline bool hasudebcomponents(const struct update_pattern *p) {
	return p->udebcomponents_set;
}
+
+/****************************************************************************
+ * Step 2: create rules for some distribution based on those patterns *
+ ****************************************************************************/
+
+static retvalue new_deleterule(struct update_origin **origins) {
+
+ struct update_origin *update;
+
+ update = zNEW(struct update_origin);
+ if (FAILEDTOALLOC(update))
+ return RET_ERROR_OOM;
+
+ *origins = update;
+ return RET_OK;
+}
+
+static inline char *translate_suite_pattern(const struct update_pattern *p, const char *codename) {
+	/* Compute the remote suite name for a distribution: find the
+	 * first Suite: along the From: chain and expand its wildcard. */
+	while (p != NULL && p->suite_from == NULL)
+		p = p->pattern_from;
+
+	/* no Suite: anywhere, or a bare "*": use the codename itself */
+	if (p == NULL || strcmp(p->suite_from, "*") == 0)
+		return strdup(codename);
+	/* a '*' followed by '/': codename plus the given suffix */
+	if (p->suite_from[0] == '*' && p->suite_from[1] == '/')
+		return calc_dirconcat(codename, p->suite_from + 2);
+	/* no wildcard at all: take the value literally */
+	if (strchr(p->suite_from, '*') == NULL)
+		return strdup(p->suite_from);
+	/* other wildcard forms were already rejected while parsing */
+	assert(0);
+	return NULL;
+}
+
+/* Instantiate one update pattern for a concrete distribution:
+ * resolve the remote suite name, register the remote repository (once
+ * per top-most pattern of a From: chain), collect the effective
+ * Release-verification options and create the update_origin
+ * describing where to download from.
+ * On success *origins holds the new origin (caller owns it). */
+static retvalue instance_pattern(struct update_pattern *pattern, const struct distribution *distribution, struct update_origin **origins) {
+
+	struct update_origin *update;
+	/*@dependant@*/struct update_pattern *declaration, *p, *listscomponents;
+	bool ignorehashes[cs_hashCOUNT], ignorerelease, getinrelease;
+	const char *verifyrelease;
+	retvalue r;
+
+	update = zNEW(struct update_origin);
+	if (FAILEDTOALLOC(update))
+		return RET_ERROR_OOM;
+
+	/* expand a possible '*' in Suite: with the codename */
+	update->suite_from = translate_suite_pattern(pattern,
+			distribution->codename);
+	if (FAILEDTOALLOC(update->suite_from)) {
+		free(update);
+		return RET_ERROR_OOM;
+	}
+	/* the remote repository belongs to the top-most pattern of the
+	 * From: chain (the one declaring Method:); create it lazily on
+	 * the pattern's first use */
+	if (!pattern->used) {
+		declaration = pattern;
+		while (declaration->pattern_from != NULL)
+			declaration = declaration->pattern_from;
+		if (declaration->repository == NULL)
+			declaration->repository = remote_repository_prepare(
+				declaration->name, declaration->method,
+				declaration->fallback,
+				&declaration->config);
+		if (FAILEDTOALLOC(declaration->repository)) {
+			free(update->suite_from);
+			free(update);
+			return RET_ERROR_OOM;
+		}
+		pattern->used = true;
+	} else {
+		declaration = pattern;
+		while (declaration->pattern_from != NULL)
+			declaration = declaration->pattern_from;
+		assert (declaration->repository != NULL);
+	}
+
+	update->distribution = distribution;
+	update->pattern = pattern;
+	update->failed = false;
+
+	/* each option below is taken from the first pattern in the
+	 * From: chain that explicitly sets it: */
+	p = pattern;
+	while (p != NULL && !p->ignorerelease_set)
+		p = p->pattern_from;
+	if (p == NULL)
+		ignorerelease = false;
+	else
+		ignorerelease = p->ignorerelease;
+	p = pattern;
+	while (p != NULL && !p->getinrelease_set)
+		p = p->pattern_from;
+	if (p == NULL)
+		getinrelease = true;
+	else
+		getinrelease = p->getinrelease;
+	/* find the first set values: */
+	p = pattern;
+	while (p != NULL && p->verifyrelease == NULL)
+		p = p->pattern_from;
+	if (p == NULL)
+		verifyrelease = NULL;
+	else
+		verifyrelease = p->verifyrelease;
+	/* signature checking requested (default) but no key given: warn */
+	if (!ignorerelease && verifyrelease == NULL && verbose >= 0) {
+		fprintf(stderr,
+"Warning: No VerifyRelease line in '%s' or any rule it includes via 'From:'.\n"
+"Release.gpg cannot be checked unless you tell which key to check with.\n"
+"(To avoid this warning and not check signatures add 'VerifyRelease: blindtrust').\n",
+				pattern->name);
+
+	}
+	p = pattern;
+	while (p != NULL && !p->ignorehashes_set)
+		p = p->pattern_from;
+	if (p == NULL)
+		memset(ignorehashes, 0, sizeof(ignorehashes));
+	else {
+		assert (sizeof(ignorehashes) == sizeof(p->ignorehashes));
+		memcpy(ignorehashes, p->ignorehashes, sizeof(ignorehashes));
+	}
+
+	/* look for a Flat: declaration; remember the last pattern on the
+	 * way there that sets components, to warn that those settings
+	 * would be ignored in flat mode */
+	listscomponents = NULL;
+	p = pattern;
+	while (p != NULL && !atom_defined(p->flat)) {
+		if (p->components_set || p->udebcomponents_set)
+			listscomponents = p;
+		p = p->pattern_from;
+	}
+	update->flat = p != NULL;
+	if (update->flat && listscomponents != NULL) {
+		fprintf(stderr,
+"WARNING: update pattern '%s' (first encountered via '%s' in '%s')\n"
+"sets components that are always ignored as '%s' sets Flat mode.\n",
+			listscomponents->name, pattern->name,
+			distribution->codename, p->name);
+	}
+	/* the flat target component must exist in the distribution */
+	if (p != NULL && !atomlist_in(&distribution->components, p->flat)) {
+		fprintf(stderr,
+"Error: distribution '%s' uses flat update pattern '%s'\n"
+"with target component '%s' which it does not contain!\n",
+			distribution->codename,
+			pattern->name, atoms_components[p->flat]);
+		updates_freeorigins(update);
+		return RET_ERROR;
+	}
+	r = remote_distribution_prepare(declaration->repository,
+			update->suite_from, ignorerelease,
+			getinrelease, verifyrelease, update->flat,
+			ignorehashes, &update->from);
+	if (RET_WAS_ERROR(r)) {
+		updates_freeorigins(update);
+		return r;
+	}
+
+	*origins = update;
+	return RET_OK;
+}
+
+/* Resolve the names in the distribution's Update: line into pointers
+ * to the parsed update patterns; the magic name "-" (delete rule) is
+ * left as NULL in the resulting array.
+ * Returns RET_NOTHING when there are no Update: entries, RET_ERROR if
+ * a name cannot be resolved and the distribution is selected,
+ * RET_ERROR_OOM on allocation failure. */
+static retvalue findpatterns(struct update_pattern *patterns, const struct distribution *distribution, struct update_pattern ***patterns_p) {
+	int i;
+	struct update_pattern **used_patterns;
+
+	if (distribution->updates.count == 0)
+		return RET_NOTHING;
+
+	used_patterns = nzNEW(distribution->updates.count,
+			struct update_pattern *);
+	if (FAILEDTOALLOC(used_patterns))
+		return RET_ERROR_OOM;
+
+	for (i = 0; i < distribution->updates.count ; i++) {
+		const char *name = distribution->updates.values[i];
+		struct update_pattern *pattern;
+
+		/* delete rules have no pattern, keep the NULL slot */
+		if (strcmp(name, "-") == 0)
+			continue;
+
+		pattern = patterns;
+		while (pattern != NULL && strcmp(name, pattern->name) != 0)
+			pattern = pattern->next;
+		if (pattern == NULL) {
+			fprintf(stderr,
+"Cannot find definition of upgrade-rule '%s' for distribution '%s'!\n",
+					name, distribution->codename);
+			if (distribution->selected) {
+				free(used_patterns);
+				return RET_ERROR;
+			} else
+				/* message typo fixed: "bug might" -> "but might" */
+				fprintf(stderr,
+"This is no error now as '%s' is not used, but might cause spurious warnings...\n",
+					distribution->codename);
+		}
+		used_patterns[i] = pattern;
+	}
+	*patterns_p = used_patterns;
+	return RET_OK;
+}
+
+static retvalue getorigins(struct update_distribution *d) {
+	/* Instantiate one update_origin per entry of the distribution's
+	 * Update: line (already resolved into d->patterns), collecting
+	 * them into d->origins (in reverse order). */
+	const struct distribution *dist = d->distribution;
+	struct update_origin *list = NULL;
+	retvalue result = RET_NOTHING;
+	int i;
+
+	assert (d->patterns != NULL);
+
+	for (i = 0; i < dist->updates.count ; i++) {
+		struct update_pattern *pattern = d->patterns[i];
+		struct update_origin *neworigin SETBUTNOTUSED(= NULL);
+		retvalue r;
+
+		if (pattern == NULL) {
+			/* a NULL pattern is the magic "-" delete rule */
+			assert (strcmp(dist->updates.values[i], "-") == 0);
+			r = new_deleterule(&neworigin);
+		} else
+			r = instance_pattern(pattern, dist, &neworigin);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+		if (RET_IS_OK(r)) {
+			assert (neworigin != NULL);
+			neworigin->next = list;
+			list = neworigin;
+		}
+	}
+
+	if (RET_WAS_ERROR(result))
+		updates_freeorigins(list);
+	else
+		d->origins = list;
+	return result;
+}
+
+/****************************************************************************
+ * Step 3: calculate which remote indices are to be retrieved and processed *
+ ****************************************************************************/
+
+static inline bool addremoteindex(struct update_origin *origin, struct target *target, struct update_target *updatetargets, const char *architecture, const char *component) {
+	/* Queue one remote (non-flat) index file for this target.
+	 * Returns false on out-of-memory. */
+	struct update_index_connector *c;
+	const struct update_pattern *p;
+
+	c = zNEW(struct update_index_connector);
+	if (FAILEDTOALLOC(c))
+		return false;
+
+	/* first DownloadListsAs: along the From: chain, if any */
+	p = origin->pattern;
+	while (p != NULL && !p->downloadlistsas_set)
+		p = p->pattern_from;
+
+	c->origin = origin;
+	c->remote = remote_index(origin->from,
+			architecture, component,
+			target->packagetype,
+			(p == NULL) ? NULL : &p->downloadlistsas);
+	if (FAILEDTOALLOC(c->remote)) {
+		free(c);
+		return false;
+	}
+	assert (!origin->flat);
+	/* when pulling from a different architecture's index, foreign
+	 * packages in it are expected and not an error */
+	c->ignorewrongarchitecture = strcmp(architecture,
+			atoms_architectures[target->architecture]) != 0;
+	c->next = updatetargets->indices;
+	updatetargets->indices = c;
+	return true;
+}
+
+/* Add the remote index files of a non-flat origin to an update target,
+ * honouring Architectures:/Components:/UDebComponents: mappings
+ * (stored as parallel _from/_into string lists, where entry i of
+ * _into names the local value and entry i of _from the remote one)
+ * inherited along the From: chain. */
+static retvalue addorigintotarget(struct update_origin *origin, struct target *target, struct distribution *distribution, struct update_target *updatetargets) {
+	const struct update_pattern *p;
+	const struct strlist *c_from = NULL, *c_into = NULL;
+	const struct strlist *a_from = NULL, *a_into = NULL;
+	const char *architecture = atoms_architectures[target->architecture];
+	const char *component = atoms_components[target->component];
+	int ai, ci;
+
+	assert (origin != NULL && origin->pattern != NULL);
+
+	/* first pattern in the chain that maps architectures, if any */
+	p = origin->pattern;
+	while (p != NULL && !p->architectures_set)
+		p = p->pattern_from;
+	if (p != NULL) {
+		a_from = &p->architectures_from;
+		a_into = &p->architectures_into;
+	}
+	/* same for components; udebs use their own mapping */
+	p = origin->pattern;
+	if (target->packagetype == pt_udeb) {
+		while (p != NULL && !p->udebcomponents_set)
+			p = p->pattern_from;
+		if (p != NULL) {
+			c_from = &p->udebcomponents_from;
+			c_into = &p->udebcomponents_into;
+		}
+	} else {
+		while (p != NULL && !p->components_set)
+			p = p->pattern_from;
+		if (p != NULL) {
+			c_from = &p->components_from;
+			c_into = &p->components_into;
+		}
+	}
+
+	if (a_into == NULL) {
+		/* no architecture mapping: remote name == local name */
+		assert (atomlist_in(&distribution->architectures,
+					target->architecture));
+
+		if (c_into == NULL) {
+			/* no component mapping either: 1:1 */
+			if (!addremoteindex(origin, target, updatetargets,
+						architecture, component))
+				return RET_ERROR_OOM;
+			return RET_OK;
+		}
+		/* add an index for every remote component mapped into
+		 * this target's component */
+		for (ci = 0 ; ci < c_into->count ; ci++) {
+			if (strcmp(c_into->values[ci], component) != 0)
+				continue;
+
+			if (!addremoteindex(origin, target, updatetargets,
+					architecture, c_from->values[ci]))
+				return RET_ERROR_OOM;
+		}
+		return RET_OK;
+	}
+	/* architecture mapping given: only remote architectures mapped
+	 * into this target's architecture are fetched */
+	for (ai = 0 ; ai < a_into->count ; ai++) {
+		if (strcmp(architecture, a_into->values[ai]) != 0)
+			continue;
+		if (c_into == NULL) {
+			if (!addremoteindex(origin, target, updatetargets,
+					a_from->values[ai], component))
+				return RET_ERROR_OOM;
+			continue;
+		}
+
+		for (ci = 0 ; ci < c_into->count ; ci++) {
+			if (strcmp(component, c_into->values[ci]) != 0)
+				continue;
+
+			if (!addremoteindex(origin, target, updatetargets,
+					a_from->values[ai], c_from->values[ci]))
+				return RET_ERROR_OOM;
+		}
+	}
+	return RET_OK;
+}
+
+/* allocate and enqueue one flat-index connector; factored out of
+ * addflatorigintotarget where the exact same code appeared twice */
+static retvalue addflatconnector(struct update_origin *origin, struct target *target, /*@null@*/const struct encoding_preferences *downloadlistsas, struct update_target *updatetargets) {
+	struct update_index_connector *uindex;
+
+	uindex = zNEW(struct update_index_connector);
+	if (FAILEDTOALLOC(uindex))
+		return RET_ERROR_OOM;
+	uindex->origin = origin;
+	uindex->remote = remote_flat_index(origin->from,
+			target->packagetype,
+			downloadlistsas);
+	if (FAILEDTOALLOC(uindex->remote)) {
+		free(uindex);
+		return RET_ERROR_OOM;
+	}
+	assert (origin->flat);
+	/* a flat repository has no per-architecture indices, so foreign
+	 * architectures are never treated as an error */
+	uindex->ignorewrongarchitecture = true;
+	uindex->next = updatetargets->indices;
+	updatetargets->indices = uindex;
+	return RET_OK;
+}
+
+/* Add the index of a flat origin to an update target, if the target's
+ * component matches the pattern's Flat: component and (when an
+ * Architectures: mapping exists) the target's architecture is listed. */
+static retvalue addflatorigintotarget(struct update_origin *origin, struct target *target, struct update_target *updatetargets) {
+	const struct update_pattern *p;
+	const struct strlist *a_into;
+	const struct encoding_preferences *downloadlistsas;
+	int ai;
+
+	assert (origin != NULL);
+
+	/* flat repositories cannot provide udebs */
+	if (target->packagetype == pt_udeb)
+		return RET_NOTHING;
+
+	/* first DownloadListsAs: along the From: chain, if any */
+	p = origin->pattern;
+	while (p != NULL && !p->downloadlistsas_set)
+		p = p->pattern_from;
+	if (p == NULL)
+		downloadlistsas = NULL;
+	else
+		downloadlistsas = &p->downloadlistsas;
+
+	/* some pattern in the chain must declare Flat: to get here */
+	p = origin->pattern;
+	while (p != NULL && !atom_defined(p->flat))
+		p = p->pattern_from;
+	assert (p != NULL);
+	if (p->flat != target->component)
+		return RET_NOTHING;
+
+	/* without Architectures: the single index serves every
+	 * architecture of the target */
+	p = origin->pattern;
+	while (p != NULL && !p->architectures_set)
+		p = p->pattern_from;
+	if (p == NULL)
+		return addflatconnector(origin, target,
+				downloadlistsas, updatetargets);
+
+	a_into = &p->architectures_into;
+
+	for (ai = 0 ; ai < a_into->count ; ai++) {
+		const char *a = atoms_architectures[target->architecture];
+		retvalue r;
+
+		if (strcmp(a_into->values[ai], a) != 0)
+			continue;
+
+		r = addflatconnector(origin, target,
+				downloadlistsas, updatetargets);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+static retvalue adddeleteruletotarget(struct update_target *updatetargets) {
+	/* A connector without origin/remote represents the "-" rule:
+	 * everything not re-added by a later rule gets deleted. */
+	struct update_index_connector *c = zNEW(struct update_index_connector);
+
+	if (FAILEDTOALLOC(c))
+		return RET_ERROR_OOM;
+	c->next = updatetargets->indices;
+	updatetargets->indices = c;
+	return RET_OK;
+}
+
+static retvalue gettargets(struct update_origin *origins, struct distribution *distribution, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct update_target **ts) {
+	/* Create an update_target for every matching target of the
+	 * distribution and attach the index files of every origin. */
+	struct update_target *list = NULL;
+	struct target *t;
+
+	for (t = distribution->targets ; t != NULL ; t = t->next) {
+		struct update_origin *o;
+		retvalue r;
+
+		/* honour command line restrictions */
+		if (!target_matches(t, components, architectures, types))
+			continue;
+		r = newupdatetarget(&list, t);
+		if (RET_WAS_ERROR(r)) {
+			updates_freetargets(list);
+			return r;
+		}
+
+		for (o = origins ; o != NULL ; o = o->next) {
+			if (o->pattern == NULL)
+				/* the magic "-" delete rule */
+				r = adddeleteruletotarget(list);
+			else if (o->flat)
+				r = addflatorigintotarget(o, t, list);
+			else
+				r = addorigintotarget(o, t,
+						distribution, list);
+			if (RET_WAS_ERROR(r)) {
+				updates_freetargets(list);
+				return r;
+			}
+		}
+	}
+
+	*ts = list;
+	return RET_OK;
+}
+
+/* Sanity check after getorigins: if fewer origins were created than
+ * entries are in the Update: line, report either a missing rule
+ * (RET_ERROR_MISSING) or a likely duplicate entry (RET_NOTHING). */
+static inline retvalue findmissingupdate(const struct distribution *distribution, struct update_origin *updates) {
+	retvalue result;
+	struct update_origin *last;
+	int count;
+
+	assert (updates != NULL);
+	last = updates;
+	count = 1;
+	while (last->next != NULL) {
+		last = last->next;
+		count++;
+	}
+
+	result = RET_OK;
+
+	if (count != distribution->updates.count) {
+		int i;
+
+		// TODO: why is this here? can this actually happen?
+
+		for (i=0; i<distribution->updates.count; i++){
+			const char *update = distribution->updates.values[i];
+			struct update_origin *u;
+
+			/* delete rules ("-") produce origins without a
+			 * pattern; skip them here and guard the list walk
+			 * below against them, otherwise u->pattern->name
+			 * dereferences a NULL pointer */
+			if (strcmp(update, "-") == 0)
+				continue;
+
+			u = updates;
+			while (u != NULL && (u->pattern == NULL ||
+					strcmp(u->pattern->name, update) != 0))
+				u = u->next;
+			if (u == NULL) {
+				fprintf(stderr,
+"Update '%s' is listed in distribution '%s', but was not found!\n",
+					update, distribution->codename);
+				result = RET_ERROR_MISSING;
+				break;
+			}
+		}
+		if (RET_IS_OK(result)) {
+			fprintf(stderr,
+"Did you write an update two times in the update-line of '%s'?\n",
+				distribution->codename);
+			result = RET_NOTHING;
+		}
+	}
+
+	return result;
+}
+
+/* For every selected distribution resolve its update rules: find the
+ * patterns (step 1 results), instantiate the origins (step 2) and
+ * compute the remote index files needed per target (step 3).
+ * On success *update_distributions receives the new list; on error
+ * everything built so far is freed. */
+retvalue updates_calcindices(struct update_pattern *patterns, struct distribution *distributions, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct update_distribution **update_distributions) {
+	struct distribution *distribution;
+	struct update_distribution *u_ds;
+	retvalue result, r;
+
+	u_ds = NULL;
+	result = RET_NOTHING;
+
+	for (distribution = distributions ; distribution != NULL ;
+			distribution = distribution->next) {
+		struct update_distribution *u_d;
+		struct update_pattern **translated_updates;
+
+		if (!distribution->selected)
+			continue;
+
+		r = findpatterns(patterns, distribution, &translated_updates);
+		/* RET_NOTHING: distribution has no Update: line at all */
+		if (r == RET_NOTHING)
+			continue;
+		if (RET_WAS_ERROR(r)) {
+			result = r;
+			break;
+		}
+
+		u_d = zNEW(struct update_distribution);
+		if (FAILEDTOALLOC(u_d)) {
+			free(translated_updates);
+			result = RET_ERROR_OOM;
+			break;
+		}
+
+		/* link u_d into the list before any further error can
+		 * happen, so the cleanup below frees it as well */
+		u_d->distribution = distribution;
+		u_d->patterns = translated_updates;
+		u_d->next = u_ds;
+		u_ds = u_d;
+
+		r = getorigins(u_d);
+		if (RET_WAS_ERROR(r)) {
+			result = r;
+			break;
+		}
+		if (RET_IS_OK(r)) {
+			/* Check if we got all: */
+			r = findmissingupdate(distribution, u_d->origins);
+			if (RET_WAS_ERROR(r)) {
+				result = r;
+				break;
+			}
+
+			r = gettargets(u_d->origins, distribution,
+					components, architectures, types,
+					&u_d->targets);
+			if (RET_WAS_ERROR(r)) {
+				result = r;
+				break;
+			}
+		}
+		result = RET_OK;
+	}
+	if (RET_IS_OK(result)) {
+		*update_distributions = u_ds;
+	} else
+		updates_freeupdatedistributions(u_ds);
+	return result;
+}
+
+/****************************************************************************
+ * Step 5: preperations for actually doing anything: *
+ * - printing some warnings *
+ * - prepare distribution for writing *
+ * - rest moved to remote_startup *
+ ****************************************************************************/
+
+static retvalue updates_startup(struct aptmethodrun *run, struct update_distribution *distributions, bool willwrite) {
+	/* Per-distribution preparations before downloading starts:
+	 * prepare for writing (if requested), load all override files,
+	 * then start the apt methods. */
+	struct update_distribution *d;
+
+	for (d = distributions ; d != NULL ; d = d->next) {
+		retvalue r;
+
+		if (willwrite) {
+			r = distribution_prepareforwriting(d->distribution);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		r = distribution_loadalloverrides(d->distribution);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return remote_startup(run);
+}
+
+/****************************************************************************
+ * Step 6: queue downloading of list of lists (Release, Release.gpg, ...) *
+ ****************************************************************************
+ -> moved to remoterepository.c */
+
+/****************************************************************************
+ * Step 7: queue downloading of lists *
+ * (using information from previously downloaded meta-lists) *
+ ****************************************************************************
+ -> moved to remoterepository.c */
+
+/****************************************************************************
+ * Step 8: call possible list hooks allowing them to modify the lists *
+ ****************************************************************************/
+
+/* Run a ListHook: program on a downloaded index file. The hook gets
+ * the downloaded file and the name of the file to produce as
+ * arguments, plus REPREPRO_FILTER_* environment variables describing
+ * the target; its output file is used instead of the original. */
+static retvalue calllisthook(struct update_target *ut, struct update_index_connector *f, const char *listhook) {
+	struct update_origin *origin = f->origin;
+	const char *oldfilename = remote_index_file(f->remote);
+	const char *oldbasefilename = remote_index_basefile(f->remote);
+	char *newfilename;
+	pid_t child, c;
+	int status;
+
+	/* distribution, component, architecture and pattern specific... */
+	newfilename = genlistsfilename(oldbasefilename, 5, "",
+			ut->target->distribution->codename,
+			atoms_components[ut->target->component],
+			atoms_architectures[ut->target->architecture],
+			origin->pattern->name, ENDOFARGUMENTS);
+	if (FAILEDTOALLOC(newfilename))
+		return RET_ERROR_OOM;
+	child = fork();
+	if (child < 0) {
+		int e = errno;
+		free(newfilename);
+		fprintf(stderr, "Error %d while forking for listhook: %s\n",
+				e, strerror(e));
+		return RET_ERRNO(e);
+	}
+	if (child == 0) {
+		/* child: close inherited fds, export the target
+		 * description and exec the hook */
+		int e;
+		(void)closefrom(3);
+		sethookenvironment(NULL, NULL, NULL, NULL);
+		setenv("REPREPRO_FILTER_CODENAME",
+				ut->target->distribution->codename, true);
+		setenv("REPREPRO_FILTER_PACKAGETYPE",
+				atoms_architectures[ut->target->packagetype],
+				true);
+		setenv("REPREPRO_FILTER_COMPONENT",
+				atoms_components[ut->target->component],
+				true);
+		setenv("REPREPRO_FILTER_ARCHITECTURE",
+				atoms_architectures[ut->target->architecture],
+				true);
+		setenv("REPREPRO_FILTER_PATTERN", origin->pattern->name, true);
+		execl(listhook, listhook, oldfilename, newfilename,
+				ENDOFARGUMENTS);
+		/* only reached if exec failed */
+		e = errno;
+		fprintf(stderr, "Error %d while executing '%s': %s\n",
+				e, listhook, strerror(e));
+		exit(255);
+	}
+	if (verbose > 5)
+		fprintf(stderr, "Called %s '%s' '%s'\n", listhook,
+				oldfilename, newfilename);
+	/* ownership of newfilename moves to the connector here */
+	f->afterhookfilename = newfilename;
+	do {
+		c = waitpid(child, &status, WUNTRACED);
+		if (c < 0) {
+			int e = errno;
+			fprintf(stderr,
+"Error %d while waiting for hook '%s' to finish: %s\n",
+					e, listhook, strerror(e));
+			return RET_ERRNO(e);
+		}
+	} while (c != child);
+	if (WIFEXITED(status)) {
+		if (WEXITSTATUS(status) == 0) {
+			if (verbose > 5)
+				fprintf(stderr,
+"Listhook successfully returned!\n");
+			return RET_OK;
+		} else {
+			fprintf(stderr,
+"Listhook failed with exitcode %d!\n",
+					(int)WEXITSTATUS(status));
+			return RET_ERROR;
+		}
+	} else {
+		fprintf(stderr,
+"Listhook terminated abnormally. (status is %x)!\n",
+				status);
+		return RET_ERROR;
+	}
+	/* not reached: all branches above return */
+	return RET_OK;
+}
+
+/* Run a ListShellHook: command with the downloaded index file as
+ * stdin and a freshly created output file as stdout; the output file
+ * is then used instead of the downloaded one. REPREPRO_FILTER_*
+ * environment variables describe the target being processed. */
+static retvalue callshellhook(struct update_target *ut, struct update_index_connector *f, const char *shellhook) {
+	struct update_origin *origin = f->origin;
+	const char *oldfilename = remote_index_file(f->remote);
+	const char *oldbasefilename = remote_index_basefile(f->remote);
+	char *newfilename;
+	pid_t child, c;
+	int status;
+	int infd, outfd;
+
+	/* distribution, component, architecture and pattern specific... */
+	newfilename = genlistsfilename(oldbasefilename, 5, "",
+			ut->target->distribution->codename,
+			atoms_components[ut->target->component],
+			atoms_architectures[ut->target->architecture],
+			origin->pattern->name, ENDOFARGUMENTS);
+	if (FAILEDTOALLOC(newfilename))
+		return RET_ERROR_OOM;
+	infd = open(oldfilename, O_RDONLY|O_NOCTTY|O_NOFOLLOW);
+	if (infd < 0) {
+		int e = errno;
+
+		fprintf(stderr,
+"Error %d opening expected file '%s': %s!\n"
+"Something strange must go on!\n", e, oldfilename, strerror(e));
+		/* fix: newfilename was leaked on this path */
+		free(newfilename);
+		return RET_ERRNO(e);
+	}
+	(void)unlink(newfilename);
+	outfd = open(newfilename,
+			O_WRONLY|O_NOCTTY|O_NOFOLLOW|O_CREAT|O_EXCL, 0666);
+	if (outfd < 0) {
+		int e = errno;
+
+		fprintf(stderr, "Error %d creating '%s': %s!\n", e,
+				newfilename, strerror(e));
+		close(infd);
+		/* fix: newfilename was leaked on this path */
+		free(newfilename);
+		return RET_ERRNO(e);
+	}
+	child = fork();
+	if (child < 0) {
+		int e = errno;
+		fprintf(stderr, "Error %d while forking for shell hook: %s\n",
+				e, strerror(e));
+		(void)close(infd);
+		(void)close(outfd);
+		/* fix: unlink before free (newfilename was used after
+		 * being freed here) */
+		(void)unlink(newfilename);
+		free(newfilename);
+		return RET_ERRNO(e);
+	}
+	if (child == 0) {
+		/* child: wire the hook's stdin/stdout, export the
+		 * target description, exec the shell command */
+		int e;
+
+		/* fix: the dup2 calls were inside assert() and thus
+		 * disappeared entirely when compiled with NDEBUG */
+		if (dup2(infd, 0) != 0 || dup2(outfd, 1) != 1) {
+			e = errno;
+			fprintf(stderr,
+"Error %d redirecting input/output for shell hook: %s\n",
+					e, strerror(e));
+			exit(255);
+		}
+		close(infd);
+		close(outfd);
+		(void)closefrom(3);
+		sethookenvironment(NULL, NULL, NULL, NULL);
+		setenv("REPREPRO_FILTER_CODENAME",
+				ut->target->distribution->codename, true);
+		setenv("REPREPRO_FILTER_PACKAGETYPE",
+				atoms_architectures[ut->target->packagetype],
+				true);
+		setenv("REPREPRO_FILTER_COMPONENT",
+				atoms_components[ut->target->component],
+				true);
+		setenv("REPREPRO_FILTER_ARCHITECTURE",
+				atoms_architectures[ut->target->architecture],
+				true);
+		setenv("REPREPRO_FILTER_PATTERN", origin->pattern->name, true);
+		execlp("sh", "sh", "-c", shellhook, ENDOFARGUMENTS);
+		/* only reached if exec failed */
+		e = errno;
+		fprintf(stderr, "Error %d while executing sh -c '%s': %s\n",
+				e, shellhook, strerror(e));
+		exit(255);
+	}
+	(void)close(infd);
+	(void)close(outfd);
+	if (verbose > 5)
+		fprintf(stderr, "Called sh -c '%s' <'%s' >'%s'\n", shellhook,
+				oldfilename, newfilename);
+	/* ownership of newfilename moves to the connector here */
+	f->afterhookfilename = newfilename;
+	do {
+		c = waitpid(child, &status, WUNTRACED);
+		if (c < 0) {
+			int e = errno;
+			fprintf(stderr,
+"Error %d while waiting for shell hook '%s' to finish: %s\n",
+					e, shellhook, strerror(e));
+			return RET_ERRNO(e);
+		}
+	} while (c != child);
+	if (WIFEXITED(status)) {
+		if (WEXITSTATUS(status) == 0) {
+			if (verbose > 5)
+				fprintf(stderr,
+"shell hook successfully returned!\n");
+			return RET_OK;
+		} else {
+			fprintf(stderr,
+"shell hook '%s' failed with exitcode %d!\n",
+					shellhook, (int)WEXITSTATUS(status));
+			return RET_ERROR;
+		}
+	} else {
+		fprintf(stderr,
+"shell hook '%s' terminated abnormally. (status is %x)!\n",
+				shellhook, status);
+		return RET_ERROR;
+	}
+}
+
+static retvalue calllisthooks(struct update_distribution *d) {
+	/* For every downloaded index of this distribution run the
+	 * ListHook: or ListShellHook: of its pattern (or of the nearest
+	 * ancestor via From: that defines one), if any. */
+	struct update_target *t;
+	retvalue result = RET_NOTHING;
+
+	for (t = d->targets; t != NULL ; t = t->next) {
+		struct update_index_connector *c;
+
+		if (t->nothingnew)
+			continue;
+		/* if anything is new, we will to need to look at
+		 * all (in case there are delete rules) */
+		for (c = t->indices ; c != NULL ; c = c->next) {
+			const struct update_pattern *p;
+			retvalue r;
+
+			/* delete rules and failed downloads have no file */
+			if (c->remote == NULL || c->failed)
+				continue;
+			p = c->origin->pattern;
+			while (p != NULL && p->listhook == NULL
+					&& p->shellhook == NULL)
+				p = p->pattern_from;
+			if (p == NULL)
+				continue;
+			if (p->listhook != NULL)
+				r = calllisthook(t, c, p->listhook);
+			else {
+				assert (p->shellhook != NULL);
+				r = callshellhook(t, c, p->shellhook);
+			}
+			if (RET_WAS_ERROR(r)) {
+				c->failed = true;
+				return r;
+			}
+			RET_UPDATE(result, r);
+		}
+	}
+	return result;
+}
+
+static retvalue updates_calllisthooks(struct update_distribution *distributions) {
+	/* Run the configured list hooks for every distribution. */
+	struct update_distribution *d;
+	retvalue result = RET_NOTHING;
+
+	for (d = distributions ; d != NULL ; d = d->next) {
+		retvalue r = calllisthooks(d);
+
+		RET_UPDATE(result, r);
+	}
+	return result;
+}
+
+/****************************************************************************
+ * Step 9: search for missing packages i.e. needing to be added or upgraded *
+ * (all the logic in upgradelist.c, this is only clue code) *
+ ****************************************************************************/
+
+/* Decide for one remote package whether it is to be installed,
+ * upgraded, superseded, held or skipped, layering (in order):
+ * 1. the pattern's FilterList/FilterSrcList (inherited via From:),
+ * 2. the command line source filter (cmdline_src_filter),
+ * 3. the command line binary filter (cmdline_bin_filter, binaries only),
+ * 4. the pattern's FilterFormula (includecondition),
+ * 5. the Extra-Source-Only handling for source packages. */
+static upgrade_decision ud_decide_by_pattern(void *privdata, struct target *target, struct package *new, /*@null@*/const char *old_version) {
+	const struct update_pattern *pattern = privdata, *p;
+	retvalue r;
+	upgrade_decision decision = UD_UPGRADE;
+	enum filterlisttype listdecision;
+	bool cmdline_still_undecided;
+
+	/* 1: pattern filter lists; sources prefer FilterSrcList,
+	 * binaries prefer FilterList, each falling back to the other */
+	if (target->packagetype == pt_dsc) {
+		p = pattern;
+		while (p != NULL && !p->filtersrclist.set)
+			p = p->pattern_from;
+		if (p != NULL)
+			listdecision = filterlist_find(new->name, new->version,
+					&p->filtersrclist);
+		else {
+			p = pattern;
+			while (p != NULL && !p->filterlist.set)
+				p = p->pattern_from;
+			if (p == NULL)
+				listdecision = flt_install;
+			else
+				listdecision = filterlist_find(new->name,
+					new->version, &p->filterlist);
+		}
+	} else {
+		p = pattern;
+		while (p != NULL && !p->filterlist.set)
+			p = p->pattern_from;
+		if (p != NULL)
+			listdecision = filterlist_find(new->name, new->version,
+					&p->filterlist);
+		else {
+			p = pattern;
+			while (p != NULL && !p->filtersrclist.set)
+				p = p->pattern_from;
+			if (p == NULL)
+				listdecision = flt_install;
+			else
+				listdecision = filterlist_find(new->source,
+					new->sourceversion,
+					&p->filtersrclist);
+		}
+	}
+
+	switch (listdecision) {
+		case flt_deinstall:
+		case flt_purge:
+			return UD_NO;
+		case flt_warning:
+			return UD_LOUDNO;
+		case flt_supersede:
+			decision = UD_SUPERSEDE;
+			break;
+		case flt_hold:
+			decision = UD_HOLD;
+			break;
+		case flt_error:
+			/* cannot yet be handled! */
+			fprintf(stderr,
+"Package name marked to be unexpected('error'): '%s'!\n", new->name);
+			return UD_ERROR;
+		case flt_upgradeonly:
+			if (old_version == NULL)
+				return UD_NO;
+			break;
+		case flt_install:
+			break;
+		case flt_unchanged:
+		case flt_auto_hold:
+			/* these values cannot come from a filter list;
+			 * assert(false) written without a constant */
+			assert (listdecision != listdecision);
+	}
+
+	/* 2: command line source filter; it may override the pattern */
+	cmdline_still_undecided = false;
+	switch (filterlist_find(new->source, new->sourceversion,
+				&cmdline_src_filter)) {
+		case flt_deinstall:
+		case flt_purge:
+			return UD_NO;
+		case flt_warning:
+			return UD_LOUDNO;
+		case flt_auto_hold:
+			/* hold for now, the binary filter may override */
+			cmdline_still_undecided = true;
+			decision = UD_HOLD;
+			break;
+		case flt_hold:
+			decision = UD_HOLD;
+			break;
+		case flt_supersede:
+			decision = UD_SUPERSEDE;
+			break;
+		case flt_error:
+			/* cannot yet be handled! */
+			fprintf(stderr,
+"Package name marked to be unexpected('error'): '%s'!\n", new->name);
+			return UD_ERROR;
+		case flt_upgradeonly:
+			if (old_version == NULL)
+				return UD_NO;
+			break;
+		case flt_install:
+			decision = UD_UPGRADE;
+			break;
+		case flt_unchanged:
+			cmdline_still_undecided = true;
+			break;
+	}
+
+
+	/* 3: command line binary filter (not applied to sources) */
+	if (target->packagetype != pt_dsc) {
+		switch (filterlist_find(new->name, new->version,
+					&cmdline_bin_filter)) {
+			case flt_deinstall:
+			case flt_purge:
+				return UD_NO;
+			case flt_warning:
+				return UD_LOUDNO;
+			case flt_supersede:
+				decision = UD_SUPERSEDE;
+				break;
+			case flt_hold:
+				decision = UD_HOLD;
+				break;
+			case flt_error:
+				/* cannot yet be handled! */
+				fprintf(stderr,
+"Package name marked to be unexpected('error'): '%s'!\n", new->name);
+				return UD_ERROR;
+			case flt_upgradeonly:
+				if (old_version == NULL)
+					return UD_NO;
+				break;
+			case flt_install:
+				decision = UD_UPGRADE;
+				break;
+			case flt_unchanged:
+				break;
+			case flt_auto_hold:
+				/* hold only if it was not in the src-filter */
+				if (cmdline_still_undecided)
+					decision = UD_HOLD;
+				break;
+		}
+	}
+
+	/* 4: pattern's FilterFormula, first set one in the From: chain */
+	p = pattern;
+	while (p != NULL && !p->includecondition_set)
+		p = p->pattern_from;
+	if (p != NULL) {
+		r = term_decidepackage(p->includecondition, new, target);
+		if (RET_WAS_ERROR(r))
+			return UD_ERROR;
+		if (r == RET_NOTHING) {
+			return UD_NO;
+		}
+	}
+
+	if (target->packagetype != pt_dsc)
+		return decision;
+
+	/* 5: source packages only: skip Extra-Source-Only packages
+	 * unless OmitExtraSourceOnly is explicitly disabled */
+	p = pattern;
+	while (p != NULL && !p->omitextrasource_set)
+		p = p->pattern_from;
+	/* if unset or set to true, ignore new->source having that field */
+	if (p == NULL || p->omitextrasource == true) {
+		if (chunk_gettruth(new->control, "Extra-Source-Only"))
+			return UD_NO;
+	}
+
+	return decision;
+}
+
+
+/* Build the upgrade list of one target by replaying all its index
+ * files (delete rules and downloaded package indices) in order.
+ * Missing/failed indices make the target 'incomplete' and suppress
+ * delete rules (ignoredelete) so nothing is removed based on
+ * incomplete information. */
+static inline retvalue searchformissing(/*@null@*/FILE *out, struct update_target *u) {
+	struct update_index_connector *uindex;
+	retvalue result, r;
+
+	/* nothing downloaded anew: report and skip (unless --noskipold) */
+	if (u->nothingnew) {
+		if (u->indices == NULL && verbose >= 4 && out != NULL)
+			fprintf(out,
+" nothing to do for '%s'\n",
+				u->target->identifier);
+		else if (u->indices != NULL && verbose >= 0 && out != NULL)
+			fprintf(out,
+" nothing new for '%s' (use --noskipold to process anyway)\n",
+				u->target->identifier);
+		return RET_NOTHING;
+	}
+	if (verbose > 2 && out != NULL)
+		fprintf(out, " processing updates for '%s'\n",
+				u->target->identifier);
+	r = upgradelist_initialize(&u->upgradelist, u->target);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	result = RET_NOTHING;
+
+	for (uindex = u->indices ; uindex != NULL ; uindex = uindex->next) {
+		const char *filename;
+
+		/* a connector without origin is a delete rule */
+		if (uindex->origin == NULL) {
+			if (verbose > 4 && out != NULL)
+				fprintf(out,
+" marking everything to be deleted\n");
+			r = upgradelist_deleteall(u->upgradelist);
+			if (RET_WAS_ERROR(r))
+				u->incomplete = true;
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				return result;
+			u->ignoredelete = false;
+			continue;
+		}
+
+		/* prefer the list hook's output file, if one ran */
+		if (uindex->afterhookfilename != NULL)
+			filename = uindex->afterhookfilename;
+		else
+			filename = remote_index_file(uindex->remote);
+
+		/* an unavailable index: deletions can no longer be
+		 * trusted for this target */
+		if (uindex->failed || uindex->origin->failed) {
+			if (verbose >= 1)
+				fprintf(stderr,
+" missing '%s'\n", filename);
+			u->incomplete = true;
+			u->ignoredelete = true;
+			continue;
+		}
+
+		if (verbose > 4 && out != NULL)
+			fprintf(out, " reading '%s'\n", filename);
+		r = upgradelist_update(u->upgradelist, uindex,
+				filename,
+				ud_decide_by_pattern,
+				(void*)uindex->origin->pattern,
+				uindex->ignorewrongarchitecture);
+		if (RET_WAS_ERROR(r)) {
+			u->incomplete = true;
+			u->ignoredelete = true;
+		}
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			return result;
+	}
+
+	return result;
+}
+
+static retvalue updates_readindices(/*@null@*/FILE *out, struct update_distribution *d) {
+	/* Build the upgrade lists of all targets of this distribution,
+	 * stopping at the first hard error. */
+	struct update_target *t;
+	retvalue result = RET_NOTHING;
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		retvalue r = searchformissing(out, t);
+
+		if (RET_WAS_ERROR(r))
+			t->incomplete = true;
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	return result;
+}
+
+/****************************************************************************
+ * Step 10: enqueue downloading of missing packages *
+ ****************************************************************************/
+
+static retvalue enqueue_upgrade_package(void *calldata, const struct checksumsarray *origfiles, const struct strlist *filekeys, void *privdata) {
+	/* Callback for upgradelist_enqueue: schedule the files of one
+	 * package for download via its origin's apt method. */
+	struct downloadcache *cache = calldata;
+	struct update_index_connector *uindex = privdata;
+	struct aptmethod *method;
+
+	assert(privdata != NULL);
+	method = remote_aptmethod(uindex->origin->from);
+	assert(method != NULL);
+	return downloadcache_addfiles(cache, method, origfiles, filekeys);
+}
+
+static retvalue updates_enqueue(struct downloadcache *cache, struct update_distribution *distribution) {
+	/* Queue downloads for all packages to be newly installed or
+	 * upgraded in any target of this distribution. */
+	struct update_target *t;
+	retvalue result = RET_NOTHING;
+
+	for (t = distribution->targets ; t != NULL ; t = t->next) {
+		retvalue r;
+
+		if (t->nothingnew)
+			continue;
+		r = upgradelist_enqueue(t->upgradelist,
+				enqueue_upgrade_package, cache);
+		if (RET_WAS_ERROR(r))
+			t->incomplete = true;
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	return result;
+}
+
+/****************************************************************************
+ * Step 11: install the missing packages *
+ * (missing files should have been downloaded first) *
+ ****************************************************************************/
+static bool isbigdelete(struct update_distribution *d) {
+	/* Check whether any target would delete a suspiciously large
+	 * part of its packages; if so, mark the whole distribution as
+	 * omitted and discard all its upgrade lists. */
+	struct update_target *t;
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		struct update_target *v;
+
+		if (t->nothingnew || t->ignoredelete)
+			continue;
+		if (!upgradelist_isbigdelete(t->upgradelist))
+			continue;
+		d->distribution->omitted = true;
+		for (v = d->targets ; v != NULL ; v = v->next) {
+			upgradelist_free(v->upgradelist);
+			v->upgradelist = NULL;
+		}
+		return true;
+	}
+	return false;
+}
+
+/* Callback for upgradelist_install: report which update rule (pattern name)
+ * and which upstream suite a package came from, for logging purposes. */
+static void updates_from_callback(void *privdata, const char **rule_p, const char **from_p) {
+	struct update_index_connector *uindex = privdata;
+
+	*from_p = uindex->origin->suite_from;
+	*rule_p = uindex->origin->pattern->name;
+}
+
+/* Install (and possibly delete) the packages computed for each target of
+ * this distribution. The upgrade lists are consumed (freed) here whether
+ * installation succeeds or not. On overall success, retrack if the
+ * distribution uses source tracking. */
+static retvalue updates_install(struct update_distribution *distribution) {
+	retvalue result, r;
+	struct update_target *u;
+	struct distribution *d = distribution->distribution;
+
+	assert (logger_isprepared(d->logger));
+
+	result = RET_NOTHING;
+	for (u=distribution->targets ; u != NULL ; u=u->next) {
+		if (u->nothingnew)
+			continue;
+		r = upgradelist_install(u->upgradelist, d->logger,
+				u->ignoredelete, updates_from_callback);
+		/* record in the distribution that it was modified/failed */
+		RET_UPDATE(d->status, r);
+		if (RET_WAS_ERROR(r))
+			u->incomplete = true;
+		RET_UPDATE(result, r);
+		/* the list is consumed even on error */
+		upgradelist_free(u->upgradelist);
+		u->upgradelist = NULL;
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	if (RET_IS_OK(result) && d->tracking != dt_NONE) {
+		r = tracking_retrack(d, false);
+		RET_ENDUPDATE(result, r);
+	}
+	return result;
+}
+
+/****************************************************************************
+ * Step 12: mark index files as processed, so they won't process a second *
+ * time, unless --noskipold is given *
+ ****************************************************************************/
+
+/* Record which index files were fully processed, so a later run with
+ * --skipold can skip them. Targets flagged incomplete are left out so
+ * they get re-processed. Errors creating the done-file are silently
+ * ignored: the worst case is redoing work next time. */
+static void markdone(struct update_distribution *d) {
+	struct markdonefile *done;
+	struct update_index_connector *i;
+	struct update_target *t;
+	retvalue r;
+
+	r = markdone_create(d->distribution->codename, &done);
+	if (!RET_IS_OK(r))
+		return;
+
+	for (t = d->targets ; t != NULL ; t = t->next) {
+		if (t->incomplete)
+			continue;
+		markdone_target(done, t->target->identifier);
+		/* a NULL remote denotes the magic "-" delete rule */
+		for (i = t->indices ; i != NULL ; i = i->next)
+			if (i->remote == NULL)
+				markdone_cleaner(done);
+			else
+				remote_index_markdone(i->remote, done);
+	}
+	markdone_finish(done);
+}
+
+
+/****************************************************************************
+ * All together now: everything done step after step, in between telling *
+ * the apt methods to actually download what was enqueued. *
+ ****************************************************************************/
+
+/* Read the per-distribution done-file and mark targets whose recorded
+ * index list exactly matches the current (still unchanged) indices as
+ * nothingnew, so --skipold can skip them. Any mismatch in order or
+ * content clears the flag again. */
+static retvalue markold(struct update_distribution *ud) {
+	struct update_target *ut;
+	struct update_index_connector *ui;
+	retvalue r;
+	struct donefile *donefile;
+	const char *identifier;
+
+	r = donefile_open(ud->distribution->codename, &donefile);
+	if (!RET_IS_OK(r))
+		return r;
+
+	while (donefile_nexttarget(donefile, &identifier)) {
+		/* find the target recorded in the done-file */
+		ut = ud->targets;
+		while (ut != NULL && strcmp(identifier,
+					ut->target->identifier) != 0)
+			ut = ut->next;
+		if (ut == NULL)
+			continue;
+		/* assume old until an index proves otherwise */
+		ut->nothingnew = true;
+		for (ui = ut->indices ; ui != NULL ; ui = ui->next) {
+			/* if the order does not match, it does not matter
+			 * if they are new or not, they should be processed
+			 * anyway */
+
+			if (ui->remote == NULL) {
+				/* the magic "-" delete rule */
+				if (!donefile_iscleaner(donefile)) {
+					ut->nothingnew = false;
+					break;
+				}
+				continue;
+			}
+			if (remote_index_isnew(ui->remote, donefile)) {
+				ut->nothingnew = false;
+				break;
+			}
+		}
+
+	}
+	donefile_close(donefile);
+	return RET_OK;
+}
+
+/* Decide which remote index files are actually needed and queue their
+ * download: first fetch the meta lists (Release files), then — after
+ * optionally marking unchanged targets old — mark every index of every
+ * target that still has something new as needed and fetch those lists.
+ * *anythingtodo is set when at least one index must be processed. */
+static retvalue updates_preparelists(struct aptmethodrun *run, struct update_distribution *distributions, bool nolistsdownload, bool skipold, bool *anythingtodo) {
+	struct update_distribution *d;
+	struct update_target *ut;
+	struct update_index_connector *ui;
+	retvalue r;
+
+	r = remote_preparemetalists(run, nolistsdownload);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	for (d = distributions ; d != NULL ; d = d->next) {
+		/* first check what is old */
+		if (skipold) {
+			r = markold(d);
+			if (RET_WAS_ERROR(r))
+				return r;
+		}
+		/* we need anything that is needed in a target
+		 * where something is new (as new might mean
+		 * a package is left hiding leftmore packages,
+		 * and everything in rightmore packages is needed
+		 * to see what in the new takes effect) */
+		for (ut = d->targets; ut != NULL ; ut = ut->next) {
+			if (ut->nothingnew)
+				continue;
+			if (ut->indices == NULL) {
+				/* no update rules apply: nothing to do */
+				ut->nothingnew = true;
+				continue;
+			}
+			for (ui = ut->indices ; ui != NULL ; ui = ui->next) {
+				if (ui->remote == NULL)
+					continue;
+				remote_index_needed(ui->remote);
+				*anythingtodo = true;
+			}
+		}
+	}
+
+	r = remote_preparelists(run, nolistsdownload);
+	if (RET_WAS_ERROR(r))
+		return r;
+	return RET_OK;
+}
+
+/* Common preamble of all update-style commands: start the apt methods,
+ * download the needed index lists and run the list hooks.
+ * On success *run_p receives the running apt method set (caller must shut
+ * it down); RET_NOTHING means everything is old and was skipped. On any
+ * other exit path the apt methods are already shut down here. */
+static retvalue updates_prepare(struct update_distribution *distributions, bool willwrite, bool nolistsdownload, bool skipold, struct aptmethodrun **run_p) {
+	retvalue result, r;
+	struct aptmethodrun *run;
+	bool anythingtodo = !skipold;
+
+	r = aptmethod_initialize_run(&run);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* preparations */
+	result = updates_startup(run, distributions, willwrite);
+	if (RET_WAS_ERROR(result)) {
+		aptmethod_shutdown(run);
+		return result;
+	}
+
+	r = updates_preparelists(run, distributions, nolistsdownload, skipold,
+			&anythingtodo);
+	RET_UPDATE(result, r);
+	if (RET_WAS_ERROR(result)) {
+		aptmethod_shutdown(run);
+		return result;
+	}
+	if (!anythingtodo && skipold) {
+		if (verbose >= 0) {
+			/* read-only commands keep stdout clean for data */
+			if (willwrite)
+				printf(
+"Nothing to do found. (Use --noskipold to force processing)\n");
+			else
+				fprintf(stderr,
+"Nothing to do found. (Use --noskipold to force processing)\n");
+		}
+
+		aptmethod_shutdown(run);
+		return RET_NOTHING;
+	}
+
+	/* Call ListHooks (if given) on the downloaded index files.
+	 * (This is done even when nolistsdownload is given, as otherwise
+	 * the filename to look in is not calculated) */
+	r = updates_calllisthooks(distributions);
+	RET_UPDATE(result, r);
+	if (RET_WAS_ERROR(result)) {
+		aptmethod_shutdown(run);
+		return result;
+	}
+
+	*run_p = run;
+	return RET_OK;
+}
+
+
+/* The 'update' command: prepare (download lists), compute the upgrade
+ * lists, enqueue and download missing package files, then install and
+ * mark done. mode/reserveddb/reservedother parameterize the free-space
+ * check on the involved filesystems. */
+retvalue updates_update(struct update_distribution *distributions, bool nolistsdownload, bool skipold, enum spacecheckmode mode, off_t reserveddb, off_t reservedother) {
+	retvalue result, r;
+	struct update_distribution *d;
+	struct downloadcache *cache;
+	struct aptmethodrun *run;
+	bool todo;
+
+	causingfile = NULL;
+
+	result = updates_prepare(distributions, true, nolistsdownload, skipold,
+			&run);
+	if (!RET_IS_OK(result))
+		return result;
+
+	/* Then get all packages */
+	if (verbose >= 0)
+		printf("Calculating packages to get...\n");
+	r = downloadcache_initialize(mode, reserveddb, reservedother, &cache);
+	if (!RET_IS_OK(r)) {
+		aptmethod_shutdown(run);
+		RET_UPDATE(result, r);
+		return result;
+	}
+
+	/* todo tracks whether anything must be downloaded or deleted */
+	todo = false;
+	for (d=distributions ; d != NULL ; d=d->next) {
+		r = updates_readindices(stdout, d);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+		if (global.onlysmalldeletes) {
+			/* marks d omitted and frees its lists on a hit */
+			if (isbigdelete(d))
+				continue;
+		}
+		r = updates_enqueue(cache, d);
+		if (RET_IS_OK(r))
+			todo = true;
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+	if (!RET_WAS_ERROR(result)) {
+		r = space_check(cache->devices);
+		RET_ENDUPDATE(result, r);
+	}
+	/* nothing to download: still something to do if deletions pend */
+	if (!RET_WAS_ERROR(result) && !todo) {
+		for (d=distributions ; !todo && d != NULL ; d=d->next) {
+			struct update_target *u;
+			if (d->distribution->omitted)
+				continue;
+			for (u = d->targets ; u != NULL ; u = u->next) {
+				if (u->nothingnew || u->ignoredelete)
+					continue;
+				if (upgradelist_woulddelete(u->upgradelist)) {
+					todo = true;
+					break;
+				}
+			}
+		}
+	}
+
+	/* early exit: on error or nothing to do, clean up everything */
+	if (RET_WAS_ERROR(result) || !todo) {
+		for (d=distributions ; d != NULL ; d=d->next) {
+			struct update_target *u;
+			if (d->distribution->omitted) {
+				fprintf(stderr,
+"Not processing updates for '%s' because of --onlysmalldeletes!\n",
+					d->distribution->codename);
+			} else if (RET_IS_OK(result))
+				markdone(d);
+			for (u=d->targets ; u != NULL ; u=u->next) {
+				upgradelist_free(u->upgradelist);
+				u->upgradelist = NULL;
+			}
+		}
+		r = downloadcache_free(cache);
+		RET_UPDATE(result, r);
+		aptmethod_shutdown(run);
+		return result;
+	}
+	if (verbose >= 0)
+		printf("Getting packages...\n");
+	r = aptmethod_download(run);
+	RET_UPDATE(result, r);
+	r = downloadcache_free(cache);
+	RET_ENDUPDATE(result, r);
+	if (verbose > 0)
+		printf("Shutting down aptmethods...\n");
+	r = aptmethod_shutdown(run);
+	RET_UPDATE(result, r);
+
+	/* download errors: free the lists and give up before installing */
+	if (RET_WAS_ERROR(result)) {
+		for (d=distributions ; d != NULL ; d=d->next) {
+			struct update_target *u;
+			for (u=d->targets ; u != NULL ; u=u->next) {
+				upgradelist_free(u->upgradelist);
+				u->upgradelist = NULL;
+			}
+		}
+		return result;
+	}
+	if (verbose >= 0)
+		printf("Installing (and possibly deleting) packages...\n");
+
+	for (d=distributions ; d != NULL ; d=d->next) {
+		if (d->distribution->omitted)
+			continue;
+		r = updates_install(d);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+	}
+
+	/* record processed indices (incomplete targets are skipped inside) */
+	for (d=distributions ; d != NULL ; d=d->next) {
+		if (d->distribution->omitted) {
+			fprintf(stderr,
+"Not processing updates for '%s' because of --onlysmalldeletes!\n",
+				d->distribution->codename);
+		} else
+			markdone(d);
+	}
+	logger_wait();
+
+	return result;
+}
+
+/****************************************************************************
+ * Alternatively, don't download and install, but list what is needed to be *
+ * done. (For the checkupdate command) *
+ ****************************************************************************/
+
+/* Per-package callback for the checkupdate dump: print, human-readably,
+ * what would happen to one package.
+ * Note: newversion == oldversion is a deliberate POINTER comparison; the
+ * upgradelist passes the identical pointer to signal "kept as is". */
+static void upgrade_dumppackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) {
+	struct update_index_connector *uindex = privdata;
+
+	if (newversion == NULL) {
+		/* nothing to install: deletion or rejected candidate */
+		if (oldversion != NULL && bestcandidate != NULL) {
+			printf("'%s': '%s' will be deleted"
+					" (best new: '%s')\n",
+					packagename, oldversion, bestcandidate);
+		} else if (oldversion != NULL) {
+			printf("'%s': '%s' will be deleted"
+					" (no longer available or superseded)\n",
+					packagename, oldversion);
+		} else {
+			/* NOTE(review): bestcandidate is /*@null@*/ and not
+			 * checked here; %s with NULL is undefined behavior.
+			 * Presumably the caller guarantees a candidate in
+			 * this branch — confirm against upgradelist_dump. */
+			printf("'%s': will NOT be added as '%s'\n",
+					packagename, bestcandidate);
+		}
+	} else if (newversion == oldversion) {
+		/* package is kept; only chatty at higher verbosity */
+		if (bestcandidate != NULL) {
+			if (verbose > 1)
+				printf("'%s': '%s' will be kept"
+						" (best new: '%s')\n",
+						packagename, oldversion,
+						bestcandidate);
+		} else {
+			if (verbose > 0)
+				printf("'%s': '%s' will be kept"
+						" (unavailable for reload)\n",
+						packagename, oldversion);
+		}
+	} else {
+		/* upgrade or new installation via rule 'via' */
+		const char *via = uindex->origin->pattern->name;
+
+		assert (newfilekeys != NULL);
+		assert (newcontrol != NULL);
+		if (oldversion != NULL)
+			(void)printf(
+"'%s': '%s' will be upgraded to '%s' (from '%s'):\n files needed: ",
+					packagename, oldversion,
+					newversion, via);
+		else
+			(void)printf(
+"'%s': newly installed as '%s' (from '%s'):\n files needed: ",
+					packagename, newversion, via);
+		(void)strlist_fprint(stdout, newfilekeys);
+		if (verbose > 2)
+			(void)printf("\n installing as: '%s'\n",
+					newcontrol);
+		else
+			(void)putchar('\n');
+	}
+}
+
+/* Print a human-readable summary of pending changes for each target of
+ * the distribution, consuming (freeing) the upgrade lists as it goes. */
+static void updates_dump(struct update_distribution *distribution) {
+	struct update_target *t;
+
+	for (t = distribution->targets ; t != NULL ; t = t->next) {
+		if (t->nothingnew)
+			continue;
+		printf("Updates needed for '%s':\n", t->target->identifier);
+		upgradelist_dump(t->upgradelist, upgrade_dumppackage);
+		upgradelist_free(t->upgradelist);
+		t->upgradelist = NULL;
+	}
+}
+
+/* Per-package callback for the dumpupdate command: machine-parseable
+ * one-line-per-package output (delete/keep/update/add).
+ * As above, newversion == oldversion is an intentional pointer comparison
+ * meaning "kept unchanged". */
+static void upgrade_dumplistpackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) {
+	struct update_index_connector *uindex = privdata;
+
+	if (newversion == NULL) {
+		if (oldversion == NULL)
+			return;
+		printf("delete '%s' '%s'\n", packagename, oldversion);
+	} else if (newversion == oldversion) {
+		if (bestcandidate != NULL)
+			printf("keep '%s' '%s' '%s'\n", packagename,
+					oldversion, bestcandidate);
+		else
+			printf("keep '%s' '%s' unavailable\n", packagename,
+					oldversion);
+	} else {
+		/* name of the update rule the new version comes from */
+		const char *via = uindex->origin->pattern->name;
+
+		assert (newfilekeys != NULL);
+		assert (newcontrol != NULL);
+		if (oldversion != NULL)
+			(void)printf("update '%s' '%s' '%s' '%s'\n",
+					packagename, oldversion,
+					newversion, via);
+		else
+			(void)printf("add '%s' - '%s' '%s'\n",
+					packagename, newversion, via);
+	}
+}
+
+/* Print the machine-parseable change list for each target of the
+ * distribution, consuming (freeing) the upgrade lists as it goes. */
+static void updates_dumplist(struct update_distribution *distribution) {
+	struct update_target *t;
+
+	for (t = distribution->targets ; t != NULL ; t = t->next) {
+		if (t->nothingnew)
+			continue;
+		printf("Updates needed for '%s':\n", t->target->identifier);
+		upgradelist_dump(t->upgradelist, upgrade_dumplistpackage);
+		upgradelist_free(t->upgradelist);
+		t->upgradelist = NULL;
+	}
+}
+
+/* The 'checkupdate' command: compute what an update run would do and
+ * print it human-readably. Progress goes to stderr so stdout carries
+ * only the report. Nothing is downloaded or installed. */
+retvalue updates_checkupdate(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
+	struct update_distribution *d;
+	retvalue result, r;
+	struct aptmethodrun *run;
+
+	result = updates_prepare(distributions, false, nolistsdownload, skipold,
+			&run);
+	if (!RET_IS_OK(result))
+		return result;
+
+	/* lists are already fetched; the methods are no longer needed */
+	if (verbose > 0)
+		fprintf(stderr, "Shutting down aptmethods...\n");
+	r = aptmethod_shutdown(run);
+	RET_UPDATE(result, r);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	/* Then look what packages to get */
+	if (verbose >= 0)
+		fprintf(stderr, "Calculating packages to get...\n");
+
+	for (d=distributions ; d != NULL ; d=d->next) {
+		r = updates_readindices(stderr, d);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+		/* consumes the upgrade lists */
+		updates_dump(d);
+	}
+
+	return result;
+}
+
+/* The 'dumpupdate' command: like checkupdate but with machine-parseable
+ * output and fully silent index reading (NULL progress stream). */
+retvalue updates_dumpupdate(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
+	struct update_distribution *d;
+	retvalue result, r;
+	struct aptmethodrun *run;
+
+	result = updates_prepare(distributions, false, nolistsdownload, skipold,
+			&run);
+	if (!RET_IS_OK(result))
+		return result;
+
+	r = aptmethod_shutdown(run);
+	RET_UPDATE(result, r);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	for (d=distributions ; d != NULL ; d=d->next) {
+		r = updates_readindices(NULL, d);
+		RET_UPDATE(result, r);
+		if (RET_WAS_ERROR(r))
+			break;
+		/* consumes the upgrade lists */
+		updates_dumplist(d);
+	}
+
+	return result;
+}
+
+/******************************************************************************
+ * For the predelete command: delete everything a following update run would *
+ * delete. (Assuming no unexpected errors occur, like a file missing upstream.*
+ *****************************************************************************/
+
+/* The 'predelete' command: delete everything a following update run
+ * would delete or replace, without installing anything. Lists are
+ * fetched only to know what would be replaced. */
+retvalue updates_predelete(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
+	retvalue result, r;
+	struct update_distribution *d;
+	struct aptmethodrun *run;
+
+	causingfile = NULL;
+
+	result = updates_prepare(distributions, true, nolistsdownload, skipold,
+			&run);
+	if (!RET_IS_OK(result))
+		return result;
+
+	/* no package files need downloading, so stop the methods early */
+	if (verbose > 0)
+		printf("Shutting down aptmethods...\n");
+	r = aptmethod_shutdown(run);
+	RET_UPDATE(result, r);
+	if (RET_WAS_ERROR(result)) {
+		return result;
+	}
+
+	if (verbose >= 0)
+		printf("Removing obsolete or to be replaced packages...\n");
+	for (d=distributions ; d != NULL ; d=d->next) {
+		struct distribution *dd = d->distribution;
+		struct update_target *u;
+
+		for (u=d->targets ; u != NULL ; u=u->next) {
+			r = searchformissing(stdout, u);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r)) {
+				u->incomplete = true;
+				continue;
+			}
+			if (u->nothingnew || u->ignoredelete) {
+				/* nothing would change: discard the list */
+				upgradelist_free(u->upgradelist);
+				u->upgradelist = NULL;
+				continue;
+			}
+			r = upgradelist_predelete(u->upgradelist, dd->logger);
+			/* record in the distribution that it was modified */
+			RET_UPDATE(dd->status, r);
+			if (RET_WAS_ERROR(r))
+				u->incomplete = true;
+			RET_UPDATE(result, r);
+			upgradelist_free(u->upgradelist);
+			u->upgradelist = NULL;
+			if (RET_WAS_ERROR(r))
+				return r;
+			if (RET_IS_OK(result) && dd->tracking != dt_NONE) {
+				r = tracking_retrack(dd, false);
+				RET_ENDUPDATE(result, r);
+			}
+		}
+	}
+	logger_wait();
+	return result;
+}
+
+/******************************************************************************
+ * The cleanlists command has to mark all files that might be scheduled to be *
+ * downloaded again, so that the rest can be deleted *
+ ******************************************************************************/
+
+/* Mark every cached list file a future update run of distribution d with
+ * this pattern data would read, so updates_cleanlists keeps it.
+ * flat:   component atom when the pattern describes a flat repository,
+ *         otherwise undefined (NOTE(review): callers currently pass a
+ *         bool here; works only because atom_defined(true) holds and
+ *         the value is then compared against t->architecture — confirm).
+ * a_from/a_into: architecture mapping lists (NULL = use d's own);
+ * c_from/uc_from: component/udeb-component source lists (NULL = d's own).
+ * Fix: the fallback loops for udeb components previously iterated
+ * d->udebcomponents.count but indexed d->components.atoms, marking the
+ * wrong files whenever UdebComponents differs from Components. */
+static void marktargetsneeded(struct cachedlistfile *files, const struct distribution *d, component_t flat, /*@null@*/const struct strlist *a_from, /*@null@*/const struct strlist *a_into, /*@null@*/const struct strlist *c_from, /*@null@*/const struct strlist *uc_from, const char *repository, const char *suite) {
+	struct target *t;
+	int i, ai;
+
+	if (atom_defined(flat)) {
+		/* flat repositories have only one index per package type */
+		bool deb_needed = false, dsc_needed = false;
+
+		for (t = d->targets ; t != NULL ; t = t->next) {
+			if (t->packagetype == pt_udeb)
+				continue;
+			if (flat != t->architecture)
+				continue;
+			if (a_into != NULL &&
+					!strlist_in(a_into,
+						atoms_architectures[
+						t->architecture]))
+				continue;
+			if (t->packagetype == pt_deb)
+				deb_needed = true;
+			else if (t->packagetype == pt_dsc)
+				dsc_needed = true;
+		}
+		if (deb_needed)
+			cachedlistfile_need_flat_index(files,
+					repository, suite, pt_deb);
+		if (dsc_needed)
+			cachedlistfile_need_flat_index(files,
+					repository, suite, pt_dsc);
+		return;
+	}
+	/* .dsc */
+	if ((a_into != NULL && strlist_in(a_into, "source")) ||
+	    (a_into == NULL && atomlist_in(&d->architectures,
+				architecture_source))) {
+		if (c_from != NULL)
+			for (i = 0 ; i < c_from->count ; i++)
+				cachedlistfile_need_index(files,
+					repository, suite, "source",
+					c_from->values[i], pt_dsc);
+		else
+			for (i = 0 ; i < d->components.count ; i++)
+				cachedlistfile_need_index(files,
+					repository, suite, "source",
+					atoms_components[
+					d->components.atoms[i]],
+					pt_dsc);
+	}
+	/* .deb and .udeb */
+	if (a_into != NULL) {
+		/* architectures explicitly mapped by the pattern */
+		for (ai = 0 ; ai < a_into->count ; ai++) {
+			const char *a = a_from->values[ai];
+
+			if (strcmp(a_into->values[ai], "source") == 0)
+				continue;
+			if (c_from != NULL)
+				for (i = 0 ; i < c_from->count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						c_from->values[i],
+						pt_deb);
+			else
+				for (i = 0 ; i < d->components.count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						atoms_components[
+						d->components.atoms[i]],
+						pt_deb);
+			if (uc_from != NULL)
+				for (i = 0 ; i < uc_from->count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						uc_from->values[i],
+						pt_udeb);
+			else
+				/* fixed: use udebcomponents' own atoms */
+				for (i = 0 ; i < d->udebcomponents.count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						atoms_components[
+						d->udebcomponents.atoms[i]],
+						pt_udeb);
+		}
+	} else {
+		/* no mapping: use the distribution's own architectures */
+		for (ai = 0 ; ai < d->architectures.count ; ai++) {
+			const char *a = atoms_architectures[
+					d->architectures.atoms[ai]];
+
+			if (d->architectures.atoms[ai] == architecture_source)
+				continue;
+			if (c_from != NULL)
+				for (i = 0 ; i < c_from->count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						c_from->values[i],
+						pt_deb);
+			else
+				for (i = 0 ; i < d->components.count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						atoms_components[
+						d->components.atoms[i]],
+						pt_deb);
+			if (uc_from != NULL)
+				for (i = 0 ; i < uc_from->count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						uc_from->values[i],
+						pt_udeb);
+			else
+				/* fixed: use udebcomponents' own atoms */
+				for (i = 0 ; i < d->udebcomponents.count ; i++)
+					cachedlistfile_need_index(files,
+						repository, suite, a,
+						atoms_components[
+						d->udebcomponents.atoms[i]],
+						pt_udeb);
+		}
+	}
+}
+
+/* The 'cleanlists' command: mark every cached list file that any
+ * configured update rule might still need, then delete the rest.
+ * Fixes: (1) the per-rule lookup results (a_from/a_into/c_from/uc_from)
+ * are now reset for every rule; previously a rule whose pattern chain did
+ * not set them silently inherited the previous rule's lists, marking the
+ * wrong files as needed. (2) the accumulated result is returned instead
+ * of being discarded in favor of an unconditional RET_OK. */
+retvalue updates_cleanlists(const struct distribution *distributions, const struct update_pattern *patterns) {
+	retvalue result;
+	const struct distribution *d;
+	const struct update_pattern *p, *q;
+	struct cachedlistfile *files;
+	int i;
+	bool isflat;
+	const struct strlist *uc_from = NULL;
+	const struct strlist *c_from = NULL;
+	const struct strlist *a_from = NULL, *a_into = NULL;
+	const char *repository;
+	char *suite;
+
+	result = cachedlists_scandir(&files);
+	if (!RET_IS_OK(result))
+		return result;
+
+	result = RET_OK;
+	for (d = distributions ; d != NULL ; d = d->next) {
+		if (d->updates.count == 0)
+			continue;
+		cachedlistfile_need(files, "lastseen", 2, "", d->codename, NULL);
+		for (i = 0; i < d->updates.count ; i++) {
+			const char *name = d->updates.values[i];
+
+			if (strcmp(name, "-") == 0)
+				continue;
+
+			/* reset per rule, so values are not inherited from
+			 * the previous rule when this rule's pattern chain
+			 * does not set them */
+			uc_from = NULL;
+			c_from = NULL;
+			a_from = NULL;
+			a_into = NULL;
+
+			p = patterns;
+			while (p != NULL && strcmp(name, p->name) != 0)
+				p = p->next;
+			if (p == NULL) {
+				fprintf(stderr,
+"Cannot find definition of upgrade-rule '%s' for distribution '%s'!\n",
+					name, d->codename);
+				result = RET_ERROR;
+				continue;
+			}
+			/* the root of the From: chain names the repository */
+			q = p;
+			while (q != NULL && q->pattern_from != NULL)
+				q = q->pattern_from;
+			repository = q->name;
+			/* walk the chain for each inheritable setting */
+			q = p;
+			while (q != NULL && !atom_defined(q->flat))
+				q = q->pattern_from;
+			/* NOTE(review): a bool is passed below where
+			 * marktargetsneeded takes a component_t — works
+			 * because atom_defined(true) holds, but confirm */
+			isflat = q != NULL;
+			q = p;
+			while (q != NULL && !q->architectures_set)
+				q = q->pattern_from;
+			if (q != NULL) {
+				a_from = &q->architectures_from;
+				a_into = &q->architectures_into;
+			}
+			q = p;
+			while (q != NULL && !q->components_set)
+				q = q->pattern_from;
+			if (q != NULL)
+				c_from = &q->components_from;
+			q = p;
+			while (q != NULL && !q->udebcomponents_set)
+				q = q->pattern_from;
+			if (q != NULL)
+				uc_from = &q->udebcomponents_from;
+			suite = translate_suite_pattern(p, d->codename);
+			if (FAILEDTOALLOC(suite)) {
+				cachedlistfile_freelist(files);
+				return RET_ERROR_OOM;
+			}
+			/* Only index files are intresting, everything else
+			 * Release, Release.gpg, compressed files, hook processed
+			 * files is deleted */
+			marktargetsneeded(files, d, isflat, a_from, a_into,
+					c_from, uc_from, repository, suite);
+			free(suite);
+
+		}
+	}
+	cachedlistfile_deleteunneeded(files);
+	cachedlistfile_freelist(files);
+	/* propagate errors (e.g. missing rule definitions) to the caller */
+	return result;
+}
diff --git a/updates.h b/updates.h
new file mode 100644
index 0000000..dc072ee
--- /dev/null
+++ b/updates.h
@@ -0,0 +1,40 @@
+#ifndef REPREPRO_UPDATES_H
+#define REPREPRO_UPDATES_H
+
+/* Public interface of the update machinery: parsing update patterns,
+ * computing per-distribution update plans and executing the update,
+ * checkupdate, dumpupdate, predelete and cleanlists commands. */
+
+#ifndef REPREPRO_ERROR_H
+#include "error.h"
+/* NOTE(review): fires whenever error.h was not already included by the
+ * including file; message contains a typo ("hapening") */
+#warning "What's hapening here?"
+#endif
+#ifndef REPREPRO_RELEASE_H
+#include "release.h"
+#endif
+#ifndef REPREPRO_DISTRIBUTION_H
+#include "distribution.h"
+#endif
+#ifndef REPREPRO_STRLIST_H
+#include "strlist.h"
+#endif
+#ifndef REPREPRO_FREESPACE_H
+#include "freespace.h"
+#endif
+
+
+/* opaque types, defined in updates.c */
+struct update_pattern;
+struct update_origin;
+struct update_target;
+struct update_distribution;
+
+/* parse all update patterns from the configuration */
+retvalue updates_getpatterns(/*@out@*/struct update_pattern **);
+
+void updates_freepatterns(/*@only@*/struct update_pattern *p);
+void updates_freeupdatedistributions(/*@only@*/struct update_distribution *d);
+
+/* match patterns against distributions, restricted to the given lists */
+retvalue updates_calcindices(struct update_pattern *, struct distribution *, const struct atomlist * /*components*/, const struct atomlist */*architectures*/, const struct atomlist */*types*/, /*@out@*/struct update_distribution **);
+
+retvalue updates_update(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/, enum spacecheckmode, off_t /*reserveddb*/, off_t /*reservedother*/);
+retvalue updates_checkupdate(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/);
+retvalue updates_dumpupdate(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/);
+retvalue updates_predelete(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/);
+
+/* delete cached list files no configured rule could still need */
+retvalue updates_cleanlists(const struct distribution *, const struct update_pattern *);
+#endif
diff --git a/upgradelist.c b/upgradelist.c
new file mode 100644
index 0000000..be82173
--- /dev/null
+++ b/upgradelist.c
@@ -0,0 +1,756 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2004,2005,2006,2007,2008,2016 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "error.h"
+#include "ignore.h"
+#include "strlist.h"
+#include "indexfile.h"
+#include "dpkgversions.h"
+#include "target.h"
+#include "files.h"
+#include "descriptions.h"
+#include "package.h"
+#include "upgradelist.h"
+
+/* One package in an upgrade list: what is currently in the archive and
+ * what the best upstream candidate is. Nodes live in a singly linked
+ * list sorted by package name. */
+struct package_data {
+	struct package_data *next;
+	/* the name of the package: */
+	char *name;
+	/* the version in our repository:
+	 * NULL means not yet in the archive */
+	char *version_in_use;
+	/* the most recent version we found
+	 * (either is version_in_use or version_new)*/
+	/*@dependent@*/const char *version;
+
+	/* if this is != 0, package will be deleted afterwards,
+	 * (or new version simply ignored if it is not yet in the
+	 * archive) */
+	bool deleted;
+
+	/* The most recent version we found upstream:
+	 * NULL means nothing found. */
+	char *new_version;
+	/* where the recent version comes from: */
+	/*@dependent@*/void *privdata;
+
+	/* the new control-chunk for the package to go in.
+	 * NOTE(review): the original comment said "non-NULL if
+	 * new_version && newversion == version_in_use"; from the code in
+	 * upgradelist_trypackage this looks like it should read "!=" —
+	 * it is set when a new version is to be installed. Confirm. */
+	char *new_control;
+	/* the list of files that will belong to this:
+	 * same validity */
+	struct strlist new_filekeys;
+	struct checksumsarray new_origfiles;
+	/* to distinguish arch all from not arch all */
+	architecture_t architecture;
+};
+
+/* A sorted list of package_data for one target, plus a cursor ('last')
+ * remembering the most recent insertion point, since index files are
+ * usually fed in nearly alphabetical order. */
+struct upgradelist {
+	/*@dependent@*/struct target *target;
+	struct package_data *list;
+	/* package the next package will most probably be after.
+	 * (NULL=before start of list) */
+	/*@null@*//*@dependent@*/struct package_data *last;
+	/* internal...*/
+};
+
+/* Release one package_data node and everything it owns.
+ * free(NULL) is a no-op, so unset members are harmless. */
+static void package_data_free(/*@only@*/struct package_data *data){
+	if (data == NULL)
+		return;
+	strlist_done(&data->new_filekeys);
+	checksumsarray_done(&data->new_origfiles);
+	free(data->name);
+	free(data->version_in_use);
+	free(data->new_version);
+	free(data->new_control);
+	free(data);
+}
+
+/* This is called before any package lists are read.
+ * It is called once for every package we already have in this target.
+ * upgrade->list points to the first in the sorted list,
+ * upgrade->last to the last one inserted */
+/* Record one package already in the archive into the upgrade list.
+ * Relies on the database iterating in sorted order, so each node is
+ * appended at the tail ('last'); anything out of order is fatal. */
+static retvalue save_package_version(struct upgradelist *upgrade, struct package *pkg) {
+	retvalue r;
+	struct package_data *package;
+
+	r = package_getversion(pkg);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	/* NOTE(review): the ternary may pass NULL for %s, which is
+	 * undefined behavior; trace-only (verbose >= 15), but fragile */
+	if (verbose >= 15)
+		fprintf(stderr, "trace: save_package_version(upgrade.target={identifier: %s}, pkg={name: %s, version: %s, pkgname: %s}) called.\n",
+			upgrade->target == NULL ? NULL : upgrade->target->identifier, pkg->name, pkg->version, pkg->pkgname);
+
+	package = zNEW(struct package_data);
+	if (FAILEDTOALLOC(package))
+		return RET_ERROR_OOM;
+
+	package->privdata = NULL;
+	package->name = strdup(pkg->name);
+	if (FAILEDTOALLOC(package->name)) {
+		free(package);
+		return RET_ERROR_OOM;
+	}
+	package->version_in_use = package_dupversion(pkg);
+	if (FAILEDTOALLOC(package->version_in_use)) {
+		free(package->name);
+		free(package);
+		return RET_ERROR_OOM;
+	}
+	/* until something newer is seen, the archive version is current */
+	package->version = package->version_in_use;
+
+	if (upgrade->list == NULL) {
+		/* first package to add: */
+		upgrade->list = package;
+		upgrade->last = package;
+	} else {
+		if (strcmp(pkg->name, upgrade->last->name) > 0) {
+			upgrade->last->next = package;
+			upgrade->last = package;
+		} else {
+			/* this should only happen if the underlying
+			 * database-method get changed, so just throwing
+			 * out here */
+			fprintf(stderr, "Package database is not sorted!!!\n");
+			assert(false);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	return RET_OK;
+}
+
+/* Create an upgrade list for target t, pre-populated with every package
+ * currently in the archive. On success *ul receives the list (caller
+ * frees with upgradelist_free); on error everything is cleaned up here. */
+retvalue upgradelist_initialize(struct upgradelist **ul, struct target *t) {
+	struct upgradelist *upgrade;
+	retvalue r, r2;
+	struct package_cursor iterator;
+
+	if (verbose >= 15)
+		fprintf(stderr, "trace: upgradelist_initialize(target={identifier: %s}) called.\n",
+			t == NULL ? NULL : t->identifier);
+
+	upgrade = zNEW(struct upgradelist);
+	if (FAILEDTOALLOC(upgrade))
+		return RET_ERROR_OOM;
+
+	upgrade->target = t;
+
+	/* Begin with the packages currently in the archive */
+
+	r = package_openiterator(t, READONLY, false, &iterator);
+	if (RET_WAS_ERROR(r)) {
+		upgradelist_free(upgrade);
+		return r;
+	}
+	while (package_next(&iterator)) {
+		r2 = save_package_version(upgrade, &iterator.current);
+		RET_UPDATE(r, r2);
+		if (RET_WAS_ERROR(r2))
+			break;
+	}
+	r2 = package_closeiterator(&iterator);
+	RET_UPDATE(r, r2);
+
+	if (RET_WAS_ERROR(r)) {
+		upgradelist_free(upgrade);
+		return r;
+	}
+
+	/* reset the insertion cursor before index files are fed in */
+	upgrade->last = NULL;
+
+	*ul = upgrade;
+	return RET_OK;
+}
+
+/* Free an upgrade list and all package nodes it contains.
+ * Safe to call with NULL. */
+void upgradelist_free(struct upgradelist *upgrade) {
+	struct package_data *p, *next;
+
+	if (upgrade == NULL)
+		return;
+
+	for (p = upgrade->list ; p != NULL ; p = next) {
+		next = p->next;
+		package_data_free(p);
+	}
+
+	free(upgrade);
+}
+
+static retvalue upgradelist_trypackage(struct upgradelist *upgrade, void *privdata, upgrade_decide_function *predecide, void *predecide_data, struct package *package) {
+ char *version;
+ retvalue r;
+ upgrade_decision decision;
+ struct package_data *current, *insertafter;
+
+
+ if (package->architecture == architecture_all) {
+ if (upgrade->target->packagetype == pt_dsc) {
+ fputs("Internal error: trying to put binary ('all')"
+ " package into source architecture!\n",
+ stderr);
+ return RET_ERROR_INTERNAL;
+ }
+ }
+
+ version = package_dupversion(package);
+ if (FAILEDTOALLOC(version))
+ return RET_ERROR_OOM;
+
+ /* insertafter = NULL will mean insert before list */
+ insertafter = upgrade->last;
+ /* the next one to test, current = NULL will mean not found */
+ if (insertafter != NULL)
+ current = insertafter->next;
+ else
+ current = upgrade->list;
+
+ /* the algorithm assumes almost all packages are feed in
+ * alphabetically. So the next package will likely be quite
+ * after the last one. Otherwise we walk down the long list
+ * again and again... and again... and even some more...*/
+
+ while (true) {
+ int cmp;
+
+ assert (insertafter == NULL || insertafter->next == current);
+ assert (insertafter != NULL || current == upgrade->list);
+
+ if (current == NULL)
+ cmp = -1; /* every package is before the end of list */
+ else
+ cmp = strcmp(package->name, current->name);
+
+ if (cmp == 0)
+ break;
+
+ if (cmp < 0) {
+ int precmp;
+
+ if (insertafter == NULL) {
+ /* if we are before the first
+ * package, add us there...*/
+ current = NULL;
+ break;
+ }
+ // I only hope no one creates indices anti-sorted:
+ precmp = strcmp(package->name, insertafter->name);
+ if (precmp == 0) {
+ current = insertafter;
+ break;
+ } else if (precmp < 0) {
+ /* restart at the beginning: */
+ current = upgrade->list;
+ insertafter = NULL;
+ if (verbose > 10) {
+ fprintf(stderr, "restarting search...");
+ }
+ continue;
+ } else { // precmp > 0
+ /* insert after insertafter: */
+ current = NULL;
+ break;
+ }
+ assert ("This is not reached" == NULL);
+ }
+ /* cmp > 0 : may come later... */
+ assert (current != NULL);
+ insertafter = current;
+ current = current->next;
+ if (current == NULL) {
+ /* add behind insertafter at end of list */
+ break;
+ }
+ /* otherwise repeat until place found */
+ }
+ if (current == NULL) {
+ /* adding a package not yet known */
+ struct package_data *new;
+ char *newcontrol;
+
+ decision = predecide(predecide_data, upgrade->target,
+ package, NULL);
+ if (decision != UD_UPGRADE) {
+ upgrade->last = insertafter;
+ if (decision == UD_LOUDNO)
+ fprintf(stderr,
+"Loudly rejecting '%s' '%s' to enter '%s'!\n",
+ package->name, version,
+ upgrade->target->identifier);
+ free(version);
+ return (decision==UD_ERROR)?RET_ERROR:RET_NOTHING;
+ }
+
+ new = zNEW(struct package_data);
+ if (FAILEDTOALLOC(new)) {
+ free(version);
+ return RET_ERROR_OOM;
+ }
+ new->deleted = false; //to be sure...
+ new->privdata = privdata;
+ new->name = strdup(package->name);
+ if (FAILEDTOALLOC(new->name)) {
+ free(version);
+ free(new);
+ return RET_ERROR_OOM;
+ }
+ new->new_version = version;
+ new->version = version;
+ new->architecture = package->architecture;
+ version = NULL; //to be sure...
+ r = upgrade->target->getinstalldata(upgrade->target,
+ package,
+ &new->new_control, &new->new_filekeys,
+ &new->new_origfiles);
+ if (RET_WAS_ERROR(r)) {
+ package_data_free(new);
+ return r;
+ }
+ /* apply override data */
+ r = upgrade->target->doreoverride(upgrade->target,
+ new->name, new->new_control, &newcontrol);
+ if (RET_WAS_ERROR(r)) {
+ package_data_free(new);
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ free(new->new_control);
+ new->new_control = newcontrol;
+ }
+ if (insertafter != NULL) {
+ new->next = insertafter->next;
+ insertafter->next = new;
+ } else {
+ new->next = upgrade->list;
+ upgrade->list = new;
+ }
+ upgrade->last = new;
+ } else {
+ /* The package already exists: */
+ char *control, *newcontrol;
+ struct strlist files;
+ struct checksumsarray origfiles;
+ int versioncmp;
+
+ upgrade->last = current;
+
+ r = dpkgversions_cmp(version, current->version, &versioncmp);
+ if (RET_WAS_ERROR(r)) {
+ free(version);
+ return r;
+ }
+ if (versioncmp <= 0 && !current->deleted) {
+ /* there already is a newer version, so
+ * doing nothing but perhaps updating what
+ * versions are around, when we are newer
+ * than yet known candidates... */
+ int c = 0;
+
+ if (current->new_version == current->version)
+ c =versioncmp;
+ else if (current->new_version == NULL)
+ c = 1;
+ else (void)dpkgversions_cmp(version,
+ current->new_version, &c);
+
+ if (c > 0) {
+ free(current->new_version);
+ current->new_version = version;
+ } else
+ free(version);
+
+ return RET_NOTHING;
+ }
+ if (versioncmp > 0 && verbose > 30)
+ fprintf(stderr,
+"'%s' from '%s' is newer than '%s' currently\n",
+ version, package->name, current->version);
+ decision = predecide(predecide_data, upgrade->target,
+ package, current->version);
+ if (decision != UD_UPGRADE) {
+ if (decision == UD_LOUDNO)
+ fprintf(stderr,
+"Loudly rejecting '%s' '%s' to enter '%s'!\n",
+ package->name, version,
+ upgrade->target->identifier);
+ /* Even if we do not install it, setting it on hold
+ * will keep it or even install from a mirror before
+ * the delete was applied */
+ if (decision == UD_HOLD)
+ current->deleted = false;
+ free(version);
+ /* while supersede will remove the current package */
+ if (decision == UD_SUPERSEDE) {
+ current->deleted = true;
+ return RET_OK;
+ }
+ return (decision==UD_ERROR)?RET_ERROR:RET_NOTHING;
+ }
+
+ if (versioncmp == 0) {
+ /* we are replacing a package with the same version,
+ * so we keep the old one for sake of speed. */
+ if (current->deleted &&
+ current->version != current->new_version) {
+ /* remember the version for checkupdate/pull */
+ free(current->new_version);
+ current->new_version = version;
+ } else
+ free(version);
+ current->deleted = false;
+ return RET_NOTHING;
+ }
+ if (versioncmp != 0 && current->version == current->new_version
+ && current->version_in_use != NULL) {
+ /* The version to include is not the newest after the
+ * last deletion round), but maybe older, maybe newer.
+ * So we get to the question: it is also not the same
+ * like the version we already have? */
+ int vcmp = 1;
+ (void)dpkgversions_cmp(version,
+ current->version_in_use, &vcmp);
+ if (vcmp == 0) {
+ current->version = current->version_in_use;
+ if (current->deleted) {
+ free(current->new_version);
+ current->new_version = version;
+ } else
+ free(version);
+ current->deleted = false;
+ return RET_NOTHING;
+ }
+ }
+
+// TODO: the following case might be worth considering, but sadly new_version
+// might have changed without the proper data set.
+// if (versioncmp >= 0 && current->version == current->version_in_use
+// && current->new_version != NULL)
+
+ current->architecture = package->architecture;
+ r = upgrade->target->getinstalldata(upgrade->target,
+ package,
+ &control, &files, &origfiles);
+ if (RET_WAS_ERROR(r)) {
+ free(version);
+ return r;
+ }
+ /* apply override data */
+ r = upgrade->target->doreoverride(upgrade->target,
+ package->name, control, &newcontrol);
+ if (RET_WAS_ERROR(r)) {
+ free(version);
+ free(control);
+ strlist_done(&files);
+ checksumsarray_done(&origfiles);
+ return r;
+ }
+ if (RET_IS_OK(r)) {
+ free(control);
+ control = newcontrol;
+ }
+ current->deleted = false;
+ free(current->new_version);
+ current->new_version = version;
+ current->version = version;
+ current->privdata = privdata;
+ strlist_move(&current->new_filekeys, &files);
+ checksumsarray_move(&current->new_origfiles, &origfiles);
+ free(current->new_control);
+ current->new_control = control;
+ }
+ return RET_OK;
+}
+
+/* Read all packages from the downloaded index file 'filename' and offer
+ * each one (via upgradelist_trypackage) for inclusion in the upgrade list.
+ * 'decide' is consulted for every candidate; 'privdata' is remembered as
+ * its origin. Stops at the first hard error or on interruption. */
+retvalue upgradelist_update(struct upgradelist *upgrade, void *privdata, const char *filename, upgrade_decide_function *decide, void *decide_data, bool ignorewrongarchitecture) {
+	struct indexfile *i;
+	struct package package;
+	retvalue result, r;
+
+	r = indexfile_open(&i, filename, c_none);
+	if (!RET_IS_OK(r))
+		return r;
+
+	result = RET_NOTHING;
+	/* reset the insertion hint: a new index file is a new sorted stream */
+	upgrade->last = NULL;
+	setzero(struct package, &package);
+	while (indexfile_getnext(i, &package,
+			upgrade->target, ignorewrongarchitecture)) {
+		r = package_getsource(&package);
+		if (RET_IS_OK(r)) {
+			r = upgradelist_trypackage(upgrade, privdata,
+					decide, decide_data, &package);
+			RET_UPDATE(result, r);
+		}
+		package_done(&package);
+		/* a hard error (from getsource or trypackage) aborts the file */
+		if (RET_WAS_ERROR(r)) {
+			if (verbose > 0)
+				fprintf(stderr,
+"Stop reading further chunks from '%s' due to previous errors.\n", filename);
+			break;
+		}
+		if (interrupted()) {
+			result = RET_ERROR_INTERRUPTED;
+			break;
+		}
+	}
+	r = indexfile_close(i);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Like upgradelist_update, but feed packages from another target 'source'
+ * of the same packagetype (pulling between distributions) instead of from
+ * a downloaded index file. Packages whose architecture is neither 'all'
+ * nor the target's own are skipped. */
+retvalue upgradelist_pull(struct upgradelist *upgrade, struct target *source, upgrade_decide_function *predecide, void *decide_data, void *privdata) {
+	retvalue result, r;
+	struct package_cursor iterator;
+
+	/* reset the insertion hint for a new sorted stream */
+	upgrade->last = NULL;
+	r = package_openiterator(source, READONLY, true, &iterator);
+	if (RET_WAS_ERROR(r))
+		return r;
+	result = RET_NOTHING;
+	while (package_next(&iterator)) {
+		assert (source->packagetype == upgrade->target->packagetype);
+
+		r = package_getversion(&iterator.current);
+		assert (r != RET_NOTHING);
+		if (!RET_IS_OK(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+		r = package_getarchitecture(&iterator.current);
+		if (!RET_IS_OK(r)) {
+			RET_UPDATE(result, r);
+			break;
+		}
+		/* only 'all' and matching-architecture packages qualify */
+		if (iterator.current.architecture != architecture_all &&
+				iterator.current.architecture !=
+				upgrade->target->architecture) {
+			continue;
+		}
+
+		r = package_getsource(&iterator.current);
+		if (RET_IS_OK(r)) {
+			r = upgradelist_trypackage(upgrade, privdata,
+					predecide, decide_data,
+					&iterator.current);
+			RET_UPDATE(result, r);
+		}
+		if (RET_WAS_ERROR(r))
+			break;
+		if (interrupted()) {
+			result = RET_ERROR_INTERRUPTED;
+			break;
+		}
+	}
+	r = package_closeiterator(&iterator);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* mark all packages as deleted, so they will vanis unless readded or reholded */
+retvalue upgradelist_deleteall(struct upgradelist *upgrade) {
+ struct package_data *pkg;
+
+ for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+ pkg->deleted = true;
+ }
+
+ return RET_OK;
+}
+
+/* request all wanted files in the downloadlists given before:
+ * for every package that is to be newly installed or upgraded (its wanted
+ * version equals the new candidate and it is not marked deleted), call
+ * 'action' with its checksums and filekeys so the files get queued. */
+retvalue upgradelist_enqueue(struct upgradelist *upgrade, enqueueaction *action, void *calldata) {
+	struct package_data *pkg;
+	retvalue result, r;
+	result = RET_NOTHING;
+	assert(upgrade != NULL);
+	for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+		/* version == new_version marks "to be installed/upgraded" */
+		if (pkg->version == pkg->new_version && !pkg->deleted) {
+			r = action(calldata, &pkg->new_origfiles,
+					&pkg->new_filekeys, pkg->privdata);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+	}
+	return result;
+}
+
+/* delete all packages that will not be kept (i.e. either deleted or upgraded):
+ * remove from the target's package database every package that is currently
+ * installed (version_in_use != NULL) and is either marked for deletion or
+ * will be replaced by a new candidate version. */
+retvalue upgradelist_predelete(struct upgradelist *upgrade, struct logger *logger) {
+	struct package_data *pkg;
+	retvalue result, r;
+	result = RET_NOTHING;
+	assert(upgrade != NULL);
+
+	result = target_initpackagesdb(upgrade->target, READWRITE);
+	if (RET_WAS_ERROR(result))
+		return result;
+	for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+		/* installed and (to be upgraded or to be deleted)? */
+		if (pkg->version_in_use != NULL &&
+				(pkg->version == pkg->new_version
+				 || pkg->deleted)) {
+			if (interrupted())
+				r = RET_ERROR_INTERRUPTED;
+			else
+				r = target_removepackage(upgrade->target,
+						logger, pkg->name, NULL, NULL);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+	}
+	r = target_closepackagesdb(upgrade->target);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+bool upgradelist_isbigdelete(const struct upgradelist *upgrade) {
+ struct package_data *pkg;
+ long long deleted = 0, all = 0;
+
+ if (upgrade->list == NULL)
+ return false;
+ for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+ if (pkg->version_in_use == NULL)
+ continue;
+ all++;
+ if (pkg->deleted)
+ deleted++;
+ }
+ return deleted >= 10 && all/deleted < 5;
+}
+
+bool upgradelist_woulddelete(const struct upgradelist *upgrade) {
+ struct package_data *pkg;
+
+ if (upgrade->list == NULL)
+ return false;
+ for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+ if (pkg->version_in_use == NULL)
+ continue;
+ if (pkg->deleted)
+ return true;
+ }
+ return false;
+}
+
+/* Write the computed changes into the package database: add every package
+ * whose new candidate was accepted (and whose files are all available),
+ * and -- unless 'ignoredelete' -- remove every installed package that is
+ * marked deleted. 'callback' provides the causing rule/suite of an
+ * addition for logging purposes. */
+retvalue upgradelist_install(struct upgradelist *upgrade, struct logger *logger, bool ignoredelete, void (*callback)(void *, const char **, const char **)){
+	struct package_data *pkg;
+	retvalue result, r;
+
+	if (upgrade->list == NULL)
+		return RET_NOTHING;
+
+	result = target_initpackagesdb(upgrade->target, READWRITE);
+	if (RET_WAS_ERROR(result))
+		return result;
+	result = RET_NOTHING;
+	for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+		/* version == new_version marks "to be installed/upgraded" */
+		if (pkg->version == pkg->new_version && !pkg->deleted) {
+			char *newcontrol;
+
+			assert ((pkg->architecture == architecture_all &&
+				upgrade->target->packagetype != pt_dsc)
+				|| pkg->architecture ==
+					upgrade->target->architecture);
+
+			/* verify all listed files are present (possibly
+			 * improving stored checksums) */
+			r = files_checkorimprove(&pkg->new_filekeys,
+					pkg->new_origfiles.checksums);
+			if (! RET_WAS_ERROR(r)) {
+
+				/* add any missing checksum types to the
+				 * control chunk */
+				r = upgrade->target->completechecksums(
+						pkg->new_control,
+						&pkg->new_filekeys,
+						pkg->new_origfiles.checksums,
+						&newcontrol);
+				assert (r != RET_NOTHING);
+			}
+			if (! RET_WAS_ERROR(r)) {
+				/* upgrade (or possibly downgrade) */
+				const char *causingrule = NULL,
+					*suitefrom = NULL;
+
+				free(pkg->new_control);
+				pkg->new_control = newcontrol;
+				newcontrol = NULL;
+				callback(pkg->privdata,
+						&causingrule, &suitefrom);
+// TODO: trackingdata?
+				if (interrupted())
+					r = RET_ERROR_INTERRUPTED;
+				else
+					r = target_addpackage(upgrade->target,
+						logger, pkg->name,
+						pkg->new_version,
+						pkg->new_control,
+						&pkg->new_filekeys, true,
+						NULL, pkg->architecture,
+						causingrule, suitefrom);
+			}
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+		/* only remove packages that actually are installed */
+		if (pkg->deleted && pkg->version_in_use != NULL
+				&& !ignoredelete) {
+			if (interrupted())
+				r = RET_ERROR_INTERRUPTED;
+			else
+				r = target_removepackage(upgrade->target,
+					logger, pkg->name, NULL, NULL);
+			RET_UPDATE(result, r);
+			if (RET_WAS_ERROR(r))
+				break;
+		}
+	}
+	r = target_closepackagesdb(upgrade->target);
+	RET_ENDUPDATE(result, r);
+	return result;
+}
+
+/* Report every package in the list through 'action'. The argument pattern
+ * encodes the kind of change: deleted packages pass NULL as new version,
+ * kept packages pass the installed version twice, and upgraded/new
+ * packages additionally pass filekeys and control data of the candidate. */
+void upgradelist_dump(struct upgradelist *upgrade, dumpaction action){
+	struct package_data *pkg;
+
+	assert(upgrade != NULL);
+
+	for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) {
+		if (interrupted())
+			return;
+		if (pkg->deleted)
+			/* to be removed */
+			action(pkg->name, pkg->version_in_use,
+					NULL, pkg->new_version,
+					NULL, NULL, pkg->privdata);
+		else if (pkg->version == pkg->version_in_use)
+			/* kept at the installed version */
+			action(pkg->name, pkg->version_in_use,
+					pkg->version_in_use, pkg->new_version,
+					NULL, NULL, pkg->privdata);
+		else
+			/* upgraded or newly added */
+			action(pkg->name, pkg->version_in_use,
+					pkg->new_version, NULL,
+					&pkg->new_filekeys, pkg->new_control,
+					pkg->privdata);
+	}
+}
diff --git a/upgradelist.h b/upgradelist.h
new file mode 100644
index 0000000..a0f63bd
--- /dev/null
+++ b/upgradelist.h
@@ -0,0 +1,45 @@
+#ifndef REPREPRO_UPGRADELIST_H
+#define REPREPRO_UPGRADELIST_H
+
+/* Things for making decisions what to upgrade and what not */
+
+typedef enum { UD_ERROR, UD_LOUDNO, UD_NO, UD_UPGRADE, UD_HOLD, UD_SUPERSEDE } upgrade_decision;
+
+struct package;
+typedef upgrade_decision upgrade_decide_function(void *privdata, struct target *, struct package *, /*@null@*/ const char */*oldversion*/);
+
+/* The main part: */
+
+struct target;
+struct logger;
+struct upgradelist;
+
+retvalue upgradelist_initialize(struct upgradelist **, /*@dependent@*/struct target *);
+void upgradelist_free(/*@only@*/struct upgradelist *);
+
+typedef void dumpaction(const char */*packagename*/, /*@null@*/const char */*oldversion*/, /*@null@*/const char */*newversion*/, /*@null@*/const char */*bestcandidate*/, /*@null@*/const struct strlist */*newfilekeys*/, /*@null@*/const char */*newcontrol*/, void *);
+
+void upgradelist_dump(struct upgradelist *, dumpaction *);
+
+/* Take all items in 'filename' into account, and remember them coming from 'method' */
+retvalue upgradelist_update(struct upgradelist *, /*@dependent@*/void *, const char * /*filename*/, upgrade_decide_function *, void *, bool /*ignorewrongarchitecture*/);
+
+/* Take all items in source into account */
+retvalue upgradelist_pull(struct upgradelist *, struct target *, upgrade_decide_function *, void *, void *);
+
+/* mark all packages as deleted, so they will vanish unless re-added or re-held */
+retvalue upgradelist_deleteall(struct upgradelist *);
+
+typedef retvalue enqueueaction(void *, const struct checksumsarray *, const struct strlist *, void *);
+/* request all wanted files referring to the methods given before */
+retvalue upgradelist_enqueue(struct upgradelist *, enqueueaction *, void *);
+
+bool upgradelist_isbigdelete(const struct upgradelist *);
+bool upgradelist_woulddelete(const struct upgradelist *);
+
+retvalue upgradelist_install(struct upgradelist *, /*@null@*/struct logger *, bool /*ignoredelete*/, void (*)(void *, const char **, const char **));
+
+/* remove all packages that would either be removed or upgraded by an upgrade */
+retvalue upgradelist_predelete(struct upgradelist *, /*@null@*/struct logger *);
+
+#endif
diff --git a/uploaderslist.c b/uploaderslist.c
new file mode 100644
index 0000000..3e50a36
--- /dev/null
+++ b/uploaderslist.c
@@ -0,0 +1,1520 @@
+/* This file is part of "reprepro"
+ * Copyright (C) 2005,2006,2007,2009,2011 Bernhard R. Link
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
+ */
+#include <config.h>
+
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <alloca.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#include "error.h"
+#include "mprintf.h"
+#include "strlist.h"
+#include "names.h"
+#include "atoms.h"
+#include "signature.h"
+#include "globmatch.h"
+#include "uploaderslist.h"
+#include "configparser.h"
+#include "ignore.h"
+
+struct upload_condition {
+	/* linked list of all sub-nodes */
+	/*@null@*/struct upload_condition *next;
+
+	/* what this node tests (uc_ALWAYS, uc_SECTIONS, ...) */
+	enum upload_condition_type type;
+	/* where evaluation continues, depending on this node's outcome */
+	const struct upload_condition *next_if_true, *next_if_false;
+	/* if set, the corresponding outcome accepts the upload outright */
+	bool accept_if_true, accept_if_false;
+	enum {
+		/* none matching means false, at least one being from
+		 * the set means true */
+		needs_any = 0,
+		/* one not matching means false, otherwise true */
+		needs_all,
+		/* one not matching means false,
+		 * otherwise true iff there is at least one */
+		needs_existsall,
+		/* having a candidate means true, otherwise false */
+		needs_anycandidate
+	} needs;
+	union {
+		/* uc_SECTIONS, uc_BINARIES, uc_SOURCENAME, uc_BYHAND,
+		 * uc_CODENAME, */
+		struct strlist strings;
+		/* uc_COMPONENTS, uc_ARCHITECTURES */
+		struct atomlist atoms;
+	};
+};
+/* iteration state while checking one upload against all applicable
+ * permission trees */
+struct upload_conditions {
+	/* condition currently tested */
+	const struct upload_condition *current;
+	/* current state of top most condition */
+	bool matching;
+	/* top most condition will not be true unless cleared*/
+	bool needscandidate;
+	/* always use last next, then decrement */
+	int count;
+	/* the collected permission trees: conditions[0 .. count-1] */
+	const struct upload_condition *conditions[];
+};
+
+/* Append condition tree 'a' to the collection in *c_p, growing the
+ * flexible-array container as needed. *c_p may be NULL (empty) on entry.
+ * Empty (uc_REJECTED) trees are not stored. */
+static retvalue upload_conditions_add(struct upload_conditions **c_p, const struct upload_condition *a) {
+	int newcount;
+	struct upload_conditions *n;
+
+	if (a->type == uc_REJECTED) {
+		/* due to groups, there can be empty conditions.
+		 * Don't include those in this list... */
+		return RET_OK;
+	}
+
+	if (*c_p == NULL)
+		newcount = 1;
+	else
+		newcount = (*c_p)->count + 1;
+	/* realloc(NULL, ...) acts as malloc for the first entry */
+	n = realloc(*c_p, sizeof(struct upload_conditions)
+			+ newcount * sizeof(const struct upload_condition*));
+	if (FAILEDTOALLOC(n))
+		return RET_ERROR_OOM;
+	n->current = NULL;
+	n->count = newcount;
+	n->conditions[newcount - 1] = a;
+	*c_p = n;
+	return RET_OK;
+}
+
+struct fileposition {
+ const struct filebeingparsed {
+ struct filebeingparsed *next, *includedby;
+ char *filename;
+ unsigned long lineno;
+ FILE *f;
+ int depth;
+ } *file;
+ unsigned long lineno;
+};
+
+#define set_position(at, fbp) ({ \
+ (at).file = fbp; \
+ (at).lineno = fbp->lineno; \
+})
+#define unset_pos(fileposition) ((fileposition).lineno == 0)
+#define errorcol(fbp, column, format, ...) ({ \
+ fprintf(stderr, "%s:%lu:%u: ", (fbp)->filename, (fbp)->lineno, (column)); \
+ fprintf(stderr, format "\n" , ## __VA_ARGS__); \
+ print_include_trace((fbp)->includedby); \
+})
+#define errorline(fbp, format, ...) ({ \
+ fprintf(stderr, "%s:%lu: ", (fbp)->filename, (fbp)->lineno); \
+ fprintf(stderr, format "\n" , ## __VA_ARGS__); \
+ print_include_trace((fbp)->includedby); \
+})
+#define errorpos(pos, format, ...) ({ \
+ fprintf(stderr, "%s:%lu: ", (pos).file->filename, (pos).lineno); \
+ fprintf(stderr, format "\n" , ## __VA_ARGS__); \
+})
+
+static void print_include_trace(struct filebeingparsed *includedby) {
+ for ( ; includedby != NULL ; includedby = includedby->includedby ) {
+ fprintf(stderr, "included from '%s' line %lu\n",
+ includedby->filename,
+ includedby->lineno);
+ }
+}
+
+
+struct uploadergroup {
+ struct uploadergroup *next;
+ size_t len;
+ char *name;
+ /* NULL terminated list of pointers, or NULL for none */
+ const struct uploadergroup **memberof;
+ struct upload_condition permissions;
+ /* line numbers (if != 0) to allow some diagnostics */
+ struct fileposition firstmemberat, emptyat, firstusedat, unusedat;
+};
+
+struct uploader {
+ struct uploader *next;
+ /* NULL terminated list of pointers, or NULL for none */
+ const struct uploadergroup **memberof;
+ size_t len;
+ char *reversed_fingerprint;
+ struct upload_condition permissions;
+ bool allow_subkeys;
+};
+
+static struct uploaders {
+ struct uploaders *next;
+ size_t reference_count;
+ char *filename;
+ size_t filename_len;
+
+ struct uploadergroup *groups;
+ struct uploader *by_fingerprint;
+ struct upload_condition anyvalidkeypermissions;
+ struct upload_condition unsignedpermissions;
+ struct upload_condition anybodypermissions;
+} *uploaderslists = NULL;
+
+/* Release everything owned by a condition list. The payload of every
+ * node is freed; the node structures themselves are freed for all but
+ * the first one, since 'p' is embedded in its owning struct (see the
+ * callers in uploaders_free etc.) and must not be passed to free(). */
+static void uploadpermission_release(struct upload_condition *p) {
+	struct upload_condition *h, *f = NULL;
+
+	assert (p != NULL);
+
+	do {
+		h = p->next;
+		/* release the payload according to the node type */
+		switch (p->type) {
+			case uc_BINARIES:
+			case uc_SECTIONS:
+			case uc_SOURCENAME:
+			case uc_BYHAND:
+			case uc_CODENAME:
+				strlist_done(&p->strings);
+				break;
+
+			case uc_ARCHITECTURES:
+				atomlist_done(&p->atoms);
+				break;
+
+			case uc_ALWAYS:
+			case uc_REJECTED:
+				break;
+		}
+		/* f is NULL in the first round, so free(f) is a no-op then */
+		free(f);
+		/* next one must be freed: */
+		f = h;
+		/* and processed: */
+		p = h;
+	} while (p != NULL);
+}
+
+static void uploadergroup_free(struct uploadergroup *u) {
+ if (u == NULL)
+ return;
+ free(u->name);
+ free(u->memberof);
+ uploadpermission_release(&u->permissions);
+ free(u);
+}
+
+static void uploader_free(struct uploader *u) {
+ if (u == NULL)
+ return;
+ free(u->reversed_fingerprint);
+ free(u->memberof);
+ uploadpermission_release(&u->permissions);
+ free(u);
+}
+
+static void uploaders_free(struct uploaders *u) {
+ if (u == NULL)
+ return;
+ while (u->by_fingerprint != NULL) {
+ struct uploader *next = u->by_fingerprint->next;
+
+ uploader_free(u->by_fingerprint);
+ u->by_fingerprint = next;
+ }
+ while (u->groups != NULL) {
+ struct uploadergroup *next = u->groups->next;
+
+ uploadergroup_free(u->groups);
+ u->groups = next;
+ }
+ uploadpermission_release(&u->anyvalidkeypermissions);
+ uploadpermission_release(&u->anybodypermissions);
+ uploadpermission_release(&u->unsignedpermissions);
+ free(u->filename);
+ free(u);
+}
+
+/* Drop one reference to 'u'; when the last reference is released, unlink
+ * it from the global uploaderslists chain and free it. */
+void uploaders_unlock(struct uploaders *u) {
+	if (u->reference_count > 1) {
+		u->reference_count--;
+	} else {
+		struct uploaders **p = &uploaderslists;
+
+		assert (u->reference_count == 1);
+		/* avoid double free: */
+		/* (defensive: unreachable unless the assert above is
+		 * compiled out and the count is already 0) */
+		if (u->reference_count == 0)
+			return;
+
+		/* find and unlink u in the global chain */
+		while (*p != NULL && *p != u)
+			p = &(*p)->next;
+		assert (p != NULL && *p == u);
+		if (*p == u) {
+			*p = u->next;
+			uploaders_free(u);
+		}
+	}
+}
+
+/* Add the permission trees of all groups in the NULL-terminated array
+ * 'groups', and recursively those of the groups they are members of.
+ * NOTE(review): assumes the membership graph is acyclic -- a cycle would
+ * recurse forever; presumably the config parser prevents cycles. */
+static retvalue upload_conditions_add_group(struct upload_conditions **c_p, const struct uploadergroup **groups) {
+	const struct uploadergroup *group;
+	retvalue r;
+
+	while ((group = *(groups++)) != NULL) {
+		r = upload_conditions_add(c_p, &group->permissions);
+		if (!RET_WAS_ERROR(r) && group->memberof != NULL)
+			r = upload_conditions_add_group(c_p, group->memberof);
+		if (RET_WAS_ERROR(r))
+			return r;
+	}
+	return RET_OK;
+}
+
+static retvalue find_key_and_add(struct uploaders *u, struct upload_conditions **c_p, const struct signature *s) {
+ size_t len, i, primary_len;
+ char *reversed;
+ const char *fingerprint, *primary_fingerprint;
+ char *reversed_primary_key;
+ const struct uploader *uploader;
+ retvalue r;
+
+ assert (u != NULL);
+
+ fingerprint = s->keyid;
+ assert (fingerprint != NULL);
+ len = strlen(fingerprint);
+ reversed = alloca(len+1);
+ if (FAILEDTOALLOC(reversed))
+ return RET_ERROR_OOM;
+ for (i = 0 ; i < len ; i++) {
+ char c = fingerprint[len-i-1];
+ if (c >= 'a' && c <= 'f')
+ c -= 'a' - 'A';
+ else if (c == 'x' && len-i-1 == 1 && fingerprint[0] == '0')
+ break;
+ if ((c < '0' || c > '9') && (c <'A' || c > 'F')) {
+ fprintf(stderr,
+"Strange character '%c'(=%hhu) in fingerprint '%s'.\n"
+"Search for appropriate rules in the uploaders file might fail.\n",
+ c, c, fingerprint);
+ break;
+ }
+ reversed[i] = c;
+ }
+ len = i;
+ reversed[len] = '\0';
+
+ /* hm, this only sees the key is expired when it is kind of late... */
+ primary_fingerprint = s->primary_keyid;
+ primary_len = strlen(primary_fingerprint);
+ reversed_primary_key = alloca(len+1);
+ if (FAILEDTOALLOC(reversed_primary_key))
+ return RET_ERROR_OOM;
+
+ for (i = 0 ; i < primary_len ; i++) {
+ char c = primary_fingerprint[primary_len-i-1];
+ if (c >= 'a' && c <= 'f')
+ c -= 'a' - 'A';
+ else if (c == 'x' && primary_len-i-1 == 1 &&
+ primary_fingerprint[0] == '0')
+ break;
+ if ((c < '0' || c > '9') && (c <'A' || c > 'F')) {
+ fprintf(stderr,
+"Strange character '%c'(=%hhu) in fingerprint/key-id '%s'.\n"
+"Search for appropriate rules in the uploaders file might fail.\n",
+ c, c, primary_fingerprint);
+ break;
+ }
+ reversed_primary_key[i] = c;
+ }
+ primary_len = i;
+ reversed_primary_key[primary_len] = '\0';
+
+ for (uploader = u->by_fingerprint ; uploader != NULL ;
+ uploader = uploader->next) {
+ /* TODO: allow ignoring */
+ if (s->state != sist_valid)
+ continue;
+ if (uploader->allow_subkeys) {
+ if (uploader->len > primary_len)
+ continue;
+ if (memcmp(uploader->reversed_fingerprint,
+ reversed_primary_key,
+ uploader->len) != 0)
+ continue;
+ } else {
+ if (uploader->len > len)
+ continue;
+ if (memcmp(uploader->reversed_fingerprint,
+ reversed, uploader->len) != 0)
+ continue;
+ }
+ r = upload_conditions_add(c_p, &uploader->permissions);
+ if (!RET_WAS_ERROR(r) && uploader->memberof != NULL)
+ r = upload_conditions_add_group(c_p,
+ uploader->memberof);
+ if (RET_WAS_ERROR(r))
+ return r;
+ /* no break here, as a key might match
+ * multiple specifications of different length */
+ }
+ return RET_OK;
+}
+
+/* Collect into *c_p all permission trees applying to an upload carrying
+ * the given signatures: the "anybody" rules, the "unsigned" rules (if
+ * there is no signature block at all), the "any valid key" rules (if at
+ * least one signature verified), and the rules of each listed key. */
+retvalue uploaders_permissions(struct uploaders *u, const struct signatures *signatures, struct upload_conditions **c_p) {
+	struct upload_conditions *conditions = NULL;
+	retvalue r;
+	int j;
+
+	r = upload_conditions_add(&conditions,
+			&u->anybodypermissions);
+	if (RET_WAS_ERROR(r))
+		return r;
+	if (signatures == NULL) {
+		/* signatures.count might be 0 meaning there is
+		 * something like a gpg header but we could not get
+		 * keys, because of a gpg error or because of being
+		 * compiling without libgpgme */
+		r = upload_conditions_add(&conditions,
+				&u->unsignedpermissions);
+		if (RET_WAS_ERROR(r)) {
+			free(conditions);
+			return r;
+		}
+	}
+	if (signatures != NULL && signatures->validcount > 0) {
+		r = upload_conditions_add(&conditions,
+				&u->anyvalidkeypermissions);
+		if (RET_WAS_ERROR(r)) {
+			free(conditions);
+			return r;
+		}
+	}
+	if (signatures != NULL) {
+		/* add the rules of each individually listed signing key */
+		for (j = 0 ; j < signatures->count ; j++) {
+			r = find_key_and_add(u, &conditions,
+					&signatures->signatures[j]);
+			if (RET_WAS_ERROR(r)) {
+				free(conditions);
+				return r;
+			}
+		}
+	}
+	*c_p = conditions;
+	return RET_OK;
+}
+
+/* uc_FAILED means rejected, uc_ACCEPTED means can go in */
+/* Advance the state machine over all collected permission trees:
+ * resolve the condition just evaluated (if any), then return uc_ACCEPTED
+ * if some tree accepts, uc_REJECTED when all trees are exhausted, or the
+ * type of the next non-trivial condition the caller must feed through
+ * uploaders_verifystring()/uploaders_verifyatom(). */
+enum upload_condition_type uploaders_nextcondition(struct upload_conditions *c) {
+
+	if (c->current != NULL) {
+		/* a condition still waiting for a candidate
+		 * (needscandidate) counts as false */
+		if (c->matching && !c->needscandidate) {
+			if (c->current->accept_if_true)
+				return uc_ACCEPTED;
+			c->current = c->current->next_if_true;
+		} else {
+			if (c->current->accept_if_false)
+				return uc_ACCEPTED;
+			c->current = c->current->next_if_false;
+		}
+	}
+
+	/* return the first non-trivial one left: */
+	while (true) {
+		while (c->current != NULL) {
+			assert (c->current->type > uc_REJECTED);
+			if (c->current->type == uc_ALWAYS) {
+				if (c->current->accept_if_true)
+					return uc_ACCEPTED;
+				c->current = c->current->next_if_true;
+			} else {
+				/* empty set fulfills all conditions,
+				   but not an exists condition */
+				switch (c->current->needs) {
+					case needs_any:
+						c->matching = false;
+						c->needscandidate = false;
+						break;
+					case needs_all:
+						c->matching = true;
+						c->needscandidate = false;
+						break;
+					case needs_existsall:
+					case needs_anycandidate:
+						c->matching = true;
+						c->needscandidate = true;
+						break;
+				}
+				return c->current->type;
+			}
+		}
+		if (c->count == 0)
+			return uc_REJECTED;
+		/* move on to the next collected tree (last first) */
+		c->count--;
+		c->current = c->conditions[c->count];
+	}
+	/* not reached */
+}
+
+static bool match_namecheck(const struct strlist *strings, const char *name) {
+ int i;
+
+ for (i = 0 ; i < strings->count ; i++) {
+ if (globmatch(name, strings->values[i]))
+ return true;
+ }
+ return false;
+}
+
+bool uploaders_verifystring(struct upload_conditions *conditions, const char *name) {
+ const struct upload_condition *c = conditions->current;
+
+ assert (c != NULL);
+ assert (c->type == uc_BINARIES || c->type == uc_SECTIONS ||
+ c->type == uc_CODENAME ||
+ c->type == uc_SOURCENAME || c->type == uc_BYHAND);
+
+ conditions->needscandidate = false;
+ switch (conditions->current->needs) {
+ case needs_all:
+ case needs_existsall:
+ /* once one condition is false, the case is settled */
+
+ if (conditions->matching &&
+ !match_namecheck(&c->strings, name))
+ conditions->matching = false;
+ /* but while it is true, more info is needed */
+ return conditions->matching;
+ case needs_any:
+ /* once one condition is true, the case is settled */
+ if (!conditions->matching &&
+ match_namecheck(&c->strings, name))
+ conditions->matching = true;
+ conditions->needscandidate = false;
+ /* but while it is false, more info is needed */
+ return !conditions->matching;
+ case needs_anycandidate:
+ /* we are settled, no more information needed */
+ return false;
+ }
+ /* NOT REACHED */
+ assert (conditions->current->needs != conditions->current->needs);
+}
+
+bool uploaders_verifyatom(struct upload_conditions *conditions, atom_t atom) {
+ const struct upload_condition *c = conditions->current;
+
+ assert (c != NULL);
+ assert (c->type == uc_ARCHITECTURES);
+
+ conditions->needscandidate = false;
+ switch (conditions->current->needs) {
+ case needs_all:
+ case needs_existsall:
+ /* once one condition is false, the case is settled */
+
+ if (conditions->matching &&
+ !atomlist_in(&c->atoms, atom))
+ conditions->matching = false;
+ /* but while it is true, more info is needed */
+ return conditions->matching;
+ case needs_any:
+ /* once one condition is true, the case is settled */
+ if (!conditions->matching &&
+ atomlist_in(&c->atoms, atom))
+ conditions->matching = true;
+ /* but while it is false, more info is needed */
+ return !conditions->matching;
+ case needs_anycandidate:
+ /* we are settled, no more information needed */
+ return false;
+ }
+ /* NOT REACHED */
+ assert (conditions->current->needs != conditions->current->needs);
+}
+
+static struct uploader *addfingerprint(struct uploaders *u, const char *fingerprint, size_t len, bool allow_subkeys) {
+ size_t i;
+ char *reversed = malloc(len+1);
+ struct uploader *uploader, **last;
+
+ if (FAILEDTOALLOC(reversed))
+ return NULL;
+ for (i = 0 ; i < len ; i++) {
+ char c = fingerprint[len-i-1];
+ if (c >= 'a' && c <= 'f')
+ c -= 'a' - 'A';
+ assert ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'));
+ reversed[i] = c;
+ }
+ reversed[len] = '\0';
+ last = &u->by_fingerprint;
+ for (uploader = u->by_fingerprint ;
+ uploader != NULL ;
+ uploader = *(last = &uploader->next)) {
+ if (uploader->len != len)
+ continue;
+ if (memcmp(uploader->reversed_fingerprint, reversed, len) != 0)
+ continue;
+ if (uploader->allow_subkeys != allow_subkeys)
+ continue;
+ free(reversed);
+ return uploader;
+ }
+ assert (*last == NULL);
+ uploader = zNEW(struct uploader);
+ if (FAILEDTOALLOC(uploader))
+ return NULL;
+ *last = uploader;
+ uploader->reversed_fingerprint = reversed;
+ uploader->len = len;
+ uploader->allow_subkeys = allow_subkeys;
+ return uploader;
+}
+
+static struct uploadergroup *addgroup(struct uploaders *u, const char *name, size_t len) {
+ struct uploadergroup *group, **last;
+
+ last = &u->groups;
+ for (group = u->groups ;
+ group != NULL ; group = *(last = &group->next)) {
+ if (group->len != len)
+ continue;
+ if (memcmp(group->name, name, len) != 0)
+ continue;
+ return group;
+ }
+ assert (*last == NULL);
+ group = zNEW(struct uploadergroup);
+ if (FAILEDTOALLOC(group))
+ return NULL;
+ group->name = strndup(name, len);
+ group->len = len;
+ if (FAILEDTOALLOC(group->name)) {
+ free(group);
+ return NULL;
+ }
+ *last = group;
+ return group;
+}
+
/* return the first position at or after p that is not a hexadecimal digit */
static inline const char *overkey(const char *p) {
	while (isxdigit((unsigned char)*p))
		p++;
	return p;
}
+
+/* Parse a '|'-separated list of single-quoted strings (glob patterns),
+ * e.g. 'a*'|'b?', starting at *pp into 'strings'. On success *pp is
+ * advanced behind the parsed list. 'column' is the column of *pp within
+ * the current input line of 'fbp', used for error messages only. */
+static retvalue parse_stringpart(/*@out@*/struct strlist *strings, const char **pp, const struct filebeingparsed *fbp, int column) {
+	const char *p = *pp;
+	retvalue r;
+
+	strlist_init(strings);
+	do {
+		const char *startp, *endp;
+		char *n;
+
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (*p != '\'') {
+			errorcol(fbp, column + (int)(p - *pp),
+"starting \"'\" expected!");
+			return RET_ERROR;
+		}
+		p++;
+		startp = p;
+		/* scan up to the closing quote */
+		while (*p != '\0' && *p != '\'')
+			p++;
+		if (*p == '\0') {
+			errorcol(fbp, column + (int)(p - *pp),
+"closing \"'\" expected!");
+			return RET_ERROR;
+		}
+		assert (*p == '\'');
+		endp = p;
+		p++;
+		n = strndup(startp, endp - startp);
+		if (FAILEDTOALLOC(n))
+			return RET_ERROR_OOM;
+		/* strlist_adduniq takes ownership of n */
+		r = strlist_adduniq(strings, n);
+		if (RET_WAS_ERROR(r))
+			return r;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		/* keep 'column' and *pp in sync for error messages */
+		column += (p - *pp);
+		*pp = p;
+		if (**pp == '|') {
+			/* skip the separator, then parse the next string */
+			p++;
+		}
+	} while (**pp == '|');
+	*pp = p;
+	return RET_OK;
+}
+
+/* Parse a '|'-separated list of single-quoted architecture names into
+ * atoms.  Each name must already be a known architecture atom; wildcard
+ * characters are rejected explicitly.  *pp is advanced past the parsed
+ * part; column is only used for error messages. */
+static retvalue parse_architectures(/*@out@*/struct atomlist *atoms, const char **pp, const struct filebeingparsed *fbp, int column) {
+	const char *p = *pp;
+	retvalue r;
+
+	atomlist_init(atoms);
+	do {
+		const char *startp, *endp;
+		atom_t atom;
+
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (*p != '\'') {
+			errorcol(fbp, column + (int)(p - *pp),
+"starting \"'\" expected!");
+			return RET_ERROR;
+		}
+		p++;
+		startp = p;
+		/* stop early on wildcard characters so they can be reported */
+		while (*p != '\0' && *p != '\'' && *p != '*' && *p != '?')
+			p++;
+		if (*p == '*' || *p == '?') {
+			errorcol(fbp, column + (int)(p - *pp),
+"Wildcards are not allowed in architectures!");
+			return RET_ERROR;
+		}
+		if (*p == '\0') {
+			errorcol(fbp, column + (int)(p - *pp),
+"closing \"'\" expected!");
+			return RET_ERROR;
+		}
+		assert (*p == '\'');
+		endp = p;
+		p++;
+		atom = architecture_find_l(startp, endp - startp);
+		if (!atom_defined(atom)) {
+			errorcol(fbp, column + (int)(startp-*pp),
+"Unknown architecture '%.*s'! (Did you mistype?)",
+				(int)(endp-startp), startp);
+			return RET_ERROR;
+		}
+		r = atomlist_add_uniq(atoms, atom);
+		if (RET_WAS_ERROR(r))
+			return r;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		column += (p - *pp);
+		*pp = p;
+		/* advance p past a separator while *pp still points at it,
+		 * so the loop condition below sees the '|' */
+		if (**pp == '|') {
+			p++;
+		}
+	} while (**pp == '|');
+	*pp = p;
+	return RET_OK;
+}
+
+/* Parse the condition part of an 'allow <condition> by <who>' line into a
+ * decision graph of upload_condition nodes linked via next_if_true /
+ * next_if_false, sharing one uc_ALWAYS fallback node that rejects by
+ * default.  'and' extends the accept path, 'or' extends the failure path
+ * of the current or-scope, 'by' terminates the condition.  On success *pp
+ * points just behind the 'by' keyword; on error everything allocated so
+ * far is released via uploadpermission_release. */
+static retvalue parse_condition(const struct filebeingparsed *fbp, int column, const char **pp, /*@out@*/struct upload_condition *condition) {
+	const char *p = *pp;
+	struct upload_condition *fallback, *last, *or_scope;
+
+	setzero(struct upload_condition, condition);
+
+	/* allocate a new fallback-node:
+	 * (this one is used to make it easier to concatenate those decision
+	 * trees, especially it keeps open the possibility to have deny
+	 * decisions) */
+	fallback = zNEW(struct upload_condition);
+	if (FAILEDTOALLOC(fallback))
+		return RET_ERROR_OOM;
+	fallback->type = uc_ALWAYS;
+	assert(!fallback->accept_if_true);
+
+	/* the queue with next has all nodes, so they can be freed
+	 * (or otherwise modified) */
+	condition->next = fallback;
+
+
+	last = condition;
+	or_scope = condition;
+
+	while (true) {
+		if (strncmp(p, "not", 3) == 0 &&
+				xisspace(p[3])) {
+			p += 3;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			/* negate means false is good and true
+			 * is bad: */
+			last->accept_if_false = true;
+			last->accept_if_true = false;
+			last->next_if_false = NULL;
+			last->next_if_true = fallback;
+		} else {
+			last->accept_if_false = false;
+			last->accept_if_true = true;
+			last->next_if_false = fallback;
+			last->next_if_true = NULL;
+		}
+		if (p[0] == '*' && xisspace(p[1])) {
+			last->type = uc_ALWAYS;
+			p++;
+		} else if (strncmp(p, "architectures", 13) == 0 &&
+				strchr(" \t'", p[13]) != NULL) {
+			retvalue r;
+
+			last->type = uc_ARCHITECTURES;
+			last->needs = needs_all;
+			p += 13;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			if (strncmp(p, "contain", 7) == 0 &&
+					strchr(" \t'", p[7]) != NULL) {
+				last->needs = needs_any;
+				p += 7;
+			}
+
+			r = parse_architectures(&last->atoms, &p,
+					fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+		} else if (strncmp(p, "binaries", 8) == 0 &&
+				strchr(" \t'", p[8]) != NULL) {
+			retvalue r;
+
+			last->type = uc_BINARIES;
+			last->needs = needs_all;
+			p += 8;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			if (strncmp(p, "contain", 7) == 0 &&
+					strchr(" \t'", p[7]) != NULL) {
+				last->needs = needs_any;
+				p += 7;
+			}
+
+			r = parse_stringpart(&last->strings, &p,
+					fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+		} else if (strncmp(p, "byhand", 6) == 0 &&
+				strchr(" \t'", p[6]) != NULL) {
+			retvalue r;
+
+			last->type = uc_BYHAND;
+			last->needs = needs_existsall;
+			/* "byhand" is 6 characters long; the previous
+			 * 'p += 8' skipped two extra characters and broke
+			 * parsing of e.g. "byhand 'script'" */
+			p += 6;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			if (*p != '\'') {
+				/* bare 'byhand' without a string list is
+				 * allowed and matches any byhand file */
+				strlist_init(&last->strings);
+				r = RET_OK;
+			} else
+				r = parse_stringpart(&last->strings, &p,
+						fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+		} else if (strncmp(p, "sections", 8) == 0 &&
+				strchr(" \t'", p[8]) != NULL) {
+			retvalue r;
+
+			last->type = uc_SECTIONS;
+			last->needs = needs_all;
+			p += 8;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			if (strncmp(p, "contain", 7) == 0 &&
+					strchr(" \t'", p[7]) != NULL) {
+				last->needs = needs_any;
+				p += 7;
+			}
+
+			r = parse_stringpart(&last->strings, &p,
+					fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+		} else if (strncmp(p, "source", 6) == 0 &&
+				strchr(" \t'", p[6]) != NULL) {
+			retvalue r;
+
+			last->type = uc_SOURCENAME;
+			p += 6;
+
+			r = parse_stringpart(&last->strings, &p,
+					fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+
+		} else if (strncmp(p, "distribution", 12) == 0 &&
+				strchr(" \t'", p[12]) != NULL) {
+			retvalue r;
+
+			last->type = uc_CODENAME;
+			p += 12;
+
+			r = parse_stringpart(&last->strings, &p,
+					fbp, column + (p-*pp));
+			if (RET_WAS_ERROR(r)) {
+				uploadpermission_release(condition);
+				return r;
+			}
+
+		} else {
+			errorcol(fbp, column + (int)(p - *pp),
+"condition expected after 'allow' keyword!");
+			uploadpermission_release(condition);
+			return RET_ERROR;
+		}
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (strncmp(p, "and", 3) == 0 && xisspace(p[3])) {
+			struct upload_condition *n, *c;
+
+			p += 3;
+
+			n = zNEW(struct upload_condition);
+			if (FAILEDTOALLOC(n)) {
+				uploadpermission_release(condition);
+				return RET_ERROR_OOM;
+			}
+			/* everything that yet made it succeed makes it need
+			 * to check this condition: */
+			for (c = condition ; c != NULL ; c = c->next) {
+				if (c->accept_if_true) {
+					c->next_if_true = n;
+					c->accept_if_true = false;
+				}
+				if (c->accept_if_false) {
+					c->next_if_false = n;
+					c->accept_if_false = false;
+				}
+			}
+			/* or will only bind to this one */
+			or_scope = n;
+
+			/* add it to queue: */
+			assert (last->next == fallback);
+			n->next = fallback;
+			last->next = n;
+			last = n;
+		} else if (strncmp(p, "or", 2) == 0 && xisspace(p[2])) {
+			struct upload_condition *n, *c;
+
+			p += 2;
+
+			n = zNEW(struct upload_condition);
+			if (FAILEDTOALLOC(n)) {
+				uploadpermission_release(condition);
+				return RET_ERROR_OOM;
+			}
+			/* everything in current scope that made it fail
+			 * now makes it check this: (currently that will
+			 * only be true at most for c == last, but with
+			 * parentheses this all will be needed) */
+			for (c = or_scope ; c != NULL ; c = c->next) {
+				if (c->next_if_true == fallback)
+					c->next_if_true = n;
+				if (c->next_if_false == fallback)
+					c->next_if_false = n;
+			}
+			/* add it to queue: */
+			assert (last->next == fallback);
+			n->next = fallback;
+			last->next = n;
+			last = n;
+		} else if (strncmp(p, "by", 2) == 0 && xisspace(p[2])) {
+			p += 2;
+			break;
+		} else {
+			errorcol(fbp, column + (int)(p - *pp),
+"'by','and' or 'or' keyword expected!");
+			uploadpermission_release(condition);
+			setzero(struct upload_condition, condition);
+			return RET_ERROR;
+		}
+		while (*p != '\0' && xisspace(*p))
+			p++;
+	}
+	*pp = p;
+	return RET_OK;
+}
+
+/* Append condition chain *c to *permissions: if permissions is still
+ * empty (no fallback node allocated yet) it simply takes over *c,
+ * otherwise *c replaces the trailing always-reject fallback node of
+ * the existing chain.  In both cases *c is zeroed afterwards because
+ * ownership of its nodes has moved into *permissions. */
+static void condition_add(struct upload_condition *permissions, struct upload_condition *c) {
+	if (permissions->next == NULL) {
+		/* first condition, as no fallback yet allocated */
+		*permissions = *c;
+		setzero(struct upload_condition, c);
+	} else {
+		struct upload_condition *last;
+
+		last = permissions->next;
+		assert (last != NULL);
+		while (last->next != NULL)
+			last = last->next;
+
+		/* the very last is always the fallback-node to which all
+		 * other conditions fall back if they have no decision */
+		assert(last->type == uc_ALWAYS);
+		assert(!last->accept_if_true);
+
+		*last = *c;
+		setzero(struct upload_condition, c);
+	}
+}
+
+/* Parse a group name starting at *pp, look it up (or create it) via
+ * addgroup, and return it in *g.  The reserved words 'add', 'empty',
+ * 'unused' and 'contains' are not valid group names.  *pp is advanced
+ * past the name; buffer is the start of the line, used only to compute
+ * error columns. */
+static retvalue find_group(struct uploadergroup **g, struct uploaders *u, const char **pp, const struct filebeingparsed *fbp, const char *buffer) {
+	const char *p, *q;
+	struct uploadergroup *group;
+
+	p = *pp;
+	q = p;
+	/* group names consist of alphanumerics and '-', '_', '.' */
+	while ((*q >= 'a' && *q <= 'z') || (*q >= 'A' && *q <= 'Z') ||
+			(*q >= '0' && *q <= '9') || *q == '-'
+			|| *q == '_' || *q == '.')
+		q++;
+	if (*p == '\0' || (q-p == 3 && memcmp(p, "add", 3) == 0)
+			|| (q-p == 5 && memcmp(p, "empty", 5) == 0)
+			|| (q-p == 6 && memcmp(p, "unused", 6) == 0)
+			|| (q-p == 8 && memcmp(p, "contains", 8) == 0)) {
+		errorcol(fbp, (int)(1 + p - buffer),
+"group name expected!");
+		return RET_ERROR;
+	}
+	if (*q != '\0' && *q != ' ' && *q != '\t') {
+		errorcol(fbp, (int)(1 +p -buffer),
+"invalid group name!");
+		return RET_ERROR;
+	}
+	*pp = q;
+	group = addgroup(u, p, q-p);
+	if (FAILEDTOALLOC(group))
+		return RET_ERROR_OOM;
+	*g = group;
+	return RET_OK;
+}
+
+/* Parse a key id/fingerprint (optionally prefixed "0x", optionally
+ * followed by '+' to also accept subkeys) that must extend to the end
+ * of the line, and return the matching (or newly created) uploader
+ * entry via addfingerprint.  buffer is the line start, used only to
+ * compute error columns. */
+static retvalue find_uploader(struct uploader **u_p, struct uploaders *u, const char *p, const struct filebeingparsed *fbp, const char *buffer) {
+	struct uploader *uploader;
+	bool allow_subkeys = false;
+	const char *q, *qq;
+
+	if (p[0] == '0' && p[1] == 'x')
+		p += 2;
+	q = overkey(p);
+	if (*p == '\0' || (*q !='\0' && !xisspace(*q) && *q != '+') || q==p) {
+		errorcol(fbp, (int)(1 + q - buffer),
+"key id or fingerprint expected!");
+		return RET_ERROR;
+	}
+	/* warn on ids longer than 16 hex digits (unless ignored) */
+	if (q - p > 16) {
+		if (!IGNORABLE(longkeyid))
+			errorcol(fbp, (int)(1 + p - buffer),
+"key id most likely too long for gpgme to understand\n"
+"(at most 16 hex digits should be safe. Use --ignore=longkeyid to ignore)");
+	}
+	qq = q;
+	while (xisspace(*qq))
+		qq++;
+	/* a trailing '+' means subkeys of this key are accepted, too */
+	if (*qq == '+') {
+		qq++;
+		allow_subkeys = true;
+	}
+	while (xisspace(*qq))
+		qq++;
+	if (*qq != '\0') {
+		errorcol(fbp, (int)(1 +qq - buffer),
+"unexpected data after 'key <fingerprint>' statement!");
+		if (*q == ' ')
+			fprintf(stderr,
+" Hint: no spaces allowed in fingerprint specification.\n");
+		return RET_ERROR;
+	}
+	uploader = addfingerprint(u, p, q-p, allow_subkeys);
+	if (FAILEDTOALLOC(uploader))
+		return RET_ERROR_OOM;
+	*u_p = uploader;
+	return RET_OK;
+}
+
+/* Register group as one more entry of the NULL-terminated array
+ * *memberof_p (a member's list of groups it belongs to), growing the
+ * array in chunks of 16 as needed.  Fails if the member is already in
+ * the group or the group was explicitly marked 'empty'. */
+static retvalue include_group(struct uploadergroup *group, const struct uploadergroup ***memberof_p, const struct filebeingparsed *fbp) {
+	size_t n;
+	const struct uploadergroup **memberof = *memberof_p;
+
+	n = 0;
+	if (memberof != NULL) {
+		while (memberof[n] != NULL) {
+			if (memberof[n] == group) {
+				errorline(fbp,
+"member added to group %s a second time!",
+					group->name);
+				return RET_ERROR;
+			}
+			n++;
+		}
+	}
+	if (n == 0 || (n & 15) == 15) {
+		/* let's hope no static checker is confused here ;-> */
+		memberof = realloc(memberof,
+				((n+17)&~15) * sizeof(struct uploadergroup*));
+		if (FAILEDTOALLOC(memberof))
+			return RET_ERROR_OOM;
+		*memberof_p = memberof;
+	}
+	memberof[n] = group;
+	memberof[n+1] = NULL;
+	/* remember where the first member was added, for warnings later
+	 * (stray duplicate ';' removed here) */
+	if (unset_pos(group->firstmemberat))
+		set_position(group->firstmemberat, fbp);
+	if (!unset_pos(group->emptyat)) {
+		errorline(fbp,
+"cannot add members to group '%s' marked empty!", group->name);
+		errorpos(group->emptyat,
+"here it was marked as empty");
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Return true if needle is (directly or transitively) a member of the
+ * group 'chair'.  Recurses through the memberof arrays; cycles are
+ * prevented at construction time by the checks in parseuploaderline. */
+static bool is_included_in(const struct uploadergroup *needle, const struct uploadergroup *chair) {
+	const struct uploadergroup **g;
+
+	if (needle->memberof == NULL)
+		return false;
+	for (g = needle->memberof ; *g != NULL ; g++) {
+		if (*g == chair)
+			return true;
+		if (is_included_in(*g, chair))
+			return true;
+	}
+	return false;
+}
+
+/* Verify the line read into buffer ends with '\n' (i.e. was neither
+ * truncated by the 1024-byte fgets window nor an unterminated last
+ * line), then strip the newline and any trailing whitespace in place.
+ * Returns false (after reporting) if the line is unusable. */
+static inline bool trim_line(const struct filebeingparsed *fbp, char *buffer) {
+	size_t l = strlen(buffer);
+	if (l == 0 || buffer[l-1] != '\n') {
+		if (l >= 1024)
+			errorcol(fbp, 1024, "Overlong line!");
+		else
+			errorcol(fbp, (int)l, "Unterminated line!");
+		return false;
+	}
+	do {
+		buffer[--l] = '\0';
+	} while (l > 0 && xisspace(buffer[l-1]));
+	return true;
+}
+
+/* Parse one line of an uploaders file.  Recognized statements:
+ *   group <name> add <fingerprint>
+ *   group <name> contains <group>
+ *   group <name> empty | unused
+ *   allow <condition> by key <fp> | by group <name> | by unsigned
+ *                    | by any key | by anybody
+ * Returns RET_NOTHING for blank and comment lines, RET_OK on success,
+ * an error otherwise. */
+static inline retvalue parseuploaderline(char *buffer, const struct filebeingparsed *fbp, struct uploaders *u) {
+	retvalue r;
+	const char *p, *q;
+	struct upload_condition condition;
+
+	p = buffer;
+	while (*p != '\0' && xisspace(*p))
+		p++;
+	if (*p == '\0' || *p == '#')
+		return RET_NOTHING;
+
+	/* the word must end after "group"; the old check tested *p == '\0',
+	 * which is impossible after strncmp matched (p[0] is 'g'), so test
+	 * p[5] instead, matching the 'key'/'group' checks further below */
+	if (strncmp(p, "group", 5) == 0 && (p[5] == '\0' || xisspace(p[5]))) {
+		struct uploadergroup *group;
+
+		p += 5;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		r = find_group(&group, u, &p, fbp, buffer);
+		if (RET_WAS_ERROR(r))
+			return r;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (strncmp(p, "add", 3) == 0) {
+			struct uploader *uploader;
+
+			p += 3;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			/* find_uploader consumes up to end of line itself */
+			r = find_uploader(&uploader, u, p, fbp, buffer);
+			if (RET_WAS_ERROR(r))
+				return r;
+			r = include_group(group, &uploader->memberof, fbp);
+			if (RET_WAS_ERROR(r))
+				return r;
+			return RET_OK;
+		} else if (strncmp(p, "contains", 8) == 0) {
+			struct uploadergroup *member;
+
+			p += 8;
+			while (*p != '\0' && xisspace(*p))
+				p++;
+			q = p;
+			r = find_group(&member, u, &q, fbp, buffer);
+			if (RET_WAS_ERROR(r))
+				return r;
+			if (group == member) {
+				errorline(fbp,
+"cannot add group '%s' to itself!", member->name);
+				return RET_ERROR;
+			}
+			/* reject membership cycles */
+			if (is_included_in(group, member)) {
+				/* perhaps offer a winning coupon for the first
+				 * one triggering this? */
+				errorline(fbp,
+"cannot add group '%s' to group '%s' as the later is already member of the former!",
+					member->name, group->name);
+				return RET_ERROR;
+			}
+			r = include_group(group, &member->memberof, fbp);
+			if (RET_WAS_ERROR(r))
+				return r;
+			if (unset_pos(member->firstusedat))
+				set_position(member->firstusedat, fbp);
+			if (!unset_pos(member->unusedat)) {
+				errorline(fbp,
+"cannot use group '%s' marked as unused!", member->name);
+				errorpos(member->unusedat,
+"here it got marked as unused.");
+				return RET_ERROR;
+			}
+		} else if (strncmp(p, "empty", 5) == 0) {
+			q = p + 5;
+			/* a repeated 'empty' is only a warning, not fatal */
+			if (!unset_pos(group->emptyat)) {
+				errorline(fbp,
+"group '%s' marked as empty again", group->name);
+				errorpos(group->emptyat,
+"here it was marked empty the first time");
+			}
+			if (!unset_pos(group->firstmemberat)) {
+				errorline(fbp,
+"group '%s' cannot be marked empty as it already has members!",
+					group->name);
+				errorpos(group->firstmemberat,
+"here a member was added the first time");
+				return RET_ERROR;
+			}
+			set_position(group->emptyat, fbp);
+		} else if (strncmp(p, "unused", 6) == 0) {
+			q = p + 6;
+			/* a repeated 'unused' is only a warning, not fatal */
+			if (!unset_pos(group->unusedat)) {
+				errorline(fbp,
+"group '%s' marked as unused again!", group->name);
+				errorpos(group->unusedat,
+"here it was already marked unused");
+			}
+			if (!unset_pos(group->firstusedat)) {
+				errorline(fbp,
+"group '%s' cannot be marked unused as it was already used!", group->name);
+				errorpos(group->firstusedat,
+"here it was used the first time");
+				return RET_ERROR;
+			}
+			set_position(group->unusedat, fbp);
+		} else {
+			errorcol(fbp, (int)(1 + p - buffer),
+"missing 'add', 'contains', 'unused' or 'empty' keyword.");
+			return RET_ERROR;
+		}
+		while (*q != '\0' && xisspace(*q))
+			q++;
+		if (*q != '\0') {
+			errorcol(fbp, (int)(1 + p - buffer),
+"unexpected data at end of group statement!");
+			return RET_ERROR;
+		}
+		return RET_OK;
+	}
+	if (strncmp(p, "allow", 5) != 0 || !xisspace(p[5])) {
+		errorcol(fbp, (int)(1 +p - buffer),
+"'allow' or 'group' keyword expected!"
+" (no other statement has yet been implemented)");
+		return RET_ERROR;
+	}
+	p+=5;
+	while (*p != '\0' && xisspace(*p))
+		p++;
+	r = parse_condition(fbp, (1+p-buffer), &p, &condition);
+	if (RET_WAS_ERROR(r))
+		return r;
+	while (*p != '\0' && xisspace(*p))
+		p++;
+	if (strncmp(p, "key", 3) == 0 && (p[3] == '\0' || xisspace(p[3]))) {
+		struct uploader *uploader;
+
+		p += 3;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		r = find_uploader(&uploader, u, p, fbp, buffer);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			uploadpermission_release(&condition);
+			return r;
+		}
+		condition_add(&uploader->permissions, &condition);
+	} else if (strncmp(p, "group", 5) == 0
+			&& (p[5] == '\0' || xisspace(p[5]))) {
+		struct uploadergroup *group;
+
+		p += 5;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		r = find_group(&group, u, &p, fbp, buffer);
+		assert (r != RET_NOTHING);
+		if (RET_WAS_ERROR(r)) {
+			uploadpermission_release(&condition);
+			return r;
+		}
+		assert (group != NULL);
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (*p != '\0') {
+			errorcol(fbp, (int)(1 + p - buffer),
+"unexpected data at end of group statement!");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		if (unset_pos(group->firstusedat))
+			set_position(group->firstusedat, fbp);
+		if (!unset_pos(group->unusedat)) {
+			errorline(fbp,
+"cannot use group '%s' marked as unused!", group->name);
+			errorpos(group->unusedat,
+"here it was marked as unused.");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		condition_add(&group->permissions, &condition);
+	} else if (strncmp(p, "unsigned", 8) == 0
+			&& (p[8]=='\0' || xisspace(p[8]))) {
+		p+=8;
+		if (*p != '\0') {
+			errorcol(fbp, (int)(1 + p - buffer),
+"unexpected data after 'unsigned' statement!");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		condition_add(&u->unsignedpermissions, &condition);
+	} else if (strncmp(p, "any", 3) == 0 && xisspace(p[3])) {
+		p+=3;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (strncmp(p, "key", 3) != 0
+				|| (p[3]!='\0' && !xisspace(p[3]))) {
+			errorcol(fbp, (int)(1 + p - buffer),
+"'key' keyword expected after 'any' keyword!");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		p += 3;
+		if (*p != '\0') {
+			errorcol(fbp, (int)(1 + p - buffer),
+"unexpected data after 'any key' statement!");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		condition_add(&u->anyvalidkeypermissions, &condition);
+	} else if (strncmp(p, "anybody", 7) == 0
+			&& (p[7] == '\0' || xisspace(p[7]))) {
+		p+=7;
+		while (*p != '\0' && xisspace(*p))
+			p++;
+		if (*p != '\0') {
+			errorcol(fbp, (int)(1 + p - buffer),
+"unexpected data after 'anybody' statement!");
+			uploadpermission_release(&condition);
+			return RET_ERROR;
+		}
+		condition_add(&u->anybodypermissions, &condition);
+	} else {
+		errorcol(fbp, (int)(1 + p - buffer),
+"'key', 'unsigned', 'anybody' or 'any key' expected!");
+		uploadpermission_release(&condition);
+		return RET_ERROR;
+	}
+	return RET_OK;
+}
+
+/* Open filename (after configfile_expandname expansion) for parsing and
+ * push it both onto the include chain (*fbp_p becomes the new current
+ * file) and onto the flat list *root_p used for later cleanup.
+ * Include nesting is limited to 100 levels to catch recursion. */
+static retvalue openfiletobeparsed(struct filebeingparsed *includedby, const char *filename, struct filebeingparsed **fbp_p, struct filebeingparsed **root_p) {
+	struct filebeingparsed *fbp;
+
+	if (includedby != NULL && includedby->depth > 100) {
+		errorcol(includedby, 0,
+"Too deeply nested include directives (> 100). Built some recursion?");
+		return RET_ERROR;
+	}
+
+	fbp = calloc(1, sizeof(struct filebeingparsed));
+	if (FAILEDTOALLOC(fbp))
+		return RET_ERROR_OOM;
+
+	fbp->filename = configfile_expandname(filename, NULL);
+	if (FAILEDTOALLOC(fbp->filename)) {
+		free(fbp);
+		return RET_ERROR_OOM;
+	}
+	fbp->f = fopen(fbp->filename, "r");
+	if (fbp->f == NULL) {
+		int e = errno;
+		fprintf(stderr, "Error opening '%s': %s\n",
+				fbp->filename, strerror(e));
+		print_include_trace(includedby);
+		free(fbp->filename);
+		free(fbp);
+		return RET_ERRNO(e);
+	}
+	fbp->depth = (includedby != NULL)?(includedby->depth+1):0;
+	fbp->includedby = includedby;
+	*fbp_p = fbp;
+	fbp->next = *root_p;
+	*root_p = fbp;
+	return RET_OK;
+}
+
+/* Free the whole list of file records (linked via ->next), closing any
+ * stream that is still open.  Safe to call with NULL. */
+static void filebeingparsed_free(struct filebeingparsed *fbp) {
+	while (fbp != NULL) {
+		struct filebeingparsed *n = fbp->next;
+		if (fbp->f != NULL)
+			(void)fclose(fbp->f);
+		free(fbp->filename);
+		free(fbp);
+		fbp = n;
+	}
+}
+
+/* Pop the current file from the include chain (*p becomes the file that
+ * included it) and close its stream, reporting any error fclose detects.
+ * The record itself stays on the cleanup list; only ->f is cleared. */
+static inline retvalue close_file(struct filebeingparsed **p) {
+	int i;
+	struct filebeingparsed *fbp;
+
+	/* check the pointer before dereferencing it; the previous code
+	 * read *p first and only asserted afterwards */
+	assert (p != NULL);
+	fbp = *p;
+
+	*p = fbp->includedby;
+	i = fclose(fbp->f);
+	fbp->f = NULL;
+	if (i != 0) {
+		int e = errno;
+		fprintf(stderr, "Error reading '%s': %s\n",
+				fbp->filename, strerror(e));
+		print_include_trace(fbp->includedby);
+		return RET_ERRNO(e);
+	} else
+		return RET_OK;
+}
+
+/* Handle an 'include' directive: buffer is the rest of the line after
+ * the keyword; open the named file and make it the current one. */
+static inline retvalue include_file(struct filebeingparsed **fbp_p, struct filebeingparsed **root_p, const char *buffer) {
+	const char *filename = buffer;
+
+	while (*filename != '\0' && xisspace(*filename))
+		filename++;
+	if (*filename == '\0') {
+		errorcol(*fbp_p, 1+(int)(filename - buffer),
+"Missing filename after include directive!");
+		return RET_ERROR;
+	}
+	return openfiletobeparsed(*fbp_p, filename, fbp_p, root_p);
+}
+
+/* Load and parse an uploaders file (following include directives) into
+ * a freshly allocated struct uploaders returned via *list.  By default
+ * everything is rejected (uc_ALWAYS nodes with no accept flags); the
+ * parsed 'allow' rules add permissions.  After parsing, warnings are
+ * printed for groups that are used but empty or populated but unused. */
+static retvalue uploaders_load(/*@out@*/struct uploaders **list, const char *fname) {
+	char buffer[1025];
+	struct uploaders *u;
+	struct uploadergroup *g;
+	retvalue r;
+	struct filebeingparsed *fbp = NULL;
+	struct filebeingparsed *filesroot = NULL;
+
+	r = openfiletobeparsed(NULL, fname, &fbp, &filesroot);
+	if (RET_WAS_ERROR(r))
+		return r;
+
+	u = zNEW(struct uploaders);
+	if (FAILEDTOALLOC(u)) {
+		filebeingparsed_free(filesroot);
+		return RET_ERROR_OOM;
+	}
+	/* reject by default */
+	u->unsignedpermissions.type = uc_ALWAYS;
+	u->anyvalidkeypermissions.type = uc_ALWAYS;
+	u->anybodypermissions.type = uc_ALWAYS;
+
+	while (fbp != NULL) {
+		while (fgets(buffer, 1024, fbp->f) != NULL) {
+			fbp->lineno++;
+			if (!trim_line(fbp, buffer)) {
+				filebeingparsed_free(filesroot);
+				uploaders_free(u);
+				return RET_ERROR;
+			}
+			/* NOTE(review): this is a prefix match, so a line
+			 * starting "includefoo" is also treated as an
+			 * include directive — confirm whether intended */
+			if (strncmp(buffer, "include", 7) == 0)
+				r = include_file(&fbp, &filesroot, buffer + 7);
+			else
+				r = parseuploaderline(buffer, fbp, u);
+			if (RET_WAS_ERROR(r)) {
+				filebeingparsed_free(filesroot);
+				uploaders_free(u);
+				return r;
+			}
+		}
+		r = close_file(&fbp);
+		if (RET_WAS_ERROR(r)) {
+			filebeingparsed_free(filesroot);
+			uploaders_free(u);
+			return r;
+		}
+	}
+	for (g = u->groups ; g != NULL ; g = g->next) {
+		if ((unset_pos(g->firstmemberat) && unset_pos(g->emptyat)) &&
+		    !unset_pos(g->firstusedat))
+			errorpos(g->firstusedat,
+"Warning: group '%s' gets used but never gets any members",
+					g->name);
+		if ((unset_pos(g->firstusedat) && unset_pos(g->unusedat)) &&
+		    !unset_pos(g->firstmemberat))
+			// TODO: avoid this if the group is from a include?
+			errorpos(g->firstmemberat,
+"Warning: group '%s' gets members but is not used in any rule",
+					g->name);
+	}
+	assert (fbp == NULL);
+	/* only free file information once filenames are no longer needed: */
+	filebeingparsed_free(filesroot);
+	*list = u;
+	return RET_OK;
+}
+
+/* Return (via *list) the parsed uploaders file with this name, loading
+ * it on first use and caching it in the global uploaderslists list.
+ * Cached entries are shared and reference-counted; each successful call
+ * takes one reference (released elsewhere via uploaders_unlock). */
+retvalue uploaders_get(/*@out@*/struct uploaders **list, const char *filename) {
+	retvalue r;
+	struct uploaders *u;
+	size_t len;
+
+	assert (filename != NULL);
+
+	len = strlen(filename);
+	u = uploaderslists;
+	while (u != NULL && (u->filename_len != len ||
+				memcmp(u->filename, filename, len) != 0))
+		u = u->next;
+	if (u == NULL) {
+		r = uploaders_load(&u, filename);
+		if (!RET_IS_OK(r))
+			return r;
+		assert (u != NULL);
+		u->filename = strdup(filename);
+		if (FAILEDTOALLOC(u->filename)) {
+			uploaders_free(u);
+			return RET_ERROR_OOM;
+		}
+		u->filename_len = len;
+		u->next = uploaderslists;
+		u->reference_count = 1;
+		uploaderslists = u;
+	} else
+		u->reference_count++;
+	*list = u;
+	return RET_OK;
+}
diff --git a/uploaderslist.h b/uploaderslist.h
new file mode 100644
index 0000000..cd95a7f
--- /dev/null
+++ b/uploaderslist.h
@@ -0,0 +1,26 @@
+#ifndef REPREPRO_UPLOADERSLIST_H
+#define REPREPRO_UPLOADERSLIST_H
+
+
+struct upload_conditions;
+struct uploaders;
+
+/* kinds of checks a parsed 'allow' condition can perform; uc_REJECTED
+ * doubles as the deny result and uc_ALWAYS as the unconditional node */
+enum upload_condition_type { uc_REJECTED = 0, uc_ALWAYS,
+/* uc_COMPONENT, */ uc_ARCHITECTURES,
+	uc_CODENAME,
+	uc_SOURCENAME, uc_SECTIONS, uc_BINARIES, uc_BYHAND };
+#define uc_ACCEPTED uc_ALWAYS
+
+/* load (or find in the cache) the uploaders file with this name;
+ * each successful call takes one reference */
+retvalue uploaders_get(/*@out@*/struct uploaders **list, const char *filename);
+/* release one reference obtained via uploaders_get */
+void uploaders_unlock(/*@only@*//*@null@*/struct uploaders *);
+
+struct signatures;
+retvalue uploaders_permissions(struct uploaders *, const struct signatures *, /*@out@*/struct upload_conditions **);
+
+/* uc_FAILED means rejected, uc_ACCEPTED means can go in */
+enum upload_condition_type uploaders_nextcondition(struct upload_conditions *);
+/* true means, give more if more to check, false means enough */
+bool uploaders_verifystring(struct upload_conditions *, const char *);
+bool uploaders_verifyatom(struct upload_conditions *, atom_t);
+
+#endif
diff --git a/valgrind.nodebug.supp b/valgrind.nodebug.supp
new file mode 100644
index 0000000..c88ea94
--- /dev/null
+++ b/valgrind.nodebug.supp
@@ -0,0 +1,66 @@
+{
+ stupid-db4.6
+ Memcheck:Param
+ pwrite64(buf)
+ obj:/lib/ld-2.7.so
+ fun:__os_io
+ obj:/usr/lib/libdb-4.6.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db4.6-withlibc-dbg
+ Memcheck:Param
+ pwrite64(buf)
+ fun:pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.6.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db3
+ Memcheck:Param
+ pwrite64(buf)
+ fun:pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.3.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ stupid-db3-withlibc-dbg
+ Memcheck:Param
+ pwrite64(buf)
+ fun:do_pwrite64
+ fun:__os_io
+ obj:/usr/lib/libdb-4.3.so
+ fun:__memp_bhwrite
+ fun:__memp_sync_int
+ fun:__memp_fsync
+ fun:__db_sync
+}
+{
+ libz-looking-far
+ Memcheck:Cond
+ obj:/usr/lib/libz.so.*
+ obj:/usr/lib/libz.so.*
+}
+{
+ gpgme11-gpgme_data_release_and_get_mem_leak
+ Memcheck:Leak
+ fun:calloc
+ obj:*/libgpgme.so.*
+}
+{
+ gpgme11-gpgme_data_release_and_get_mem_leak2
+ Memcheck:Leak
+ fun:calloc
+ obj:*/libgpgme.so.*
+}