From 6f3914852523d5ff539e07babffd6d2e1398866b Mon Sep 17 00:00:00 2001 From: Packit Date: Aug 19 2020 14:20:07 +0000 Subject: dnf-4.2.17 base --- diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..724f39a --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +*.pyc +*.pyo +*~ +.project +.pydevproject +asthelper.completions +.emacs.desktop* +TAGS +build/ +package/dnf.spec +dnf/const.py +bin/dnf*-2 +bin/dnf*-3 +bin/yum-2 +bin/yum-3 +*.cmake +*CMakeCache* diff --git a/.packit.yaml b/.packit.yaml new file mode 100644 index 0000000..f9a432d --- /dev/null +++ b/.packit.yaml @@ -0,0 +1,13 @@ +--- +upstream_project_name: dnf +specfile_path: dnf.spec +jobs: +- job: copr_build + trigger: pull_request + metadata: + targets: + - fedora-rawhide-x86_64 + - fedora-30-x86_64 + - fedora-29-x86_64 + - mageia-cauldron-x86_64 + - opensuse-tumbleweed-x86_64 diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..b75d4d0 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,96 @@ +---------------- +YUP AUTHORS +---------------- + Bryan Stillwell + Stephen Edie + Dan Burcaw + Troy Bengegerdes + + +---------------- +YUM AUTHORS +---------------- + + Seth Vidal + Jeremy Katz + Adrian Likins + Matthew Miller + Michael Stenner + Icon Riabitsev + Ryan Tomayko + Paul Nasrat + Robert G. 
Brown + Hollis Blanchard + Grigory Bakunov + Menno Smits + Gijs Hollestelle + Terje Rosten + Luke Macken + James Bowes + Tim Lauridsen + Florian Festi + Jack Neely + James Antill + Panu Matilainen + Tambet Ingo + Nick Jacek + +---------------- +DNF AUTHORS +---------------- + Ales Kozumplik + Elad Alfassa + Igor Gnatenko + Jan Silhan + Jaroslav Mracek + Jaroslav Rohel + Martin Hatina + Michael Mraka + Michal Domonkos + Michal Luscon + Panu Matilainen + Parag Nemade + Radek Holy + Tim Lauridsen + Ville Skyttä + Zdenek Pavlas + +---------------- +DNF CONTRIBUTORS +---------------- + Abhijeet Kasurde + Adam Salih + Adam Williamson + Albert Uchytil + Alberto Ruiz + Baurzhan Muftakhidinov + Christopher Meng + Daniel Mach + Dave Johansen + Dylan Pindur + Eduard Cuba + Frank Dana + George Machitidze + Haïkel Guémar + Kevin Kofler + Kushal Das + Lubomír Sedlář + Matt Sturgeon + Max Prokhorov + Michael Dunphy + Michael Scherer + Neal Gompa + Nathaniel McCallum + Olivier Andrieu + Padraig Brady + Pavel Grunt + Peter Hjalmarsson + Peter Simonyi + Petr Spacek + Rob Cutmore + Satoshi Matsumoto + Tomas Kasparek + Vladan Kudlac + Will Woods + Furkan Karcıoğlu diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..7355296 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,38 @@ +PROJECT (dnf NONE) +CMAKE_MINIMUM_REQUIRED (VERSION 2.4) + +INCLUDE (${CMAKE_SOURCE_DIR}/VERSION.cmake) + +SET( SYSCONFDIR /etc) +SET( SYSTEMD_DIR /usr/lib/systemd/system) + +IF (NOT PYTHON_DESIRED) + FIND_PACKAGE (PythonInterp REQUIRED) +ELSEIF (${PYTHON_DESIRED} STREQUAL "2") + FIND_PACKAGE (PythonInterp 2 EXACT REQUIRED) +ELSEIF (${PYTHON_DESIRED} STREQUAL "3") + FIND_PACKAGE (PythonInterp 3 EXACT REQUIRED) +ELSEIF (EXISTS ${PYTHON_DESIRED}) + SET (PYTHON_EXECUTABLE ${PYTHON_DESIRED}) + FIND_PACKAGE (PythonInterp REQUIRED) +ELSE () + MESSAGE (FATAL_ERROR "Invalid PYTHON_DESIRED value: " ${PYTHON_DESIRED}) +ENDIF() + +EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} -c "from sys import 
stdout; from distutils import sysconfig; stdout.write(sysconfig.get_python_lib())" OUTPUT_VARIABLE PYTHON_INSTALL_DIR) +MESSAGE(STATUS "Python install dir is ${PYTHON_INSTALL_DIR}") + +ADD_SUBDIRECTORY (dnf) +ADD_SUBDIRECTORY (bin) +ADD_SUBDIRECTORY (etc) +ADD_SUBDIRECTORY (doc) +ADD_SUBDIRECTORY (po) +ENABLE_TESTING() +ADD_SUBDIRECTORY (tests) + +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/bin/dnf.in ${CMAKE_SOURCE_DIR}/bin/dnf-${PYTHON_VERSION_MAJOR} @ONLY) +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/bin/dnf-automatic.in ${CMAKE_SOURCE_DIR}/bin/dnf-automatic-${PYTHON_VERSION_MAJOR} @ONLY) +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/dnf/const.py.in ${CMAKE_SOURCE_DIR}/dnf/const.py @ONLY) +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/dnf/cli/completion_helper.py.in ${CMAKE_SOURCE_DIR}/dnf/cli/completion_helper.py @ONLY) +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/tests/modules/etc/dnf/repos.d/test.repo.in ${CMAKE_SOURCE_DIR}/tests/modules/etc/dnf/repos.d/test.repo @ONLY) +CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/tests/modules/etc/dnf/dnf.conf.in ${CMAKE_SOURCE_DIR}/tests/modules/etc/dnf/dnf.conf @ONLY) diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..d159169 --- /dev/null +++ b/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. 
(Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. 
To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/PACKAGE-LICENSING b/PACKAGE-LICENSING new file mode 100644 index 0000000..206694f --- /dev/null +++ b/PACKAGE-LICENSING @@ -0,0 +1,14 @@ +All files in DNF are distributed as GPLv2+ with the exceptions below: + +rpm/transaction.py : GPL + +Licensing conditions of the following files were disputed when DNF was forked +from YUM: + +yum/misc.py +yum/parser.py + +In the yum-devel-list-thread below those concerns were resolved by YUM +maintainers who confirmed these files were covered by GPLv2+: + +http://lists.baseurl.org/pipermail/yum-devel/2012-July/009376.html diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..1f2d94a --- /dev/null +++ b/README.rst @@ -0,0 +1,107 @@ +############### + Dandified YUM +############### + +.. image:: https://raw.githubusercontent.com/rpm-software-management/dnf/gh-pages/logos/DNF_logo.png + +Dandified YUM (DNF) is the next upcoming major version of `YUM `_. It does package management using `RPM `_, `libsolv `_ and `hawkey `_ libraries. For metadata handling and package downloads it utilizes `librepo `_. To process and effectively handle the comps data it uses `libcomps `_. + +============ + Installing +============ + +DNF and all its dependencies are available in Fedora 18 and later, including the +rawhide Fedora. + +Optionally you can use repositories with DNF nightly builds for last 2 stable Fedora versions available at copr://rpmsoftwaremanagement/dnf-nightly. You can enable the repository e.g. using:: + + dnf copr enable rpmsoftwaremanagement/dnf-nightly + +Then install DNF typing:: + + sudo yum install dnf + +In other RPM-based distributions you need to build all the components from their +sources. + +====================== + Building from source +====================== + +All commands should be run from the DNF git checkout directory. 
+ +To install the build dependencies:: + + sudo dnf builddep dnf.spec + +To build DNF:: + + mkdir build; + pushd build; + cmake ..; # add '-DPYTHON_DESIRED="3"' option for Python 3 build + make; + popd; + +To run DNF when compiled for Python2:: + + PYTHONPATH=`readlink -f .` bin/dnf-2 + +To run DNF when compiled for Python3:: + + PYTHONPATH=`readlink -f .` bin/dnf-3 + +If you want to build the manpages, use the option ``-DWITH_MAN=0`` with cmake. + +Man pages will be located in ``build/doc`` and can be read with ``man -l``, e.g:: + + man -l build/doc/dnf.8 + +============================= + Building and installing rpm +============================= + +From the DNF git checkout directory:: + + $ tito build --test --rpm + # dnf install /tmp/tito/noarch/* + +=============== + Running tests +=============== + +From the DNF git checkout directory:: + + mkdir build; + pushd build; + cmake .. && make ARGS="-V" test; + popd; + +============== + Contribution +============== + +Here's the most direct way to get your work merged into the project. + +1. Fork the project +#. Clone down your fork +#. Implement your feature or bug fix and commit changes +#. If you reported a bug or you know it fixes existing bug at `Red Hat bugzilla `_, append ``(RhBug:)`` to your commit message +#. In special commit add your name and email under ``DNF CONTRIBUTORS`` section in `authors file `_ as a reward for your generosity +#. Push the branch up to your fork +#. Send a pull request for your branch + +Please, do not create the pull requests with translation (.po) files improvements. Fix the translation on `Zanata `_ instead. + +=============== + Documentation +=============== + +The DNF package distribution contains man pages, dnf(8) and dnf.conf(8). It is also possible to `read the DNF documentation `_ online, the page includes API documentation. There's also a `wiki `_ meant for contributors to DNF and related projects. + +==================== + Bug reporting etc. 
+==================== + +Please report discovered bugs to the `Red Hat bugzilla `_ following this `guide `_. If you planned to propose the patch in the report, consider `Contribution`_ instead. + +Freenode's irc channel ``#yum`` is meant for discussions related to both YUM and DNF. Questions should be asked there, issues discussed. Remember: ``#yum`` is not a support channel and prior research is expected from the questioner. diff --git a/bin/CMakeLists.txt b/bin/CMakeLists.txt new file mode 100644 index 0000000..eb5c56f --- /dev/null +++ b/bin/CMakeLists.txt @@ -0,0 +1,2 @@ +INSTALL (PROGRAMS "dnf-${PYTHON_VERSION_MAJOR}" DESTINATION bin) +INSTALL (PROGRAMS "dnf-automatic-${PYTHON_VERSION_MAJOR}" DESTINATION bin) diff --git a/bin/dnf-automatic.in b/bin/dnf-automatic.in new file mode 100755 index 0000000..5b06aa2 --- /dev/null +++ b/bin/dnf-automatic.in @@ -0,0 +1,32 @@ +#!@PYTHON_EXECUTABLE@ +# dnf-automatic executable. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import unicode_literals +import os +import sys + +here = sys.path[0] +if here != '/usr/bin': + # git checkout + dnf_toplevel = os.path.dirname(here) + sys.path[0] = dnf_toplevel + +import dnf.automatic.main +sys.exit(dnf.automatic.main.main(sys.argv[1:])) diff --git a/bin/dnf.in b/bin/dnf.in new file mode 100755 index 0000000..645d0f0 --- /dev/null +++ b/bin/dnf.in @@ -0,0 +1,58 @@ +#!@PYTHON_EXECUTABLE@ +# The dnf executable script. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import unicode_literals +import sys + + +def suppress_keyboard_interrupt_message(): + """Prevent unsightly KeyboardInterrupt tracebacks. + + Nothing will be printed to the terminal after an uncaught + :class:`exceptions.KeyboardInterrupt`. 
+ + """ + old_excepthook = sys.excepthook + + def new_hook(type, value, traceback): + if type != KeyboardInterrupt: + old_excepthook(type, value, traceback) + else: + pass + + sys.excepthook = new_hook + + +# do this ASAP to prevent tracebacks after ^C during imports +suppress_keyboard_interrupt_message() + +if __name__ != "__main__": + sys.stderr.write('The executable DNF module must not be imported.') + sys.exit(1) + +here = sys.path[0] +if here != '/usr/bin': + # git checkout + import os + dnf_toplevel = os.path.dirname(here) + sys.path[0] = dnf_toplevel + +from dnf.cli import main +main.user_main(sys.argv[1:], exit_code=True) diff --git a/dnf.spec b/dnf.spec new file mode 100644 index 0000000..e20be1e --- /dev/null +++ b/dnf.spec @@ -0,0 +1,2496 @@ +# default dependencies +%global hawkey_version 0.39.1 +%global libcomps_version 0.1.8 +%global libmodulemd_version 1.4.0 +%global rpm_version 4.14.0 + +# conflicts +%global conflicts_dnf_plugins_core_version 4.0.12 +%global conflicts_dnf_plugins_extras_version 4.0.4 +%global conflicts_dnfdaemon_version 0.3.19 + +# override dependencies for rhel 7 +%if 0%{?rhel} == 7 + %global rpm_version 4.11.3-32 +%endif + +%if 0%{?rhel} == 7 && 0%{?centos} + %global rpm_version 4.11.3-25.el7.centos.1 +%endif + +# override dependencies for fedora 26 +%if 0%{?fedora} == 26 + %global rpm_version 4.13.0.1-7 +%endif + + +%if 0%{?rhel} && 0%{?rhel} <= 7 +%bcond_with python3 +%else +%bcond_without python3 +%endif + +%if 0%{?rhel} >= 8 || 0%{?fedora} > 29 +# Disable python2 build +%bcond_with python2 +%else +%bcond_without python2 +%endif + +# YUM compat subpackage configuration +# +# level=full -> deploy all compat symlinks (conflicts with yum < 4) +# level=minimal -> deploy a subset of compat symlinks only +# (no conflict with yum >= 3.4.3-505)* +# level=preview -> minimal level with altered paths (no conflict with yum < 4) +# *release 505 renamed /usr/bin/yum to /usr/bin/yum-deprecated +%global yum_compat_level full +%global 
yum_subpackage_name yum +%if 0%{?fedora} + # Avoid file conflict with yum < 4 in all Fedoras + # It can be resolved by pretrans scriptlet but they are not recommended in Fedora + %global yum_compat_level minimal + %if 0%{?fedora} < 31 + # Avoid name conflict with yum < 4 + %global yum_subpackage_name %{name}-yum + %endif +%endif +%if 0%{?rhel} && 0%{?rhel} <= 7 + %global yum_compat_level preview + %global yum_subpackage_name nextgen-yum4 +%endif + +# paths +%global confdir %{_sysconfdir}/%{name} +%global pluginconfpath %{confdir}/plugins + +%if %{with python2} + %global py2pluginpath %{python2_sitelib}/%{name}-plugins +%endif + +%if %{with python3} + %global py3pluginpath %{python3_sitelib}/%{name}-plugins +%endif + +# Use the same directory of the main package for subpackage licence and docs +%global _docdir_fmt %{name} + + +%global pkg_summary Package manager +%global pkg_description Utility that allows users to manage packages on their systems. \ +It supports RPMs, modules and comps groups & environments. 
+ +Name: dnf +Version: 4.2.17 +Release: 1%{?dist} +Summary: %{pkg_summary} +# For a breakdown of the licensing, see PACKAGE-LICENSING +License: GPLv2+ and GPLv2 and GPL +URL: https://github.com/rpm-software-management/dnf +Source0: %{url}/archive/%{version}/%{name}-%{version}.tar.gz +BuildArch: noarch +BuildRequires: cmake +BuildRequires: gettext +# Documentation +BuildRequires: systemd +BuildRequires: bash-completion +%if %{with python3} +BuildRequires: %{_bindir}/sphinx-build-3 +Requires: python3-%{name} = %{version}-%{release} +%else +BuildRequires: %{_bindir}/sphinx-build +Requires: python2-%{name} = %{version}-%{release} +%endif +%if 0%{?rhel} && 0%{?rhel} <= 7 +Requires: python-dbus +Requires: %{_bindir}/sqlite3 +%else +%if %{with python3} +Recommends: (python3-dbus if NetworkManager) +%else +Recommends: (python2-dbus if NetworkManager) +%endif +Recommends: (%{_bindir}/sqlite3 if bash-completion) +%endif +Provides: dnf-command(alias) +Provides: dnf-command(autoremove) +Provides: dnf-command(check-update) +Provides: dnf-command(clean) +Provides: dnf-command(distro-sync) +Provides: dnf-command(downgrade) +Provides: dnf-command(group) +Provides: dnf-command(history) +Provides: dnf-command(info) +Provides: dnf-command(install) +Provides: dnf-command(list) +Provides: dnf-command(makecache) +Provides: dnf-command(mark) +Provides: dnf-command(provides) +Provides: dnf-command(reinstall) +Provides: dnf-command(remove) +Provides: dnf-command(repolist) +Provides: dnf-command(repoquery) +Provides: dnf-command(repository-packages) +Provides: dnf-command(search) +Provides: dnf-command(updateinfo) +Provides: dnf-command(upgrade) +Provides: dnf-command(upgrade-to) +Conflicts: python2-dnf-plugins-core < %{conflicts_dnf_plugins_core_version} +Conflicts: python3-dnf-plugins-core < %{conflicts_dnf_plugins_core_version} +Conflicts: python2-dnf-plugins-extras < %{conflicts_dnf_plugins_extras_version} +Conflicts: python3-dnf-plugins-extras < %{conflicts_dnf_plugins_extras_version} 
+ +%description +%{pkg_description} + +%package data +Summary: Common data and configuration files for DNF +Requires: libreport-filesystem +Obsoletes: %{name}-conf <= %{version}-%{release} +Provides: %{name}-conf = %{version}-%{release} + +%description data +Common data and configuration files for DNF + +%package -n %{yum_subpackage_name} +Requires: %{name} = %{version}-%{release} +Summary: %{pkg_summary} +%if 0%{?fedora} +%if 0%{?fedora} >= 31 +Provides: %{name}-yum = %{version}-%{release} +Obsoletes: %{name}-yum < 5 +%else +Conflicts: yum < 3.4.3-505 +%endif +%endif + +%description -n %{yum_subpackage_name} +%{pkg_description} + +%if %{with python2} +%package -n python2-%{name} +Summary: Python 2 interface to DNF +%{?python_provide:%python_provide python2-%{name}} +BuildRequires: python2-devel +BuildRequires: python2-hawkey >= %{hawkey_version} +BuildRequires: python2-libdnf >= %{hawkey_version} +BuildRequires: python2-libcomps >= %{libcomps_version} +BuildRequires: python2-libdnf +BuildRequires: python2-nose +BuildRequires: libmodulemd >= %{libmodulemd_version} +Requires: libmodulemd >= %{libmodulemd_version} +%if (0%{?rhel} && 0%{?rhel} <= 7) +BuildRequires: pygpgme +Requires: pygpgme +BuildRequires: python-enum34 +Requires: python-enum34 +%else +BuildRequires: python2-gpg +Requires: python2-gpg +BuildRequires: python2-enum34 +Requires: python2-enum34 +%endif +Requires: %{name}-data = %{version}-%{release} +%if 0%{?fedora} +Recommends: deltarpm +Recommends: python2-unbound +%endif +%if 0%{?centos} +Requires: deltarpm +%endif +Requires: python2-hawkey >= %{hawkey_version} +Requires: python2-libdnf >= %{hawkey_version} +Requires: python2-libcomps >= %{libcomps_version} +Requires: python2-libdnf +%if 0%{?rhel} && 0%{?rhel} <= 7 +BuildRequires: rpm-python >= %{rpm_version} +Requires: rpm-python >= %{rpm_version} +%else +BuildRequires: python2-rpm >= %{rpm_version} +Requires: python2-rpm >= %{rpm_version} +Recommends: rpm-plugin-systemd-inhibit +%endif +Conflicts: 
dnfdaemon < %{conflicts_dnfdaemon_version} + +%description -n python2-%{name} +Python 2 interface to DNF. +%endif +# ^ %%{with python2} + +%if %{with python3} +%package -n python3-%{name} +Summary: Python 3 interface to DNF +%{?python_provide:%python_provide python3-%{name}} +BuildRequires: python3-devel +BuildRequires: python3-hawkey >= %{hawkey_version} +BuildRequires: python3-libdnf >= %{hawkey_version} +BuildRequires: python3-libcomps >= %{libcomps_version} +BuildRequires: python3-libdnf +BuildRequires: libmodulemd >= %{libmodulemd_version} +Requires: libmodulemd >= %{libmodulemd_version} +BuildRequires: python3-nose +BuildRequires: python3-gpg +Requires: python3-gpg +Requires: %{name}-data = %{version}-%{release} +%if 0%{?fedora} +Recommends: deltarpm +%endif +%if 0%{?centos} +Requires: deltarpm +%endif +Requires: python3-hawkey >= %{hawkey_version} +Requires: python3-libdnf >= %{hawkey_version} +Requires: python3-libcomps >= %{libcomps_version} +Requires: python3-libdnf +BuildRequires: python3-rpm >= %{rpm_version} +Requires: python3-rpm >= %{rpm_version} +Recommends: python3-unbound +%if 0%{?rhel} && 0%{?rhel} <= 7 +Requires: rpm-plugin-systemd-inhibit +%else +Recommends: rpm-plugin-systemd-inhibit +%endif + +%description -n python3-%{name} +Python 3 interface to DNF. +%endif + +%package automatic +Summary: %{pkg_summary} - automated upgrades +BuildRequires: systemd +Requires: %{name} = %{version}-%{release} +%{?systemd_requires} + +%description automatic +Systemd units that can periodically download package upgrades and apply them. + + +%prep +%autosetup +mkdir build-py2 +mkdir build-py3 + + +%build +%if %{with python2} + pushd build-py2 + %cmake .. -DPYTHON_DESIRED:FILEPATH=%{__python2} + %make_build + make doc-man + popd +%endif + +%if %{with python3} + pushd build-py3 + %cmake .. 
-DPYTHON_DESIRED:FILEPATH=%{__python3} + %make_build + make doc-man + popd +%endif + + +%install +%if %{with python2} + pushd build-py2 + %make_install + popd +%endif + +%if %{with python3} + pushd build-py3 + %make_install + popd +%endif + +%find_lang %{name} +mkdir -p %{buildroot}%{confdir}/vars +mkdir -p %{buildroot}%{confdir}/aliases.d +mkdir -p %{buildroot}%{pluginconfpath}/ +mkdir -p %{buildroot}%{_sysconfdir}/%{name}/modules.d +mkdir -p %{buildroot}%{_sysconfdir}/%{name}/modules.defaults.d +%if %{with python2} +mkdir -p %{buildroot}%{py2pluginpath}/ +%endif +%if %{with python3} +mkdir -p %{buildroot}%{py3pluginpath}/__pycache__/ +%endif +mkdir -p %{buildroot}%{_localstatedir}/log/ +mkdir -p %{buildroot}%{_var}/cache/dnf/ +touch %{buildroot}%{_localstatedir}/log/%{name}.log +%if %{with python3} +ln -sr %{buildroot}%{_bindir}/dnf-3 %{buildroot}%{_bindir}/dnf +mv %{buildroot}%{_bindir}/dnf-automatic-3 %{buildroot}%{_bindir}/dnf-automatic +%else +ln -sr %{buildroot}%{_bindir}/dnf-2 %{buildroot}%{_bindir}/dnf +mv %{buildroot}%{_bindir}/dnf-automatic-2 %{buildroot}%{_bindir}/dnf-automatic +%endif +rm -vf %{buildroot}%{_bindir}/dnf-automatic-* + +# Strict conf distribution +%if 0%{?rhel} +mv -f %{buildroot}%{confdir}/%{name}-strict.conf %{buildroot}%{confdir}/%{name}.conf +%else +rm -vf %{buildroot}%{confdir}/%{name}-strict.conf +%endif + +# YUM compat layer +ln -sr %{buildroot}%{confdir}/%{name}.conf %{buildroot}%{_sysconfdir}/yum.conf +%if %{with python3} +ln -sr %{buildroot}%{_bindir}/dnf-3 %{buildroot}%{_bindir}/yum +%else +%if "%{yum_compat_level}" == "preview" +ln -sr %{buildroot}%{_bindir}/dnf-2 %{buildroot}%{_bindir}/yum4 +ln -sr %{buildroot}%{_mandir}/man8/dnf.8.gz %{buildroot}%{_mandir}/man8/yum4.8.gz +rm -f %{buildroot}%{_mandir}/man8/yum.8.gz +%else +ln -sr %{buildroot}%{_bindir}/dnf-2 %{buildroot}%{_bindir}/yum +%endif +%endif +%if "%{yum_compat_level}" == "full" +mkdir -p %{buildroot}%{_sysconfdir}/yum +ln -sr %{buildroot}%{pluginconfpath} 
%{buildroot}%{_sysconfdir}/yum/pluginconf.d +ln -sr %{buildroot}%{confdir}/protected.d %{buildroot}%{_sysconfdir}/yum/protected.d +ln -sr %{buildroot}%{confdir}/vars %{buildroot}%{_sysconfdir}/yum/vars +%endif + + +%check +%if %{with python2} + pushd build-py2 + ctest -VV + popd +%endif + +%if %{with python3} + pushd build-py3 + ctest -VV + popd +%endif + + +%post +%systemd_post dnf-makecache.timer + +%preun +%systemd_preun dnf-makecache.timer + +%postun +%systemd_postun_with_restart dnf-makecache.timer + + +%post automatic +%systemd_post dnf-automatic.timer +%systemd_post dnf-automatic-notifyonly.timer +%systemd_post dnf-automatic-download.timer +%systemd_post dnf-automatic-install.timer + +%preun automatic +%systemd_preun dnf-automatic.timer +%systemd_preun dnf-automatic-notifyonly.timer +%systemd_preun dnf-automatic-download.timer +%systemd_preun dnf-automatic-install.timer + +%postun automatic +%systemd_postun_with_restart dnf-automatic.timer +%systemd_postun_with_restart dnf-automatic-notifyonly.timer +%systemd_postun_with_restart dnf-automatic-download.timer +%systemd_postun_with_restart dnf-automatic-install.timer + + +%files -f %{name}.lang +%{_bindir}/%{name} +%if 0%{?rhel} && 0%{?rhel} <= 7 +%{_sysconfdir}/bash_completion.d/%{name} +%else +%dir %{_datadir}/bash-completion +%dir %{_datadir}/bash-completion/completions +%{_datadir}/bash-completion/completions/%{name} +%endif +%{_mandir}/man8/%{name}.8* +%{_mandir}/man8/yum2dnf.8* +%{_mandir}/man7/dnf.modularity.7* +%{_unitdir}/%{name}-makecache.service +%{_unitdir}/%{name}-makecache.timer +%{_var}/cache/%{name}/ + +%files data +%license COPYING PACKAGE-LICENSING +%doc AUTHORS README.rst +%dir %{confdir} +%dir %{confdir}/modules.d +%dir %{confdir}/modules.defaults.d +%dir %{pluginconfpath} +%dir %{confdir}/protected.d +%dir %{confdir}/vars +%dir %{confdir}/aliases.d +%exclude %{confdir}/aliases.d/zypper.conf +%config(noreplace) %{confdir}/%{name}.conf +%config(noreplace) %{confdir}/protected.d/%{name}.conf 
+%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} +%ghost %attr(644,-,-) %{_localstatedir}/log/hawkey.log +%ghost %attr(644,-,-) %{_localstatedir}/log/%{name}.log +%ghost %attr(644,-,-) %{_localstatedir}/log/%{name}.librepo.log +%ghost %attr(644,-,-) %{_localstatedir}/log/%{name}.rpm.log +%ghost %attr(644,-,-) %{_localstatedir}/log/%{name}.plugin.log +%ghost %attr(755,-,-) %dir %{_sharedstatedir}/%{name} +%ghost %attr(644,-,-) %{_sharedstatedir}/%{name}/groups.json +%ghost %attr(755,-,-) %dir %{_sharedstatedir}/%{name}/yumdb +%ghost %attr(755,-,-) %dir %{_sharedstatedir}/%{name}/history +%{_mandir}/man5/%{name}.conf.5* +%{_tmpfilesdir}/%{name}.conf +%{_sysconfdir}/libreport/events.d/collect_dnf.conf + +%files -n %{yum_subpackage_name} +%if "%{yum_compat_level}" == "full" +%{_bindir}/yum +%{_sysconfdir}/yum.conf +%{_sysconfdir}/yum/pluginconf.d +%{_sysconfdir}/yum/protected.d +%{_sysconfdir}/yum/vars +%{_mandir}/man8/yum.8* +%{_mandir}/man5/yum.conf.5.* +%{_mandir}/man8/yum-shell.8* +%{_mandir}/man1/yum-aliases.1* +%config(noreplace) %{confdir}/protected.d/yum.conf +%else +%exclude %{_sysconfdir}/yum.conf +%exclude %{_sysconfdir}/yum/pluginconf.d +%exclude %{_sysconfdir}/yum/protected.d +%exclude %{_sysconfdir}/yum/vars +%exclude %{confdir}/protected.d/yum.conf +%exclude %{_mandir}/man5/yum.conf.5.* +%exclude %{_mandir}/man8/yum-shell.8* +%exclude %{_mandir}/man1/yum-aliases.1* +%endif + +%if "%{yum_compat_level}" == "minimal" +%{_bindir}/yum +%{_mandir}/man8/yum.8* +%endif + +%if "%{yum_compat_level}" == "preview" +%{_bindir}/yum4 +%{_mandir}/man8/yum4.8* +%exclude %{_mandir}/man8/yum.8* +%endif + +%if %{with python2} +%files -n python2-%{name} +%{_bindir}/%{name}-2 +%exclude %{python2_sitelib}/%{name}/automatic +%{python2_sitelib}/%{name}/ +%dir %{py2pluginpath} +%endif + +%if %{with python3} +%files -n python3-%{name} +%{_bindir}/%{name}-3 +%exclude %{python3_sitelib}/%{name}/automatic +%{python3_sitelib}/%{name}/ +%dir %{py3pluginpath} +%dir 
%{py3pluginpath}/__pycache__ +%endif + +%files automatic +%{_bindir}/%{name}-automatic +%config(noreplace) %{confdir}/automatic.conf +%{_mandir}/man8/%{name}-automatic.8* +%{_unitdir}/%{name}-automatic.service +%{_unitdir}/%{name}-automatic.timer +%{_unitdir}/%{name}-automatic-notifyonly.service +%{_unitdir}/%{name}-automatic-notifyonly.timer +%{_unitdir}/%{name}-automatic-download.service +%{_unitdir}/%{name}-automatic-download.timer +%{_unitdir}/%{name}-automatic-install.service +%{_unitdir}/%{name}-automatic-install.timer +%if %{with python3} +%{python3_sitelib}/%{name}/automatic/ +%else +%{python2_sitelib}/%{name}/automatic/ +%endif + +%changelog +* Mon Nov 25 2019 Aleš Matěj - 4.2.17-1 +- Enable versionlock for check-update command (RhBug:1750620) +- Add error message when no active modules matched (RhBug:1696204) +- Log mirror failures as warning when repo load fails (RhBug:1713627) +- dnf-automatic: Change all systemd timers to a fixed time of day (RhBug:1754609) +- DNF can use config from the remote location (RhBug:1721091) +- [doc] update reference to plugin documentation (RhBug:1706386) +- [yum compatibility] Report all packages in repoinfo +- [doc] Add definition of active/inactive module stream +- repoquery: Add a switch to disable modular excludes +- Report more informative messages when no match for argument (RhBug:1709563) +- [doc] Add description of excludes in dnf +- Report more descriptive message when removed package is excluded +- Add module repoquery command +- Fix assumptions about ARMv8 and the way the rpm features work (RhBug:1691430) +- Add Requires information into module info commands +- Enhance inheritance of transaction reasons (RhBug:1672618,1769788) + +* Thu Nov 14 2019 Aleš Matěj - 4.2.16-1 +- Make DNF compatible with FIPS mode (RhBug:1762032) +- Return always alphabetically sorted modular profiles +- Revert "Fix messages for starting and failing scriptlets" + +* Tue Nov 05 2019 Pavla Kratochvilova - 4.2.15-1 +- Fix downloading local 
packages into destdir (RhBug:1727137) +- Report skipped packages with identical nevra only once (RhBug:1643109) +- Restore functionality of dnf remove --duplicates (RhBug:1674296) +- Improve API documentation +- Document NEVRA parsing in the man page +- Do not wrap output when no terminal (RhBug:1577889) +- Allow to ship alternative dnf.conf (RhBug:1752249) +- Don't check if repo is expired if it doesn't have loaded metadata (RhBug:1745170) +- Remove duplicate entries from "dnf search" output (RhBug:1742926) +- Set default value of repo name attribute to repo id (RhBug:1669711) +- Allow searching in disabled modules using "dnf module provides" (RhBug:1629667) +- Group install takes obsoletes into account (RhBug:1761137) +- Improve handling of vars +- Do not load metadata for repolist commands (RhBug:1697472,1713055,1728894) +- Fix messages for starting and failing scriptlets (RhBug:1724779) +- Don't show older install-only pkgs updates in updateinfo (RhBug:1649383,1728004) +- Add --ids option to the group command (RhBug:1706382) +- Add --with_cve and --with_bz options to the updateinfo command (RhBug:1750528) + +* Thu Sep 19 2019 Pavla Kratochvilova - 4.2.11-1 +- Improve modularity documentation (RhBug:1730162,1730162,1730807,1734081) +- Fix detection whether system is running on battery (used by metadata caching timer) (RhBug:1498680) +- New repoquery queryformat: %{reason} +- Print rpm errors during test transaction (RhBug:1730348) +- Fix: --setopt and repo with dots +- Fix incorrectly marked profile and stream after failed rpm transaction check (RhBug:1719679) +- Show transaction errors inside dnf shell (RhBug:1743644) +- Don't reinstall modified packages with the same NEVRA (RhBug:1644241) +- dnf-automatic now respects versionlock excludes (RhBug:1746562) + +* Tue Aug 13 2019 Pavla Kratochvilova - 4.2.9-1 +- Prevent printing empty Error Summary (RhBug: 1690414) +- [doc] Add user_agent and countme options + +* Tue Aug 06 2019 Pavla Kratochvilova - 4.2.8-1 +- 
Enhance synchronization of rpm transaction to swdb +- Accept multiple specs in repoquery options (RhBug:1667898) +- Prevent switching modules in all cases (RhBug:1706215) +- [history] Don't store failed transactions as succeeded +- [history] Do not require root for informative commands +- [dnssec] Fix UnicodeWarning when using new rpm (RhBug:1699650) +- Print rpm error messages during transaction (RhBug:1677199) +- Report missing default profile as an error (RhBug:1669527) +- Apply excludes before modular excludes (RhBug:1709453) +- Improve help for command line arguments (RhBug:1659328) +- [doc] Describe a behavior when plugin is removed (RhBug:1700741) +- Add new modular API method ModuleBase.get_modules +- Mark features used by ansible, anaconda and subscription-manager as an API + +* Tue Jun 11 2019 Pavla Kratochvilova - 4.2.7-1 +- Set default to skip_if_unavailable=false (RhBug:1679509) +- Fix package reinstalls during yum module remove (RhBug:1700529) +- Fail when "-c" option is given nonexistent file (RhBug:1512457) +- Reuse empty lock file instead of stopping dnf (RhBug:1581824) +- Propagate comps 'default' value correctly (RhBug:1674562) +- Better search of provides in /(s)bin/ (RhBug:1657993) +- Add detection for armv7hcnl (RhBug:1691430) +- Fix group install/upgrade when group is not available (RhBug:1707624) +- Report not matching plugins when using --enableplugin/--disableplugin + (RhBug:1673289) (RhBug:1467304) +- Add support of modular FailSafe (RhBug:1623128) +- Replace logrotate with build-in log rotation for dnf.log and dnf.rpm.log + (RhBug:1702690) + +* Tue May 07 2019 Pavla Kratochvilova - 4.2.6-1 +- librepo: Turn on debug logging only if debuglevel is greater than 2 (RhBug:1355764,1580022) +- Fix issues with terminal hangs when attempting bash completion (RhBug:1702854) +- Rename man page from dnf.automatic to dnf-automatic to match command name +- [provides] Enhanced detecting of file provides (RhBug:1702621) +- [provides] Sort the output 
packages alphabetically + +* Thu Apr 25 2019 Pavla Kratochvilova - 4.2.5-1 +- Fix multilib obsoletes (RhBug:1672947) +- Do not remove group package if other packages depend on it +- Remove duplicates from "dnf list" and "dnf info" outputs +- Installroot now requires absolute path +- Fix the installation of completion_helper.py +- Allow globs in setopt in repoid part +- Fix formatting of message about free space required +- [doc] Add info of relation update_cache with fill_sack (RhBug:1658694) +- Fix installation failure when duplicate RPMs are specified (RhBug:1687286) +- Add command abbreviations (RhBug:1634232) +- Allow plugins to terminate dnf (RhBug:1701807) + +* Wed Mar 27 2019 Pavla Kratochvilova - 4.2.2-1 +- [conf] Use environment variables prefixed with DNF_VAR_ +- Enhance documentation of --whatdepends option (RhBug:1687070) +- Allow adjustment of repo from --repofrompath (RhBug:1689591) +- Document cachedir option (RhBug:1691365) +- Retain order of headers in search results (RhBug:1613860) +- Solve traceback with the "dnf install @module" (RhBug:1688823) +- Build "yum" instead of "dnf-yum" on Fedora 31 + +* Mon Mar 11 2019 Pavla Kratochvilova - 4.2.1-1 +- Do not allow direct module switch (RhBug:1669491) +- Use improved config parser that preserves order of data +- Fix alias list command (RhBug:1666325) +- Postpone yum conflict to F31 +- Update documentation: implemented plugins; options; deprecated commands (RhBug:1670835,1673278) +- Support zchunk (".zck") compression +- Fix behavior of ``--bz`` option when specifying more values +- Follow RPM security policy for package verification +- Update modules regardless of installed profiles +- Add protection of yum package (RhBug:1639363) +- Fix ``list --showduplicates`` (RhBug:1655605) + +* Tue Feb 12 2019 Pavla Kratochvilova - 4.1.0-1 +- Allow to enable modules that break default modules (RhBug:1648839) +- Enhance documentation - API examples +- Add best as default behavior (RhBug:1670776,1671683) +- Add 
--nobest option + +* Wed Dec 12 2018 Jaroslav Mracek - 4.0.10-1 +- Updated difference YUM vs. DNF for yum-updateonboot +- Added new command ``dnf alias [options] [list|add|delete] [...]`` to allow the user to + define and manage a list of aliases +- Enhanced documentation +- Unifying return codes for remove operations +- [transaction] Make transaction content available for commands +- Triggering transaction hooks if no transaction (RhBug:1650157) +- Add hotfix packages to install pool (RhBug:1654738) +- Report group operation in transaction table +- [sack] Change algorithm to calculate rpmdb_version + +* Thu Nov 22 2018 Jaroslav Mracek - 4.0.9-1 +- Added dnf.repo.Repo.get_http_headers +- Added dnf.repo.Repo.set_http_headers +- Added dnf.repo.Repo.add_metadata_type_to_download +- Added dnf.repo.Repo.get_metadata_path +- Added dnf.repo.Repo.get_metadata_content +- Added --changelogs option for check-update command +- [module] Add information about active modules +- Hide messages created only for logging +- Enhanced --setopt option +- [module] Fix dnf remove @ +- [transaction] Make transaction content available for plugins + +* Mon Oct 15 2018 Jaroslav Mracek - 4.0.4-1 +- Update to 4.0.4 +- Add dnssec extension +- Set termforce to AUTO to automatically detect if stdout is terminal +- Repoquery command accepts --changelogs option (RhBug:1483458) +- Calculate sack version from all installed packages (RhBug:1624291) +- [module] Allow to enable module dependencies (RhBug:1622566) + +* Tue Sep 25 2018 Jaroslav Mracek - 3.6.1-1 +- [module] Improved module commands list, info +- [module] Reports error from module solver +- Fix: Error detected when calling 'RepoCB.fastestMirror' (RhBug:1628056) +- Preserve packages from other installed mod profiles (RhBug:1629841) +- [spec] Postpone conflict with yum to Fedora 30+ (RhBug:1600444) +- [cli] Install command recommends alternative packages (RhBug:1625586) +- [cli] Fix case insensitive hint (1628514) +- Fix installed profiles for 
module info (RhBug:1629689) +- Fix module provides not having consistent output (RhBug:1623866) +- Enhance label for transaction table (RhBug:1609919) +- Implement C_, the gettext function with a context (RhBug:1305340) +- Actually disambiguate some messages using C_ (RhBug:1305340) +- Restore 'strict' choice for group installs (#1461539) +- [repoquery] More strict queryformat parsing (RhBug:1631458) +- Redirect repo progress to std error (RhBug:1626011) +- Unify behavior of remove and module remove (RhBug:1629848) +- Change behavior of disabled module for module install (RhBug:1629711) +- Allow enablement on disabled plugin (RhBug:1614539) + +* Mon Sep 10 2018 Jaroslav Mracek - 3.5.1-1 +- [module] Fixed list and info subcommands + +* Fri Sep 07 2018 Jaroslav Mracek - 3.5.0-1 +- New implementation of modularity + +* Fri Aug 31 2018 Daniel Mach - 3.4.0-1 +- [history] Fix 'attempt to write a readonly database' error in addConsoleOutputLine(). +- [spec] Improve YUM v3 compat layer. +- [doc] document missing link from yum-rhn-plugin to dnf-plugin-spacewalk (RhBug:1580356) +- [doc] document difference between yum and dnf when listing packages (RhBug:1615834) +- [doc] document missing download functionality after transaction table is displayed (RhBug:1585140) +- [systemd] dnf-makecache.timer: move the ordering after network to .service +- [translations] Update translations from zanata. +- [cli] Fix 'already installed' message output. +- [module] change 'module_nsvp' to 'module_spec' +- [module] show module profiles without ', ...' +- [module] unify usability of RepoModuleDict.get_info*(); fix traceback +- [security] fix update count (RhBug:1585138) +- [cli] enable reposync to use --destdir (RhBug:1582152) +- [repo] Replace dnf.repo.Repo with libdnf implementation. +- [dnf] Limit DeprecationWarning to dnf.* modules only. 
+ +* Mon Aug 13 2018 Daniel Mach - 3.3.0-1 +- [misc] Fallback to os.getuid() if /proc/self/loginuid can't be read (RhBug:1597005) +- [translations] Update translations from zanata. +- [doc] Update module documentation. +- [module] Fix `module provides` output. +- [module] Add `module reset` command. +- [module] Fix module disable command +- [repo] Improve error message on broken repo (RhBug:1595796) +- [doc] Enhance a command documentation (RhBug:1361617) +- [module] Automatically save module persistor in do_transaction(). +- [drpm] Fixed setting deltarpm_percentage=0 to switch drpm off +- [repo] Split base.download_packages into two functions +- [output] Use libdnf wrapper for smartcols +- [conf] Do not traceback on empty option (RhBug:1613577) + +* Tue Aug 07 2018 Daniel Mach - 3.2.0-1 +- [sack] Use module_platform_id option. +- [module] Switch module persistor to libdnf implementation. +- [module] Auto-enable module streams based on installed RPMs. +- [transaction] Fix: show packages from the current transaction. +- [conf] Convert any VectorString type to list. +- [module] Replace 'enabled' config option with 'state'. +- [install_specs] Do not exclude groups' packages +- [module] Use module sack filtering from libdnf +- [module] Many UX fixes. + +* Fri Jul 27 2018 Daniel Mach - 3.1.0-1 +- [module] Move 'hotfixes' conf option to libdnf and rename it to 'module_hotfixes'. +- [goal] Exclude @System repo packages from distro_sync. +- [conf] Setup configuration values using C++ bindings. +- [module] Drop module lock command. +- [crypto] Use handle from repo in dnf.crypto.retrieve(). +- [module] Assume a 'default' profile exists for all modules (RhBug:1568165) +- [base] Introduce easy installation of package, group and module specs. + +* Thu Jul 19 2018 Daniel Mach - 3.0.4-1 +- [transaction] Fix 'TransactionItem not found for key' error. +- [module] Allow removing module profile without specifying a stream. 
+- [module] Fix 'BaseCli' object has no attribute '_yumdb' error. +- [callback] Fix TransactionDisplay.PKG_ERASE redirect to a non-existing constant. +- [spec] Change yum compat package version to 4.0.version. +- [cache] Clean transaction temp files after successfull transaction +- [log] Log messages from libdnf logger +- [transaction] Add states to report rpm transaction progress +- [transaction] Cache TransactionItem during handling of RPM callback (RhBug:1599597) +- [systemd] dnf-makecache.timer: move to multi-user to fix loop + +* Thu Jul 12 2018 Martin Hatina - 3.0.3-1 +- Bug fix release + +* Fri Jun 29 2018 Jaroslav Mracek - 3.0.2-1 +- Update to 3.0.2-1 + +* Tue Jun 26 2018 Jaroslav Mracek - 3.0.1-1 +- Update to 3.0.1-1 +- Support of MODULES - new DNF command `module` +- Add attribute dnf.conf.Conf.proxy_auth_method +- New repoquery option `--depends` and `--whatdepends` +- Enhanced support of variables +- Enhanced documentation +- Resolves: rhbz#1565599 +- Resolves: rhbz#1508839 +- Resolves: rhbz#1506486 +- Resolves: rhbz#1506475 +- Resolves: rhbz#1505577 +- Resolves: rhbz#1505574 +- Resolves: rhbz#1505573 +- Resolves: rhbz#1480481 +- Resolves: rhbz#1496732 +- Resolves: rhbz#1497272 +- Resolves: rhbz#1488100 +- Resolves: rhbz#1488086 +- Resolves: rhbz#1488112 +- Resolves: rhbz#1488105 +- Resolves: rhbz#1488089 +- Resolves: rhbz#1488092 +- Resolves: rhbz#1486839 +- Resolves: rhbz#1486839 +- Resolves: rhbz#1486827 +- Resolves: rhbz#1486816 +- Resolves: rhbz#1565647 +- Resolves: rhbz#1583834 +- Resolves: rhbz#1576921 +- Resolves: rhbz#1270295 +- Resolves: rhbz#1361698 +- Resolves: rhbz#1369847 +- Resolves: rhbz#1368651 +- Resolves: rhbz#1563841 +- Resolves: rhbz#1387622 +- Resolves: rhbz#1575998 +- Resolves: rhbz#1577854 +- Resolves: rhbz#1387622 +- Resolves: rhbz#1542416 +- Resolves: rhbz#1542416 +- Resolves: rhbz#1496153 +- Resolves: rhbz#1568366 +- Resolves: rhbz#1539803 +- Resolves: rhbz#1552576 +- Resolves: rhbz#1545075 +- Resolves: rhbz#1544359 +- 
Resolves: rhbz#1547672 +- Resolves: rhbz#1537957 +- Resolves: rhbz#1542920 +- Resolves: rhbz#1507129 +- Resolves: rhbz#1512956 +- Resolves: rhbz#1512663 +- Resolves: rhbz#1247083 +- Resolves: rhbz#1247083 +- Resolves: rhbz#1247083 +- Resolves: rhbz#1519325 +- Resolves: rhbz#1492036 +- Resolves: rhbz#1391911 +- Resolves: rhbz#1391911 +- Resolves: rhbz#1479330 +- Resolves: rhbz#1505185 +- Resolves: rhbz#1305232 + +* Wed Oct 18 2017 Igor Gnatenko - 2.7.5-1 +- Improve performance for excludes and includes handling (RHBZ #1500361) +- Fixed problem of handling checksums for local repositories (RHBZ #1502106) +- Fix traceback when using dnf.Base.close() (RHBZ #1503575) + +* Mon Oct 16 2017 Jaroslav Mracek - 2.7.4-1 +- Update to 2.7.4-1 +- Enhanced performance for excludes and includes handling +- Solved memory leaks at time of closing of dnf.Base() +- Resolves: rhbz#1480979 - I thought it abnormal that dnf crashed. +- Resolves: rhbz#1461423 - Memory leak in python-dnf +- Resolves: rhbz#1499564 - dnf list installed crashes +- Resolves: rhbz#1499534 - dnf-2 is much slower than dnf-1 when handling groups +- Resolves: rhbz#1499623 - Mishandling stderr vs stdout (dnf search, dnf repoquery) + +* Fri Oct 06 2017 Igor Gnatenko - 2.7.3-1 +- Fix URL detection (RHBZ #1472847) +- Do not remove downloaded files with --destdir option (RHBZ #1498426) +- Fix handling of conditional packages in comps (RHBZ #1427144) + +* Mon Oct 02 2017 Jaroslav Mracek - 2.7.2-1 +- Update to 2.7.2-1 +- Added new option ``--comment=`` that adds a comment to transaction in history +- :meth:`dnf.Base.pre_configure_plugin` configures plugins by running their pre_configure() method +- Added pre_configure() method for plugins and commands to configure dnf before repos are loaded +- Resolves: rhbz#1421478 - dnf repository-packages: error: unrecognized arguments: -x rust-rpm-macros +- Resolves: rhbz#1491560 - 'dnf check' reports spurious "has missing requires of" errors +- Resolves: rhbz#1465292 - DNF remove 
protected duplicate package +- Resolves: rhbz#1279001 - [RFE] Missing dnf --downloaddir option +- Resolves: rhbz#1212341 - [RFE] Allow plugins to override the core configuration +- Resolves: rhbz#1299482 - mock --init fails with message "Failed calculating RPMDB checksum" +- Resolves: rhbz#1488398 - dnf upstream tests failures on f26 +- Resolves: rhbz#1192811 - dnf whatprovides should show which provides matched a pattern +- Resolves: rhbz#1288845 - "dnf provides" wildcard matching is unreliable (not all packages with matches listed) +- Resolves: rhbz#1473933 - [abrt] dnf-automatic: resolved(): rpm_conf.py:58:resolved:AttributeError: 'Rpmconf' object has no attribute '_interactive' +- Resolves: rhbz#1237349 - dnf autoremove not removing what dnf list extras shows +- Resolves: rhbz#1470050 - the 'priority=' option in /etc/yum.repos.d/*.repo is not respected +- Resolves: rhbz#1347927 - dnf --cacheonly downloads packages +- Resolves: rhbz#1478115 - [abrt] dnf: _hcmd_undo(): __init__.py:888:_hcmd_undo:IndexError: list index out of range +- Resolves: rhbz#1461171 - RFE: support --advisory= with install +- Resolves: rhbz#1448874 - "dnf needs-restarting" vanished from bash completion +- Resolves: rhbz#1495116 - Dnf version fails with traceback in container + +* Mon Aug 07 2017 Jaroslav Mracek 2.6.3-1 +- Fix problem with dnf.Package().remote_location() (RhBug:1476215) (Jaroslav + Mracek) +- Change behavior of -C according to documentation (RhBug:1473964) (Jaroslav + Mracek) +- It should prevent to ask attribute of None (RhBug:1359482) (Jaroslav Mracek) +- Solve a problems with --arch options (RhBug:1476834) (Jaroslav Mracek) +- Use security plugin code for dnf-automatic (Jaroslav Mracek) +- Fix unicode error for python2 (Jaroslav Mracek) +- Inform about packages installed for group (Jaroslav Mracek) +- Provide info if pkg is removed due to dependency (RhBug:1244755) (Jaroslav + Mracek) +- Unify format of %%{_mandir} paths in dnf.spec (Jaroslav Mracek) +- Remove 
test_yumlayer.py as unneeded test (Jaroslav Mracek) +- Provide yum4 package for rhel7 build (Jaroslav Mracek) +- Make yum compatible layer very minimal (RhBug:1476748) (Jaroslav Mracek) +- Remove metadata_expire from yum compatible layer (Jaroslav Mracek) +- Remove keepcache from yum compatibility layer (Jaroslav Mracek) +- Remove options from yum conf (Jaroslav Mracek) +- Remove unused functionality from yum compatible layer (Jaroslav Mracek) +- Add deplist command for dnf (Jaroslav Mracek) +- Fix problems with --downloaddir options (RhBug:1476464) (Jaroslav Mracek) +- Move description of --forcearch into proper place (Jaroslav Mracek) +- Provide description of --downloaddir option (Jaroslav Mracek) +- Fix if in spec file (Jaroslav Mracek) +- Add description of "test" tsflags (Jaroslav Mracek) +- Enable import gpg_keys with tsflag test (RhBug:1464192) (Jaroslav Mracek) +- Keep old reason when undoing erase (RhBug:1463107) (Eduard Čuba) +- spec: eliminate other weak dependencies for el<=7 (Igor Gnatenko) +- spec: do not strongly require inhibit plugin (Igor Gnatenko) +- Inform that packages are only downloaded (RhBug:1426196) (Jaroslav Mracek) +- Move releasever check after the etc/dnf/vars substitutions. 
(Alexander + Kanavin) +- Provide substitution for Repodict.add_new_repo() (RhBug:1457507) (Jaroslav + Mracek) + +* Mon Jul 24 2017 Jaroslav Mracek 2.6.2-1 +- Remove autodeglob optimization (Jaroslav Rohel) +- Integrate --destdir with --destdir from download plugin (Ondřej Sojka) +- Add CLI option --destdir (RhBug:1279001) (Ondřej Sojka) +- Add myself to the AUTHORS file (Nathaniel McCallum) +- Add the --forcearch CLI flag (Nathaniel McCallum) +- Add 'ignorearch' option (Nathaniel McCallum) +- Provide an API for setting 'arch' and 'basearch' (Nathaniel McCallum) +- Add nevra forms for repoquery command (Jaroslav Rohel) +- Fix UnicodeDecodeError during checkSig() on non UTF-8 locale (RhBug:1397848) + (Jaroslav Rohel) +- Add dnf option --noautoremove (RhBug:1361424) (Jaroslav Mracek) +- Add group argument for mark command (Jaroslav Mracek) +- Report problems for each pkg during gpgcheck (RhBug:1387925) (Jaroslav + Mracek) +- fix minor spelling mistakes (René Genz) +- Print warning when wrong delimiter in cache (RhBug:1332099) (Vítek Hoch) +- Fix the loading of config for dnf-automatic command_email (RhBug:1470116) + (Jaroslav Rohel) +- Enable download progress bar if redirected output (RhBug:1161950) (Jaroslav + Mracek) +- Support short abbreviations of commands (RhBug:1320254) (Vítek Hoch) +- Remove unused variables kwargs (Jaroslav Mracek) +- Not reinstall packages if install from repository-pkgs used (Jaroslav Mracek) +- bump dnf version to 2.6.0 (Igor Gnatenko) +- spec: use python2- prefix for hawkey (Igor Gnatenko) +- spec: use sphinx-build binary rather than package name (Igor Gnatenko) +- spec: python-bugzilla is not needed for building (Igor Gnatenko) +- spec: fix instructions about generating tarball (Igor Gnatenko) +- po: Update translations (Igor Gnatenko) +- Add an example of installation without weak-deps (RhBug:1424723) (Jaroslav + Mracek) +- Add detection if mirrorlist is used for metalink (Jaroslav Mracek) +- Rename variable (Jaroslav Mracek) +- Add 
--groupmember option to repoquery (RhBug:1462486) (Jaroslav Mracek) +- Check checksum for local repositories (RhBug:1314405) (Jaroslav Mracek) +- Spelling fixes (Ville Skyttä) +- repoquery --obsoletes prints obsoletes (RhBug:1457368) (Matěj Cepl) +- Provide pkg name hint for icase (RhBug:1339280) (RhBug:1138978) (Jaroslav + Mracek) +- Return only latest pkgs for "dnf list upgrades" (RhBug:1423472) (Jaroslav + Mracek) +- cleanup code not executed in case of exception (Marek Blaha) +- Allow to modify message for user confirmation (Jaroslav Mracek) +- Add autocheck_running_kernel config option (Štěpán Smetana) +- Inform about skipped packages for group install (RhBug:1427365) (Jaroslav + Mracek) +- Remove group remove unneeded pkgs (RhBug:1398871) (RhBug:1432312) (Jaroslav + Mracek) +- po: update translations (Igor Gnatenko) + +* Mon Jun 12 2017 Jaroslav Mracek 2.5.1-1 +- bump version to 2.5.1 + update release notes (Jaroslav Mracek) +- Fix: dnf update --refresh fails for repo_gpgcheck=1 (RhBug:1456419) (Daniel + Mach) +- Don't try to cut datetime message (Jaroslav Rohel) +- Use localized datetime format (RhBug:1445021) (Jaroslav Rohel) +- Work with locale date (Jaroslav Rohel) +- Use ISO 8601 time format in logfile (Jaroslav Rohel) +- Add unitest to prevent callbacks breakage (Jaroslav Mracek) +- Provide compatibility for tools that do not use total_drpms (Jaroslav Mracek) +- Requires strict usage of repoquery --recursive (Jaroslav Mracek) +- Fix output for --resolve with --installed for repoquery (Jaroslav Mracek) +- Remove unnecessary inheritance of yum conf options (Martin Hatina) +- Remove alwaysprompt option support (RhBug:1400714) (Jaroslav Rohel) +- Allow to install groups with multilib_policy=all (RhBug:1250702) (Jaroslav + Mracek) +- Redesign Base.install() to provide alternatives (Jaroslav Mracek) +- Report excludes includes into logger.debug (RhBug:1381988) (Jaroslav Mracek) +- Provide new API to parse string to NEVRA () (Jaroslav Mracek) +- Add more 
repoquery querytags (Jaroslav Rohel) +- Not hide tracebacks (Jaroslav Mracek) +- Solve error handling for get attr in yumdb (RhBug:1397848) (Jaroslav Mracek) +- Provide a better error if throttle to low (RhBug:1321407) (Jaroslav Mracek) +- Change timeout to 30s (RhBug:1291867) (Jaroslav Mracek) +- Add pre_transaction hook for plugins (Jaroslav Rohel) +- Not download metadata if "dnf history [info|list|userinstalled]" (Jaroslav + Mracek) +- Not download metadata if "dnf repo-pkgs list --installed" (Jaroslav + Mracek) +- Not download metadata if "dnf list --installed" (RhBug:1372895) (Jaroslav + Mracek) +- Format pkg str for repoquery --tree due to -qf (RhBug:1444751) (Jaroslav + Mracek) + +* Mon May 22 2017 Jaroslav Mracek 2.5.0-1 +- Update release notes (Jaroslav Mracek) +- Change documentation for history --userinstalled (RhBug:1370062) (Jaroslav + Mracek) +- Change example to install plugin using versionlock (Jaroslav Mracek) +- Remove unused method Goal.best_run_diff() (Jaroslav Mracek) +- Change recommendations if some problems appear (RhBug:1293067) (Jaroslav + Mracek) +- Report problems for goals with optional=True (Jaroslav Mracek) +- Format resolve problem messages in method in dnf.util (Jaroslav Mracek) +- Enhance reports about broken dep (RhBug:1398040)(RhBug:1393814) (Jaroslav + Mracek) +- search: do not generate error if not match anything (RhBug:1342157) (Jaroslav + Rohel) +- Check if any plugin is removed in transaction (RhBug:1379906) (Jaroslav + Mracek) +- Show progress for DRPM (RhBug:1198975) (Jaroslav Mracek) +- Fix disabledplugin option (Iavael) +- [history]: fixed info command merged output (Eduard Čuba) + +* Thu May 11 2017 Jaroslav Mracek 2.4.1-1 +- bump version to 2.4.1 + update release notes (Jaroslav Mracek) +- goal: do not mark weak dependencies as userinstalled (Igor Gnatenko) +- fix typo in supplements (RhBug:1446756) (Igor Gnatenko) +- Describe present behavior of installonly_limit conf option (Jaroslav Mracek) +- Reset all transaction 
for groups if Base.reset() (RhBug:1446432) (Jaroslav + Mracek) +- Explain how add negative num for --latest-limit (RhBug:1446641) (Jaroslav + Mracek) +- trivial: don't duplicate option names (Igor Gnatenko) +- Add support for --userinstalled for repoquery command (RhBug:1278124) + (Jaroslav Rohel) +- Fix header of search result sections (RhBug:1301868) (Jaroslav Rohel) +- Filter out src for get_best_selector (Jaroslav Mracek) +- Add minor changes in formating of documentation (Jaroslav Mracek) + +* Tue May 02 2017 Jaroslav Mracek 2.4.0-1 +- po: Update translations (Igor Gnatenko) +- po: Update translations (Igor Gnatenko) +- introduce '--enableplugin' option (Martin Hatina) +- Improve detection of file patterns (Jaroslav Mracek) +- Add method _get_nevra_solution() for subject (Jaroslav Mracek) +- Do not add "*" into query filter in _nevra_to_filters() (Jaroslav Mracek) +- Remove usage of nevra_possibilities_real() (Jaroslav Mracek) +- Increase performance for downgrade_to() (Jaroslav Mracek) +- Add additional keys for get_best_query() (Jaroslav Mracek) +- Increase performance for get_best_selector() (Jaroslav Mracek) +- Increase performance for get_best_query() (Jaroslav Mracek) +- Fix "Package" text translation (RhBug:1302935) (Jaroslav Rohel) +- Create a warning if releasever is None (Jaroslav Mracek) +- Adds cost, excludepkgs, and includepkgs to Doc (RhBug:1248684) (Jaroslav + Mracek) +- Change auto-detection of releasever in empty installroot (Jaroslav Mracek) +- Do not load system repo for makecache command (RhBug:1441636) (Jaroslav + Mracek) +- Do not raise assertion if group inst and rmv pkgs (RhBug:1438438) (Jaroslav + Mracek) +- yum layer using python3 (Martin Hatina) +- Filter url protocols for baseurl in Package.remote_location (Jaroslav Mracek) +- Add armv5tl to arm basearch (Neal Gompa) +- Setup additional parameters for handler for remote packages (Jaroslav Mracek) +- Use same method for user/password setting of every librepo.handle (Jaroslav + 
Mracek) +- Fix PEP8 violations and remove unused import (Jaroslav Mracek) +- Handle unknown file size in download progress (Jaroslav Mracek) +- Allow to delete cashed files from command line by clean command (Jaroslav + Mracek) +- Save command line packages into chachedir (RhBug:1256313) (Jaroslav Mracek) +- Add progress bar for download of commandline pkgs (RhBug:1161950) (Jaroslav + Mracek) +- Fix minor typo Closes: #781 Approved by: ignatenkobrain (Yuri Chornoivan) +- Mark unremoved packages as failed (RhBug:1421244) (Jaroslav Mracek) + +* Mon Apr 10 2017 Jaroslav Mracek 2.3.0-1 +- update release notes (Jaroslav Mracek) +- po: Update translations (Igor Gnatenko) +- Add require of subcommand for repo-pkgs command (Jaroslav Rohel) +- shell: Fix commands initialization (Jaroslav Rohel) +- po: Update translations (Igor Gnatenko) +- Add support for --location for repoquery command (RhBug:1290137) (Jaroslav + Mracek) +- Add support of --recursive with --resolve in repoquery (Jaroslav Mracek) +- Add --recursive option for repoquery (Jaroslav Mracek) +- Add --whatconflicts for repoquery (Jaroslav Mracek) +- Add support for multiple options for repoquery (Jaroslav Mracek) +- Add multiple format option for repoquery (Jaroslav Mracek) +- Fix problem with "dnf repoquery --querytags" (Jaroslav Mracek) +- Add support of 3 options into updateinfo command (Jaroslav Mracek) +- Add inheritance of reason for obsoleting packages (Jaroslav Mracek) +- Mark installonlypkgs correctly as user installed (RhBug:1349314) (Jaroslav + Mracek) +- Solve a problem with None names in callbacks (Jaroslav Mracek) +- Solve a problem for callbacks (Jaroslav Mracek) +- Revert "remove: CLI: --randomwait" (RhBug:1247122) (Ondřej Sojka) +- po: update translations (Igor Gnatenko) +- po: update translations (Igor Gnatenko) +- Set strings for translations (RhBug:1298717) (Jaroslav Mracek) + +* Mon Mar 27 2017 Jaroslav Mracek 2.2.0-1 +- bump version to 2.2.0 + update release notes (Jaroslav Mracek) +- Add 
documentation of new API callback actions (RhBug:1411432) (Jaroslav + Mracek) +- Fix python2 doesn't have e.__traceback__ attribute (Jaroslav Mracek) +- Do not report erasing package as None. (Jaroslav Mracek) +- Display scriplet for transaction (RhBug:1411423) (RhBug:1406130) (Jaroslav + Mracek) +- Add support for rpmcallbacks (Jaroslav Mracek) +- AUTHORS: updated (Jaroslav Rohel) +- Not show expiration check if no repo enabled (RhBug:1369212) (Jaroslav + Mracek) +- Fix changelog in dnf spec file (Jaroslav Mracek) +- po: update translations (Igor Gnatenko) +- Add myself (mhatina) to AUTHORS (Martin Hatina) +- po: Update translations (Igor Gnatenko) + +* Tue Mar 21 2017 Jaroslav Mracek 2.1.1-1 +- bump version to 2.1.1 + update release notes (Jaroslav Mracek) +- Sync the translation with locale (Jaroslav Rohel) +- Disable exceptions in logging (Jaroslav Rohel) +- Fix severity info in "updateinfo info" (Jaroslav Mracek) +- Add help for shell commands (Jaroslav Rohel) +- shell: no crash if missing args (Jaroslav Rohel) +- proper check of releasever, when using installroot (RhBug:1417542) (Martin + Hatina) +- Inform about "Cache was expired" with "dnf clean" (RhBug:1401446) (Jaroslav + Mracek) +- crypto: port to the official gpgme bindings (Igor Gnatenko) +- Fix doc example for `fill_sack` method (Lubomír Sedlář) +- po: update translations (Igor Gnatenko) +- Not try to install src package (RhBug:1416699) (Jaroslav Mracek) +- Add usage for add_new_repo() with repofrompath option (Jaroslav Mracek) +- Add new API add_new_repo() in RepoDict() (RhBug:1427132) (Jaroslav Mracek) +- docs: adds documentation for dnf-automatic's Command and CommandEmail + emitters. (rhn) +- docs: fixes typo in section description in automatic (rhn) +- Adds new emitters for dnf-automatic. 
(rhn) +- po: update translations (Igor Gnatenko) +- Ensure that callback will not kill dnf transaction (Jaroslav Mracek) +- Ensure that name will be not requested on None (RhBug:1397047) (Jaroslav + Mracek) +- Python 3.6 invalid escape sequence deprecation fix (Ville Skyttä) +- display severity information in updateinfo (#741) (Michael Mraka) +- po: update translations (Igor Gnatenko) +- Add --nodocs option for dnf (RhBug:1379628) (Jaroslav Mracek) +- Replace passive plugin noroot (Jaroslav Mracek) +- Fix incorrect formatting of string for logger.info (Jaroslav Mracek) +- Not print help if empty line in script for shell command (Jaroslav Mracek) +- Run fill_sack after all repos have changed status (Jaroslav Mracek) +- Remove Hawkey object from repo if rerun of dnf.fill_sack (Jaroslav Mracek) +- util/on_metered_connection: be more polite to failures (Igor Gnatenko) +- cosmetic: i18n: rewording of 'Login user' (RhBug:1424939) (Jan Silhan) +- Fix problem with --whatprovides in repoquery (RhBug:1396992) (Jaroslav + Mracek) +- Add -a and --all option for repoquery (RhBug:1412970) (Jaroslav Mracek) +- Change camel-case of output of grouplist (Jaroslav Mracek) +- Minor correction in release notes (Jaroslav Mracek) +- Minor correction in release notes (Jaroslav Mracek) + +* Thu Feb 16 2017 Jaroslav Mracek 2.1.0-1 +- bump version to 2.1.0 + update release notes (Jaroslav Mracek) +- Fix problem with --recent option in repoquery (Jaroslav Mracek) +- Fix problem with duplicated --obsoletes (RhBug:1421835) (Jaroslav Mracek) +- Python 3.6 invalid escape sequence deprecation fixes (Ville Skyttä) +- Add --repoid as alias for --repo (Jaroslav Mracek) +- introduce dnf.base.Base.update_cache() (Martin Hatina) +- Try to install uninstalled packages if group installed (Jaroslav Mracek) +- Enable search of provides in /usr/(s)bin (RhBug:1421618) (Jaroslav Mracek) +- style: ignore E261 (Igor Gnatenko) +- makecache: do not run on metered connections (RhBug:1415711) (Igor Gnatenko) +- 
change '--disableplugins' to '--disableplugin' (Martin Hatina) +- cosmetic: removed unused import (Jan Silhan) +- show hint how to display why package was skipped (RhBug:1417627) (Jan Silhan) +- spec: add information how to obtain archive (Igor Gnatenko) +- fix messages (UX) (Jaroslav Rohel) +- zanata update (Jan Silhan) + +* Thu Feb 09 2017 Jaroslav Mracek 2.0.1-1 +- bump version to 2.0.1 + update release notes (Jaroslav Mracek) +- introduce cli 'obsoletes' option (Martin Hatina) +- swap tids if they are in wrong order (RhBug:1409361) (Michael Mraka) +- Disable shell command recursion (Jaroslav Rohel) +- Honor additional arguments for DNF shell repo list command (Jaroslav Rohel) +- don't traceback when bug title is not set (Michael Mraka) +- introducing list-security, info-security etc. commands (Michael Mraka) +- Add lsedlar to contributors list (Lubomír Sedlář) +- Return just name from Package.source_name (Lubomír Sedlář) +- introduce dnf.conf.config.MainConf.exclude() (Martin Hatina) +- systemd: Disable daemons on ostree-managed systems (Colin Walters) +- introduced dnf.base.Base.autoremove() (RhBug:1414512) (Martin Hatina) +- po: update translations (Igor Gnatenko) +- build: use relative directory for translations (Igor Gnatenko) +- Temporary eliminate a problem with install remove loop (Jaroslav Mracek) +- Handle info message when DRPM wastes data (RhBug:1238808) (Daniel + Aleksandersen) +- Fix output for better translation (RhBug:1386085) (Abhijeet Kasurde) +- yum layer refactored (Martin Hatina) +- return values changed to match yum's (Martin Hatina) +- Reword sentence after removing package (RhBug:1286553) (Abhijeet Kasurde) +- Minor documentation revisions (Mark Szymanski) +- Minor code fix (Abhijeet Kasurde) +- automatic: email emitter date header (David Greenhouse) +- Solve problem when no repo and only rpms with upgrade command (Jaroslav + Mracek) +- bash_completion: use system-python if it's available (Igor Gnatenko) +- spec: use system-python for 
dnf-yum as well (Igor Gnatenko) +- comps/groups: fix tests (Michal Luscon) +- comps: adjust group_upgrade to new CompsTransPkg style (Michal Luscon) +- groups: refactored installation (RhBug:1337731, RhBug:1336879) (Michal + Luscon) +- Increase requirement for hawkey (Jaroslav Mracek) +- Change reporting problems for downgradePkgs() (Jaroslav Mracek) +- Use selector for base.package_upgrade() (Jaroslav Mracek) +- Add usage of selectors for base.package_install() (Jaroslav Mracek) +- Use selector for base.package_downgrade() (Jaroslav Mracek) +- Redirect base.downgrade() to base.downgrade_to() (Jaroslav Mracek) +- Enable wildcard for downgrade command (RhBug:1173349) (Jaroslav Mracek) +- Refactor downgrade cmd behavior (RhBug:1329617)(RhBug:1283255) (Jaroslav + Mracek) +- Redirect logger.info into stderr for repolist (RhBug:1369411) (Jaroslav + Mracek) +- Redirect logger.info into stderr for repoquery (RhBug:1243393) (Jaroslav + Mracek) +- Add possibility for commands to redirect logger (Jaroslav Mracek) +- Put information about metadata expiration into stdout (Jaroslav Mracek) +- Change warning about added repo into info (RhBug:1243393) (Jaroslav Mracek) +- Move grouplist output from logger into stdout (Jaroslav Mracek) +- let repo exclude work the same way as global exclude (Michael Mraka) +- Fix wrong assumptions about metalinks (RhBug:1411349) (Jaroslav Mracek) +- handle --disablerepo/--enablerepo properly with strict (RhBug:1345976) + (Štěpán Smetana) +- Add fix to notify user about no repos (RhBug:1369212) (Abhijeet Kasurde) +- Add information about "hidden" option in dnf doc (RhBug:1349247) (Abhijeet + Kasurde) +- Fix for 'history package-list' (Amit Upadhye) +- Enable multiple args for repoquery -f (RhBug:1403930) (Jaroslav Mracek) +- Set default repo.name as repo._section (Jaroslav Mracek) +- Create from self.forms value forms in cmd.run() (Jaroslav Mracek) +- Add description of swap command into documentation (Jaroslav Mracek) +- Add swap command 
(RhBug:1403465) (RhBug:1110780) (Jaroslav Mracek) +- Solve a problem with shell when empty line or EOF (Jaroslav Mracek) +- shell: add history of commands (RhBug:1405333) (Michal Luscon) +- Add info if no files with repoquery -l (RhBug:1254879) (Jaroslav Mracek) +- po: update translations (Igor Gnatenko) +- po: migrate to zanata python client and trivial fixes in build (Igor + Gnatenko) +- po: include all possible languages from zanata (Igor Gnatenko) +- po: include comments for translations (Igor Gnatenko) +- shell: catch exceptions from depsolving (Michal Luscon) +- shell: update documentation (Michal Luscon) +- shell: add transaction reset cmd (Michal Luscon) +- shell: add transaction resolve cmd (Michal Luscon) +- shell: provide rewritable demands for cmds (Michal Luscon) +- shell: catch tracebacks from shlex (Michal Luscon) +- shell: handle ctrl+D more gracefully (Michal Luscon) +- groups: set demands in configure instead of run (Michal Luscon) +- shell: implement config cmd (Michal Luscon) +- shell: add help (Michal Luscon) +- shell: make alias repo list -> repolist (Michal Luscon) +- shell: catch exceptions from do_transaction (Michal Luscon) +- shell: resolve transaction in ts run (Michal Luscon) +- shell: add default value for internal methods argument (Michal Luscon) +- shell: create run alias for ts run (Michal Luscon) +- shell: add ts list cmd (Michal Luscon) +- shell: refill sack after every successful repo cmd (Michal Luscon) +- shell: allow running multiple transaction in one session (Michal Luscon) +- shell: add ts command (Michal Luscon) +- shell: catch cmd parsing and run exceptions (Michal Luscon) +- shell: allow to run scripts (Michal Luscon) +- shell: add repo cmd (Michal Luscon) +- shell: add resolving + transaction run support (Michal Luscon) +- shell: implement quit method (Michal Luscon) +- shell: add custom cmds stubs (Michal Luscon) +- shell: implement basic logic (Michal Luscon) +- shell: register new cmd (Michal Luscon) + +* Wed Dec 14 
2016 Michal Luscon 2.0.0-1 +- tests: catch ModuleNotFoundError as well (Igor Gnatenko) +- Switch out automatic service for automatic-download and automatic-install + (Pat Riehecky) +- Make upgrade-to alias for upgrade (RhBug:1327999) (Jaroslav Mracek) +- skip appending an empty option (RhBug: 1400081) (Michael Mraka) +- Add description of nevra forms for commands and autoremove args (Jaroslav + Mracek) +- Add support of arguments nevra forms for autoremove command (Jaroslav Mracek) +- Add nevra forms for remove command (Jaroslav Mracek) +- Add nevra forms for install command (Jaroslav Mracek) +- add bin/yum into .gitignore (Michal Luscon) +- clean: acquire all locks before cleaning (RhBug:1293782) (Michal Luscon) +- Change hawkey version requirement (Jaroslav Mracek) +- Add information for translators (RhBug:1386078) (Jaroslav Mracek) +- Change info to warning for clean repoquery output (RhBug:1358245) (Jaroslav + Mracek) +- Add description of pkg flag for Query (RhBug:1243393) (Jaroslav Mracek) +- Add minor changes in documentation (Jaroslav Mracek) +- Do not always overwrite the name with the repo ID (Neal Gompa) + +* Fri Dec 02 2016 Martin Hatina 2.0.0-0.rc2.1 +- See http://dnf.readthedocs.io/en/latest/release_notes.html + +* Thu Sep 29 2016 Michal Luscon 2.0.0-0.rc1.1 +- See http://dnf.readthedocs.io/en/latest/release_notes.html + +* Thu Sep 08 2016 Igor Gnatenko - 1.1.10-2 +- Obsolete dnf-langpacks +- Backport patch for dnf repolist disabled + +* Thu Aug 18 2016 Igor Gnatenko - 1.1.10-1 +- Update to 1.1.10 + +* Tue Aug 09 2016 Igor Gnatenko - 1.1.9-6 +- Fix typo + +* Tue Aug 09 2016 Igor Gnatenko - 1.1.9-5 +- Also change shebang for %%{?system_python_abi} in %%{_bindir}/dnf + +* Tue Aug 09 2016 Igor Gnatenko - 1.1.9-4 +- Add %%{?system_python_abi} + +* Tue Jul 19 2016 Fedora Release Engineering - 1.1.9-3 +- https://fedoraproject.org/wiki/Changes/Automatic_Provides_for_Python_RPM_Packages + +* Tue May 24 2016 Michal Luscon 1.1.9-2 +- Revert "group: treat 
mandatory pkgs as mandatory if strict=true" (RhBug:1337731) +- enforce-api: reflect changes from #992475 in completion_helper (RhBug:1338504) +- enforce-api: add compatibility methods for renamed counterparts (RhBug:1338564) + +* Thu May 19 2016 Igor Gnatenko 1.1.9-1 +- doc: release notes 1.1.9 (Igor Gnatenko) +- spec: correctly set up requirements for python subpkg (Igor Gnatenko) +- spec: follow new packaging guidelines & make compatible with el7 (Igor + Gnatenko) +- zanata update (Jan Silhan) +- enforce-api: add missing bits of Base class (Michal Luscon) +- help: unify help msg strings (Michal Luscon) +- enforce-api: decorate Base class (Michal Luscon) +- util: add decorator informing users of nonapi functions (Michal Luscon) +- Added description for 'autoremove' in dnf help (RhBug:1324086) (Abhijeet + Kasurde) +- i18n: fixup for 0db13feed (Michal Luscon) +- i18n: use fallback mode if terminal does not support UTF-8 (RhBug:1332012) + (Michal Luscon) +- Revert "spec: follow new packaging guidelines & make compatible with el7" + (Michal Luscon) +- move autoglob feature directly to filterm() and filter() (Michael Mraka) +- group: treat mandatory pkgs as mandatory if strict=true (RhBug:1292892) + (Michal Luscon) +- locks: fix lock paths in tmpfsd config since cachedir has been changed + (Michal Luscon) +- remove formating from translation strings (Michal Luscon) +- base: set diskspace check filter before applying the filters (RhBug:1328674) + (Michal Luscon) +- order repos by priority and cost (Michael Mraka) +- spec: follow new packaging guidelines & make compatible with el7 (Igor + Gnatenko) +- bash-completion: first try to set fallback to BASH_COMPLETION_COMPATDIR (Igor + Gnatenko) +- updated copyrights for files changed this year (Michael Mraka) +- cli: fix warning from re.split() about non-empty pattern (RhBug:1286556) + (Igor Gnatenko) +- update authors file (Michal Luscon) +- Define __hash__ method for YumHistoryPackage (RhBug:1245121) (Max Prokhorov) + +* 
Tue Apr 05 2016 Michal Luscon 1.1.8-1 +- refactor: repo: add md_expired property (Michal Domonkos) +- test: fix cachedir usage in LocalRepoTest (Michal Domonkos) +- clean: operate on all cached repos (RhBug:1278225) (Michal Domonkos) +- refactor: repo: globally define valid repoid chars (Michal Domonkos) +- RepoPersistor: only write to disk when requested (Michal Domonkos) +- clean: remove dead subcommands (Michal Domonkos) +- doc: --best in case of problem (RhBug:1309408) (Jan Silhan) +- Added fix for correct error message for group info (RhBug:1209649) (Abhijeet + Kasurde) +- repo: don't get current timeout for librepo (RhBug:1272977) (Igor Gnatenko) +- doc: fix default timeout value (Michal Luscon) +- cli: inform only about nonzero md cache check interval (Michal Luscon) +- base: report errors in batch at the end of md downloading (Michal Luscon) +- repo: produce more sane error if md download fails (Michal Luscon) +- zanata update (RhBug:1322226) (Jan Silhan) +- doc: Fixed syntax of `assumeyes` and `defaultyes` ref lables in + `conf_ref.rst` (Matt Sturgeon) +- Fix output headers for dnf history command (Michael Dunphy) +- doc: change example of 'dnf-command(repoquery)' (Jaroslav Mracek) +- makacache.service: shorten journal logs (RhBug:1315349) (Michal Luscon) +- config: improve UX of error msg (Michal Luscon) +- Added user friendly message for out of range value (RhBug:1214562) (Abhijeet + Kasurde) +- doc: prefer repoquery to list (Jan Silhan) +- history: fix empty history cmd (RhBug:1313215) (Michal Luscon) +- Very minor tweak to the docs for `--assumeyes` and `--assumeno` (Matt + Sturgeon) + +* Thu Feb 25 2016 Michal Luscon 1.1.7-1 +- Add `/etc/distro.repos.d` as a path owned by the dnf package (Neal Gompa + (ニール・ゴンパ)) +- Change order of search and add new default repodirs (RhBug:1286477) (Neal + Gompa (ニール・ゴンパ)) +- group: don't mark available packages as installed (RhBug:1305356) (Jan + Silhan) +- history: adjust demands for particular subcommands 
(RhBug:1258503) (Michal + Luscon) +- Added extension command for group list (RhBug:1283432) (Abhijeet Kasurde) +- perf: dnf repository-packages upgrade (RhBug:1306304) (Jan Silhan) +- sack: Pass base.conf.substitutions["arch"] to sack in build_sack() function. + (Daniel Mach) +- build: make python2/3 binaries at build time (Michal Domonkos) +- fix dnf history traceback (RhBug:1303149) (Jan Silhan) +- cli: truncate expiration msg (RhBug:1302217) (Michal Luscon) + +* Mon Jan 25 2016 Michal Luscon 1.1.6-1 +- history: don't fail if there is no history (RhBug:1291895) (Michal Luscon) +- Allow dnf to use a socks5 proxy, since curl support it (RhBug:1256587) + (Michael Scherer) +- output: do not log rpm info twice (RhBug:1287221) (Michal Luscon) +- dnf owns /var/lib/dnf dir (RhBug:1294241) (Jan Silhan) +- Fix handling of repo that never expire (RhBug:1289166) (Jaroslav Mracek) +- Filter out .src packages when multilib_proto=all (Jeff Smith) +- Enable string for translation (RhBug:1294355) (Parag Nemade) +- Let logging format messages on demand (Ville Skyttä) +- clean: include metadata of local repos (RhBug:1226322) (Michal Domonkos) +- completion: Install to where bash-completion.pc says (Ville Skyttä) +- spec: bash completion is not a %%config file (Ville Skyttä) +- Change assertion handling for rpmsack.py (RhBug:1275878) (Jaroslav Mracek) +- cli: fix storing arguments in history (RhBug:1239274) (Ting-Wei Lan) + +* Thu Dec 17 2015 Michal Luscon 1.1.5-1 +- base: save group persistor only after successful transaction (RhBug:1229046) + (Michal Luscon) +- base: do not clean tempfiles after remove transaction (RhBug:1282250) (Michal + Luscon) +- base: clean packages that do not belong to any trans (Michal Luscon) +- upgrade: allow group upgrade via @ syntax (RhBug:1265391) (Michal Luscon) +- spec: Mark license files as %%license where available (Ville Skyttä) +- Remove unused imports (Ville Skyttä) +- Spelling fixes (Ville Skyttä) +- Fix typos in documentation (Rob Cutmore) 
+- parser: add support for braces in substitution (RhBug:1283017) (Dave + Johansen) +- completion_helper: Don't omit "packages" from clean completions (Ville + Skyttä) +- bash-completion: Avoid unnecessary python invocation per _dnf_helper (Ville + Skyttä) +- repo: Download drpms early (RhBug:1260421) (Ville Skyttä) +- clean: Don't hardcode list of args in two places (Ville Skyttä) +- cli: don't crash if y/n and sys.stdin is None (RhBug:1278382) (Adam + Williamson) +- sp err "environement" -> "environment" (Michael Goodwin) +- Remove -OO from #!/usr/bin/python (RhBug:1230820) (Jaroslav Mracek) +- cli: warn if plugins are disabled (RhBug:1280240) (Michal Luscon) + +* Mon Nov 16 2015 Michal Luscon 1.1.4-1 +- AUTHORS: updated (Jan Silhan) +- query: add compatibility methods (Michal Luscon) +- query: add recent, extras and autoremove methods to Query (Michal Luscon) +- query: add duplicated and latest-limit queries into api (Michal Luscon) +- format the email message with its as_string method (Olivier Andrieu) +- added dnf.i18n.ucd* functions as deprecated API (Jan Silhan) +- i18n: unicode resulting translations (RhBug:1278031) (Jan Silhan) +- po: get rid of new lines in translation (Jan Silhan) +- output: add skip count to summary (RhBug:1264032) (Michal Domonkos) +- groups: fix environment upgrade (Michal Luscon) +- Fix plural strings extraction (RhBug:1209056) (Baurzhan Muftakhidinov) +- po: fixed malformed beginning / ending (Jan Silhan) +- zanata update (Jan Silhan) +- cli: prevent tracebacks after C^ (RhBug:1274946) (Michal Luscon) + +* Wed Oct 14 2015 Michal Luscon 1.1.3-1 +- Update command_ref.rst (Jaroslav Mracek) +- Change in automatic.conf email settings to prevent email error with default + sender name (Jaroslav Mracek) +- Replace assert_called() with assert_called_with() for Py35 support (Neal + Gompa (ニール・ゴンパ)) +- doc: improve documentation (Jaroslav Mracek) +- doc: update the instructions related to nightly builds (Radek Holy) +- Revert "Add the 
continuous integration script" (Radek Holy) +- Revert "cosmetic: ci: fix the Copr name in the README" (Radek Holy) +- Fix typo in Command.canonical's docstring (Timo Wilken) +- base: group_install is able to exclude mandatory packages + (Related:RhBug:1199868) (Jan Silhan) + +* Wed Sep 30 2015 Michal Luscon 1.1.2-4 +- don't import readline as it causes crashes in Anaconda + (related:RhBug:1258364) + +* Tue Sep 22 2015 Michal Luscon 1.1.2-3 +- Revert "completion_helper: don't get IndexError (RhBug:1250038)" + +* Tue Sep 22 2015 Michal Luscon 1.1.2-2 +- add hawkey version requirement +- revert commit #70956 + +* Tue Sep 22 2015 Michal Luscon 1.1.2-1 +- doc: release notes 1.1.2 (Michal Luscon) +- sanitize non Unicode command attributes (RhBug:1262082) (Jan Silhan) +- don't redirect confirmation to stderr RhBug(1258364) (Vladan Kudlac) +- clean: add rpmdb to usage (Vladan Kudlac) +- completion_helper: don't get IndexError (RhBug:1250038) (Vladan Kudlac) +- add --downloadonly switch (RhBug:1048433) (Adam Salih) +- Add globbing support to base.by_provides() (RhBug:11259650) (Valentina + Mukhamedzhanova) +- spec: packaging python(3)-dnf according to new Fedora guidelines + (RhBug:1260198) (Jaroslav Mracek) +- Bug in Source0: URL in dnf.spec fixed (RhBug:126255) (Jaroslav Mracek) +- To dnf.spec added provides dnf-command(command name) for 21 dnf commands + (RhBug:1259657) (jmracek) +- Expire repo cache on failed package download (Valentina Mukhamedzhanova) +- cosmetic: ci: fix the Copr name in the README (Radek Holy) +- Add the continuous integration script (Radek Holy) +- Set proper charset on email in dnf-automatic (RhBug:1254982) (Valentina + Mukhamedzhanova) +- doc: improve configuration description (RhBug:1261766) (Michal Luscon) +- remove: show from which repo a package is (Vladan Kudlac) +- list: show from which repo a package is (RhBug:1234491) (Vladan Kudlac) +- Spelling/grammar fixes (Ville Skyttä) +- install: fix crash when terminal window is small 
(RhBug:1256531) (Vladan + Kudlac) +- install: mark unification of the progress bar (Vladan Kudlac) +- fix translations in python3 (RhBug:1254687) (Michal Luscon) +- group: CompsQuery now returns group ids (RhBug:1261656) (Michal Luscon) + +* Tue Sep 08 2015 Michal Luscon 1.1.1-2 +- fix access to demands (RhBug:1259194) (Jan Silhan) +- make clean_requirements_on_remove=True (RhBug:1260280) (Jan Silhan) + +* Mon Aug 31 2015 Michal Luscon 1.1.1-1 +- Fixed typo (RhBug:1249319) (Adam Salih) +- fixed downgrade with wildcard (RhBug:1234763) (Adam Salih) +- reorganize logic of get_best_selector(s) and query (RhBug:1242946) (Adam + Salih) +- completion_helper: don't crash if exception occurred (RhBug:1225225) (Igor + Gnatenko) +- base: expire cache if repo is not available (Michal Luscon) +- Don't suggest --allowerasing if it is enabled (Christian Stadelmann) +- translation works in python3 (RhBug:1254687) (Jan Silhan) +- logrotate less often (RhBug:1247766) (Jan Silhan) +- implement dnf mark command (RhBug:1125925) (Michal Luscon) +- groups: use comps data to migrate persistor (Michal Luscon) +- groups: preserve api compatibility (Michal Luscon) +- groups: use persistor data for removing env/group (Michal Luscon) +- persistor: add migration and bump version (Michal Luscon) +- persistor: store name and ui_name of group (Michal Luscon) +- show real metadata timestamp on the server in verbose mode (Jan Silhan) +- lock: make rpmdb lock blocking (RhBug:1210289) (Michal Luscon) + +* Wed Aug 12 2015 Michal Luscon 1.1.0-2 +- update: installonly pkgs are not shown in both install and skipped section + (RhBug:1252415) (Jan Silhan) +- output: sort skipped packages (Jan Silhan) +- output: skipped conflicts are set (RhBug:1252032) (Jan Silhan) +- keep the downgrading package installed if transaction fails (RhBug:1249379) + (Jan Silhan) +- don't store empty attributes (RhBug:1246928) (Michael Mraka) +- doc: correct dnf.conf man section (RhBug:1245349) (Michal Luscon) + +* Mon Aug 10 2015 
Michal Luscon 1.1.0-1 +- print skipped pkg with broken deps too (Related:RhBug:1210445) (Jan Silhan) +- history: set commands output as default (RhBug:1218401) (Michal Luscon) +- Update es.po. save:guardar -> save:ahorrar (Máximo Castañeda) +- cosmetic: option arg in Base.*install is replaced with strict (Jan Silhan) +- group: don't fail on first non-existing group (Jan Silhan) +- install: skips local pkgs of lower version when strict=0 + (Related:RhBug:1227952) (Jan Silhan) +- install: skip broken/conflicting packages in groups when strict=0 (Jan + Silhan) +- install: skip broken/conflicting packages when strict=0 (Jan Silhan) +- implemented `strict` config option working in install cmd (RhBug:1197456) + (Jan Silhan) +- fixed 'dnf --quiet repolist' lack of output (RhBug:1236310) (Nick Coghlan) +- Add support for MIPS architecture (Michal Toman) +- package: respect baseurl attribute in localPkg() (RhBug:1219638) (Michal + Luscon) +- Download error message is not written on the same line as progress bar + anymore (RhBug: 1224248) (Adam Salih) +- dnf downgrade does not try to downgrade not installed packages (RhBug: + 1243501) (max9631) +- pkgs not installed due to rpm error are reported (RhBug:1207981) (Adam Salih) +- dnf install checks availability of all given packages (RhBug:1208918) (Adam + Salih) +- implemented install_weak_deps config option (RhBug:1221635) (Jan Silhan) +- ignore SIGPIPE (RhBug:1236306) (Michael Mraka) +- always add LoggingTransactionDisplay to the list of transaction displays + (RhBug:1234639) (Radek Holy) +- Add missing FILES section (RhBug: 1225237) (Adam Salih) +- doc: Add yum vs dnf hook information (RhBug:1244486) (Parag Nemade) +- doc: clarify the expected type of the do_transactions's display parameter + (Radek Holy) +- apichange: add dnf.cli.demand.DemandSheet.transaction_display (Radek Holy) +- apichange: add dnf.callback.TransactionProgress (Radek Holy) +- move the error output from TransactionDisplay into a separate class (Radek + 
Holy) +- rename TransactionDisplay.errorlog to TransactionDisplay.error (Radek Holy) +- report package verification as a regular RPM transaction event (Radek Holy) +- rename TransactionDisplay.event to TransactionDisplay.progress (Radek Holy) +- apichange: deprecate dnf.callback.LoggingTransactionDisplay (Radek Holy) +- use both CliTransactionDisplay and demands.transaction_display (Radek Holy) +- apichange: accept multiple displays in do_transaction (Radek Holy) +- support multiple displays in RPMTransaction (Radek Holy) + +* Fri Jul 31 2015 Michal Luscon 1.0.2-3 +- Fix regression in group list command introduced by 02c3cc3 (Adam Salih) +- AUTHORS: updated (Jan Silhan) +- stop saying "experimental" (Matthew Miller) + +* Tue Jul 21 2015 Jan Silhan 1.0.2-2 +- fixed python3 syntax error from f427aa2 (Jan Silhan) + +* Fri Jul 17 2015 Michal Luscon 1.0.2-1 +- give --allowerasing hint when error occurs during resolution (RhBug:1148630) + (Jan Silhan) +- show --best hint with skipped packages every time (RhBug:1176351) (Jan Silhan) +- notify about skipped packages when upgrade (RhBug:1210445) (Jan Silhan) +- dnf-automatic: Document apply_updates=no behavior wrt keepcache (Ville + Skyttä) +- persistor: share functionality of JSONDB (Jan Silhan) +- keepcache=0 persists packages till next successful transaction + (RhBug:1220074) (Jan Silhan) +- do not use releasever in cache path (related to RhBug:1173107) (Michael + Mraka) +- doc: add dnf list use case (Michal Luscon) +- repo: allow ntlm proxy auth (RhBug:1219199) (Michal Luscon) +- add a script which updates release notes (Radek Holy) +- doc: reverse the order of release notes (Radek Holy) +- completion_helper: fix tb if list XXX is not known arg (RhBug:1220040) (Igor + Gnatenko) +- configurable maximum number of parallel downloads (RhBug:1230975) (Igor + Gnatenko) +- add info to bash_completion (1nsan3) +- dnf upgrade does not try to upgrade uninstalled packages (RhBug: 1234763) + (Adam Salih) +- dnf group list now 
checks every package and prints out only invalid ones + (Adam Salih) +- install: return zero exit code if group is already installed (RhBug:1232815) + (Michal Luscon) +- doc: add -b which does the same as --best (Igor Gnatenko) +- support category groups (Michael Mraka) +- cli test update for repofrompath (Michael Mraka) +- documentation for --repofrompath (Michael Mraka) +- implemented --repofrompath option (RhBug:1113384) (Michael Mraka) +- doc: document filter provides and obsoletes (Michal Luscon) +- doc: extend --quiet explanation (RhBug:1133979) (Jan Silhan) +- fixed dnf-automatic email emitter unicode error (RhBug:1238958) (Jan Silhan) +- doc: be specific what 'available' means in list/info (Jan Silhan) +- cosmetic: fixed typo (RhBug:1238252) (Jan Silhan) +- groups: clean dependencies (Michal Luscon) +- groups: fix removing of env that contains previously removed group (Michal + Luscon) +- groups: fix removing of empty group (Michal Luscon) +- AUTHORS: updated (Jan Silhan) +- bash-completion: ignore sqlite3 user configuration (Peter Simonyi) +- Fix package name for rawhide .repo files (Frank Dana) +- Add 'transaction_display' to DemandSheet (Will Woods) +- translation: update (Jan Silhan) +- translation: use zanata instead of transifex (Jan Silhan) +- Updated Polish translation (Piotr Drąg) +- updated georgian translation (George Machitidze) +- group: fixed installing of already installed environment (Jan Silhan) +- conf: change minrate threshold to librepo default (RhBug:1212320) (Michal + Luscon) + +* Tue Jun 09 2015 Michal Luscon 1.0.1-2 +- conf: change minrate threshold to librepo default (RhBug:1212320) +- group: fixed installation of already installed environments + +* Tue Jun 09 2015 Michal Luscon 1.0.1-1 +- doc: document variables in repo conf (Michal Luscon) +- groups: temporary fix for group remove (RhBug:1214968) (Michal Luscon) +- group: print summary of marked groups / environments together at the end (Jan + Silhan) +- group: fixed marking as 
installed (RhBug:1222694) (Jan Silhan) +- doc: Spelling fixes (Ville Skyttä) +- dnf-automatic: Fix systemd service description (thanks Ville Skyttä) (Jan + Silhan) +- doc: assumeyes added to Base.conf and config option (Jan Silhan) +- optionparser: deleted --obsoletes option that conflicted with repoquery + plugin (Jan Silhan) +- dnf-automatic: Document emit_via default (Ville Skyttä) +- man: yum2dnf don't show content (RhBug:1225246) (Thanks Adam Salih) (Jan + Silhan) +- doc: allowed chars of repo ID (Jan Silhan) +- doc: minimal repo config file (Jan Silhan) +- doc: configuration files replacement policy (Jan Silhan) +- fixed typo in man page (RhBug:1225168) (Michael Mraka) +- Update authors (Michal Luscon) +- dnf-automatic: add random_sleep option (RhBug:1213985) (Vladan Kudlac) +- don't print bug report statement when rpmdb is corrupted + (Related:RhBug:1225277) (Jan Silhan) +- comps: fix unicode issue (RhBug:1223932) (Thanks Parag) (Parag Nemade) +- logging: setup librepo log in verbose mode (Michal Luscon) +- doc: document the versioning scheme (Radek Holy) +- groups: end up empty group removal before solving (Michal Luscon) +- groups: end up empty installation before solving (RhBug:1223614) (Michal + Luscon) +- doc: add support for transactions/packages/ranges in "dnf history list" + (Radek Holy) +- doc: add support for transaction ranges in "dnf history info" (Radek Holy) +- support ssl client certificates (RhBug:1203661) (Michael Mraka) +- doc: document the "mirrorlist" configuration option (Radek Holy) +- doc: document the "metalink" configuration option (Radek Holy) +- doc: document the "baseurl" configuration option (Radek Holy) +- doc: document the "enabled" configuration option (Radek Holy) +- doc: document the "name" configuration option (Radek Holy) +- Revert "spec: added sqlite requirement" (Jan Silhan) +- spec: added sqlite requirement (Jan Silhan) +- cosmetic: fixed typo in comment (Jan Silhan) +- man: added reference to bug reporting guide (Jan 
Silhan) +- test: ignore user terminal width (Jan Silhan) +- cosmetic: base: import dnf.util.first (Jan Silhan) +- base.upgrade: inform user when pkg not installed and skipped (RhBug:1187741) + (Jan Silhan) +- disable buildtime c/c++ dependency (Michael Mraka) +- doc: document the new virtual provides (Radek Holy) +- AUTHORS: updated (Jan Silhan) +- AUTHORS: distinguish authors and contributors (Jan Silhan) +- Create ka.po (George Machitidze) +- Parser: fix path handling (Haikel Guemar) +- doc: metadata_timer_sync checked every hour (Jan Silhan) + +* Wed Apr 29 2015 Michal Luscon 1.0.0-1 +- doc: release notes dnf-1.0.0 (Michal Luscon) +- completion: don't do aliases (RhBug:1215289) (Jan Silhan) +- use Sack.load_repo() instead of Sack.load_yum_repo() (Jan Silhan) +- Repo.name has default value of repo ID (RhBug:1215560) (Jan Silhan) +- cosmetic: get rid of user visible yum references (Jan Silhan) +- moved install_or_skip to dnf.comps (Jan Silhan) +- group: see already installed group during installation (RhBug:1199648) (Jan + Silhan) +- group: install_or_skip returns num of packages to install (Jan Silhan) +- group: made global function install_or_skip (Jan Silhan) +- AUTHORS: updated (Radek Holy) +- describe --refresh option in --help output (Pádraig Brady) +- better no such command message (RhBug:1208773) (Jan Silhan) +- doc: package-cleanup example doesn't print 'No match for argument:...' 
+ garbage (Jan Silhan) +- mention yum check replacement (Michael Mraka) +- added ref to dnf list (Michael Mraka) +- added package-cleanup to dnf translation table (Michael Mraka) +- python3: Repo comparison (RhBug:1208018) (Jan Silhan) +- python3: YumHistoryRpmdbProblem comparison (RhBug:1207861) (Jan Silhan) +- python3: YumHistoryTransaction comparison (Jan Silhan) +- tests: use packages in test_transaction (Radek Holy) +- cosmetic: fix some Pylint errors (Radek Holy) +- updated documentation wrt installonlypkgs and auto removal (Michael Mraka) +- mark installonly packages always as userinstalled (RhBug:1201445) (Michael + Mraka) +- mark username/password as api (Michael Mraka) +- document username/password repo attributes (Michael Mraka) +- support HTTP basic auth (RhBug:1210275) (Michael Mraka) +- cli: better metadata timestamp info (Michal Luscon) +- repo: add metadata mirror failure callback (Michal Luscon) +- dnf-yum: cosmetic: lower case after comma (Jan Silhan) +- dnf-yum: print how to install migrate plugin (Jan Silhan) +- doc: show the real package for each tool in dnf-plugins-extras (Tim + Lauridsen) +- doc: improve the documentation of repo costs (Radek Holy) +- doc: fix debuginfo-install package name (Michal Luscon) +- doc: release notes 0.6.5 (Michal Luscon) +- bash-completion: allow only one subcmd for help (Igor Gnatenko) +- bash-completion: add history completion (Igor Gnatenko) +- bash-completion: add completion for help (Igor Gnatenko) +- bash-completion: check where pointing bin/dnf (Igor Gnatenko) +- bash-completion: implement completion for clean cmd (Igor Gnatenko) +- bash_completion: implement downgrade command (Igor Gnatenko) +- bash-completion: refactor to python helper (Igor Gnatenko) +- command downgrade does downgrade_to (RhBug:1191275) (Jan Silhan) +- AUTHORS: updated (Jan Silhan) +- clean: 'dnf clean all' should also clean presto and updateinfo solvx files + (Parag Nemade) +- dnf-yum: modified warning message (RhBug:1207965) (Jan 
Silhan) + +* Tue Mar 31 2015 Michal Luscon 0.6.5-1 +- subject: expand every glob name only once (RhBug:1203151) (Michal Luscon) +- group mark: skips already installed groups (Jan Silhan) +- Merge pull request #246 from mluscon/yum2dnf (mluscon) +- Add yum2dnf man page (Michal Luscon) +- doc: extend cli_vs_yum (Michal Luscon) +- dnf-yum package does not conflict with yum 3.4.3-505+ (Jan Silhan) +- fixed double set of demand from 0e4276f (Jan Silhan) +- group: remove cmd don't load available_repos, see 04da412 (Jan Silhan) +- spec: /var/lib/dnf owned by dnf-conf (Jan Silhan) +- spec: apply the weak dependencies only on F21+ (Radek Holy) +- dnf-automatic: fixed python_sitelib (RhBug:1199450) (Jan Silhan) +- Add release instructions (Michal Luscon) +- setup tito to bump version in VERSION.cmake (Michal Luscon) +- initialize to use tito (Michal Luscon) +- prepare repo for tito build system (Michal Luscon) +- spec: recommends bash-completion (RhBug:1190671) (Jan Silhan) +- completion: work with just python(3)-dnf (Jan Silhan) +- spec: move necessary files inside python(3) subpackages (RhBug:1191579) (Jan Silhan) +- bash-completion: use python method to get commands (RhBug:1187579) (Igor Gnatenko) +- api: exposed pluginconfpath main config (RhBug:1195325) (Jan Silhan) +- updated AUTHORS (Jan Silhan) +- add reinstall to bash_completion (Alberto Ruiz) +- added new packages to @System for duplicated query test (Michael Mraka) +- test for duplicated, installonly and latest_limit pkgs (Michael Mraka) +- tests for autoremove, extras and recent pkgs (Michael Mraka) +- moved push_userinstalled from base to goal (Michael Mraka) +- filter or skip 'n' latest packages (Michael Mraka) +- moved recent to query (Michael Mraka) +- moved autoremove to query (Michael Mraka) +- moved extras list to query (Michael Mraka) +- create query for installonly packages (Michael Mraka) +- create query for duplicated packages (Michael Mraka) +- cosmetic: base: fixed pylint warnings (Jan Silhan) +- do 
transaction cleanup after plugin hook (RhBug:1185977) (Michal Luscon) +- base: extend download lock (RhBug:1157233) (Michal Luscon) +- lock: output meaningful error for malformed lock file (Michal Luscon) +- util: fix race condition in ensure_dir() (Michal Luscon) +- lock: switch metadata lock to blocking mode (Michal Luscon) +- install nonmandatory group packages as optional (Related:RhBug:1167881) (Michal Luscon) +- remove command deletes whole dependency tree (RhBug:1154202) (Jan Silhan) +- cmd list takes as parameter, revert of 526e674 (Jan Silhan) +- spec: own /var/lib/dnf directory (RhBug:1198999) (Jan Silhan) +- transifex update (Jan Silhan) +- doc: fixed systemd execution of dnf-automatic (Jan Silhan) +- doc: how to run dnf-automatic (RhBug:1195240) (Jan Silhan) +- cosmetic: added forgotten :api mark from 05b03fc (Jan Silhan) +- api: exposed Repo.skip_if_unavailable config (RhBug:1189083) (Jan Silhan) +- updated documentation for 'dnf list autoremove' (Michael Mraka) +- reuse list_autoremove() in autoremove command (Michael Mraka) +- function for autoremove package list (Michael Mraka) +- implemented dnf list autoremove (Michael Mraka) +- exclude not documented history subcommands (RhBug:1193914,1193915) (Jan Silhan) +- better file pattern recognition (RhBug:1195385) (Jan Silhan) +- spec: fix Obsoletes of the new DNF (Radek Holy) +- remove boot only constraint and add missing download lock (Michal Luscon) +- util: remove unused user_run_dir() function (Michal Luscon) +- lock: change the destination folder of locks to allow suided programs work properly (RhBug:1195661) (Michal Luscon) +- install dnf-3 only when python3 is enabled (thanks glensc) (Jan Silhan) +- fixed unicode Download error (RhBug:1190458) (Jan Silhan) +- log: print metadata age along with timestamp (Petr Spacek) +- cli: fix double expansion of cachedir (RhBug:1194685) (Michal Luscon) +- removed unused dnf-makecache.cron (Jan Silhan) +- renamed erase command to remove (RhBug:1160806) (Jan 
Silhan) +- spec: made python3-dnf package installed by default in f23 (Jan Silhan) +- AUTHORS: changed email address (Jan Silhan) +- doc: improve the documentation of the "install" command (Radek Holy) +- "dnf install non-existent" should fail (Radek Holy) +- tests: add some tests of Base.install (Radek Holy) +- tests: add some tests of Base.package_install (Radek Holy) +- Revert "doesn't upgrade packages by installing local packages" (RhBug:1160950) (Radek Holy) +- lint: fix all Pylint errors in test_install (Radek Holy) +- tests: add some tests to test_install (Radek Holy) +- tests: improve some tests in test_install (Radek Holy) +- cosmetic: reorder tests in test_install (Radek Holy) +- cosmetic: rename some tests in test_install and add some docstrings (Radek Holy) +- AUTHORS: updated (Jan Silhan) +- Add support for armv6hl (Peter Hjalmarsson) +- doc: subject.__init__(): what is pkg_spec (Jan Silhan) +- doc: mentioning raising IOError from Base.fill_sack() (Jan Silhan) +- option_parser: fixed splitting multiple values (RhBug:1186710) (Jan Silhan) +- AUTHORS: updated (Jan Silhan) +- Standardize words describing boolean data type (Christopher Meng) + +* Wed Feb 4 2015 Jan Silhan - 0.6.4-1 +- Adapt to librepo-1.7.13, metalink and mirrorlist are not loaded anymore when the repo is local. 
(Radek Holy) +- not raises value error when no metadata exist (Jan Silhan) +- Remove lock files during boot (RhBug:1154476) (Michal Luscon) +- doc: groups are ordered not categories (Jan Silhan) +- doc: added Package attributes to API (Jan Silhan) +- README: link to bug reporting guide (Jan Silhan) +- README: the official documentation is on readthedoc (Jan Silhan) +- i18n: unicode encoding does not throw error (RhBug:1155877) (Jan Silhan) +- conf: added minrate repo option (Related:RhBug:1175466) (Jan Silhan) +- conf: added timeout repo option (RhBug:1175466) (Jan Silhan) +- doc: api_queries: add 'file' filter description (RhBug:1186461) (Igor Gnatenko) +- doc: documenting enablegroups (Jan Silhan) +- log: printing metadata timestamp (RhBug:1170156) (Jan Silhan) +- base: setup default cachedir value (RhBug:1184943) (Michal Luscon) +- orders groups/environments by display_order tag (RhBug:1177002) (Jan Silhan) +- no need to call create_cmdline_repo (Jan Silhan) +- base: package-spec matches all packages which the name glob pattern fits (RhBug:1169165) (Michal Luscon) +- doc: move dnf.conf to appropriate man page section (RhBug:1167982) (Michal Luscon) +- tests: add test for blocking process lock (Michal Luscon) +- lock: fix several race conditions in process lock mechanism (Michal Luscon) +- base: use blocking process lock during download phase (RhBug:1157233) (Michal Luscon) +- Update the Source0 generation commands in dnf.spec.in file (Parag Nemade) +- Enhancement to dnf.spec.in file which follows current fedora packaging guidelines (Parag Nemade) +- doc: add some examples and documentation of the core use case (RhBug:1138096) (Radek Holy) +- bash-completion: enable downgrading packages for local files (RhBug:1181189) (Igor Gnatenko) +- group: prints plain package name when package not in any repo (RhBug:1181397) (Jan Silhan) +- spec: own __pycache__ for python 3 (Igor Gnatenko) +- changed hawkey.log dir to /var/log (RhBug:1175434) (Jan Silhan) +- 
bash-completion: handle sqlite errors (Igor Gnatenko) +- use LANG=C when invoking 'dnf help' and 'sed' with regular expressions (Jakub Dorňák) +- spec: own __pycache__ directory for py3 (Igor Gnatenko) +- doc: mentioning Install command accepts path to local rpm package (Jan Silhan) +- groups: in erase and install cmd non-existent group does not abort transaction (Jan Silhan) +- doc: running tests in README (Jan Silhan) +- api: transaction: added install_set and remove_set (RhBug:1162887) (Jan Silhan) +- cosmetic: fixed some typos in documentation (Jan Silhan) +- groups: environments described after @ sign works (RhBug:1156084) (Jan Silhan) +- own /etc/dnf/protected.d (RhBug:1175098) (Jan Silhan) +- i18n: computing width of char right (RhBug:1174136) (Jan Silhan) +- cosmetic: renamed _splitArg -> _split_arg (Jan Silhan) +- conf: removed include name conflict (RhBug:1055910) (Jan Silhan) +- output: removed unpredictable decision based on probability introduced in ab4d2c5 (Jan Silhan) +- output: history list is not limited to 20 records (RhBug:1155918) (Jan Silhan) +- doc: referenced forgotten bug fix to release notes (Jan Silhan) +- cosmetic: doc: removed duplicated word (Jan Silhan) +- doc: described unavailable package corner case with skip_if_unavailable option (RhBug:1119030) (Jan Silhan) +- log: replaced size with maxsize directive (RhBug:1177394) (Jan Silhan) +- spec: fixed %ghost log file names (Jan Silhan) + +* Mon Dec 8 2014 Jan Silhan - 0.6.3-2 +- logging: reverted naming from a6dde81 + +* Mon Dec 8 2014 Jan Silhan - 0.6.3-1 +- transifex update (Jan Silhan) +- bash-completion: don't query if we trying to use local file (RhBug:1153543) (Igor Gnatenko) +- bash-completion: fix local completion (RhBug:1151231) (Igor Gnatenko) +- bash-completion: use sqlite cache from dnf-plugins-core (Igor Gnatenko) +- base: output a whole list of installed packages with glob pattern (RhBug:1163063) (Michal Luscon) +- cli: _process_demands() does not respect --cacheonly 
(RhBug:1151854) (Michal Luscon) +- new authors added (Jan Silhan) +- install: allow installation of provides with glob (Related:RhBug:1148353) (Michal Luscon) +- tests: removed mock patch for _, P_ (Jan Silhan) +- fixed error summary traceback (RhBug:1151740) (Jan Silhan) +- doc: swap command alternative mentioned (RhBug:1110780) (Jan Silhan) +- base: package_reinstall works only with the same package versions (Jan Silhan) +- base: package_install allows install different arch of installed package (Jan Silhan) +- base: package_downgrade prints message on failure (Jan Silhan) +- base: package_upgrade does not reinstall or downgrade (RhBug:1149972) (Jan Silhan) +- groups: searches also within localized names (RhBug:1150474) (Jan Silhan) +- Run tests with C locales. (Daniel Mach) +- Adds new motd emitter for dnf-automatic (RhBug:995537) (Kushal Das) +- Fix wrong cache directory path used to clean up binary cache (Satoshi Matsumoto) +- fix: traceback in history info (RhBug: 1149952) (Tim Lauridsen) +- logging: added logrotate script for hawkey.log (RhBug:1149350) (Jan Silhan) +- output: renamed displayPkgsInGroups (Jan Silhan) +- logging: renamed log files (RhBug:1074715)" (Jan Silhan) +- comps: Environment differentiates optional and mandatory groups (Jan Silhan) +- group info handles environments (RhBug:1147523) (Jan Silhan) +- deltarpm enabled by default (RhBug:1148208) (Jan Silhan) +- doc: deplist command (Jan Silhan) +- doc: minor fixes + repo references changed (Jan Silhan) +- spec: requires rpm-plugin-systemd-inhibit (RhBug:1109927) (Jan Silhan) + +* Fri Oct 3 2014 Jan Silhan - 0.6.2-1 +- transifex update (Jan Silhan) +- refactor: move MakeCacheCommand out into its own file. (Ales Kozumplik) +- api: add dnf.cli.CliError. (Ales Kozumplik) +- Update user_faq.rst (Stef Krie) +- Make --refresh play nice with lazy commands. 
(Ales Kozumplik) +- bash-completion: more faster completing install/remove (Igor Gnatenko) +- bash-completion: complete 'clean|groups|repolist' using help (Igor Gnatenko) +- Allow some commands to use stale metadata. (RhBug:909856) (Ales Kozumplik) +- does not install new pkgs when updating from local pkgs (RhBug:1134893) (Jan Silhan) +- doesn't upgrade packages by installing local packages (Related:RhBug:1138700) (Jan Silhan) +- refactor: repo: separate concepts of 'expiry' and 'sync strategy'. (Ales Kozumplik) +- fix: dnf.cli.util.* leaks file handles. (Ales Kozumplik) +- remove: YumRPMTransError. (Ales Kozumplik) +- rename: Base's runTransaction -> _run_transaction(). (Ales Kozumplik) +- drop unused parameter of Base.verify_transaction(). (Ales Kozumplik) +- bash-completion: new completion from scratch (RhBug:1070902) (Igor Gnatenko) +- py3: add queue.Queue to pycomp. (Ales Kozumplik) +- locking: store lockfiles with the resource they are locking. (RhBug:1124316) (Ales Kozumplik) +- groups: marks reason 'group' for packages that have no record yet (RhBug:1136584) (Jan Silhan) +- goal: renamed undefined name variable (Jan Silhan) +- refactor: split out and clean up the erase command. (Ales Kozumplik) +- py3: fix traceback in fmtColumns() on a non-subscriptable 'columns'. (Ales Kozumplik) +- groups: allow erasing depending packages on remove (RhBug:1135861) (Ales Kozumplik) +- history: fixed wrong set operation (RhBug:1136223) (Jan Silhan) +- base: does not reinstall pkgs from local rpms with install command (RhBug:1122617) (Jan Silhan) +- refactor: crypto: drop the integer keyid representation altogether. (Ales Kozumplik) +- crypto: fix importing rpmfusion keys. (RhBug:1133830) (Ales Kozumplik) +- refactor: crypto: Key is a class, not an "info" dict. (Ales Kozumplik) +- repos: fix total downloaded size reporting for cached packages. (RhBug:1121184) (Ales Kozumplik) + +* Thu Aug 28 2014 Jan Silhan - 0.6.1-1 +- packaging: add dnf-yum. 
(Ales Kozumplik) +- cli: added plugins missing hint (RhBug:1132335) (Jan Silhan) +- using ts.addReinstall for package reinstallation (RhBug:1071854) (Jan Silhan) +- Add history redo command. (Radek Holy) +- Add a TransactionConverter class. (Radek Holy) +- bash-completion: complete `help` with commands (Igor Gnatenko) +- bash-completion: generate commands dynamically (Igor Gnatenko) +- base: group_install accepts glob exclude names (RhBug:1131969) (Jan Silhan) +- README: changed references to new repo location (Jan Silhan) +- transifex update (Jan Silhan) +- syntax: fixed indentation (Jan Silhan) +- removed lt.po which was accidentally added in c2e9b39 (Jan Silhan) +- lint: fix convention violations in the new source files (Radek Holy) +- Fix setting of the resolving demand for repo-pkgs command. (Radek Holy) +- Add repository-packages remove-or-distro-sync command. (RhBug:908764) (Radek Holy) +- fix: traceback that GroupPersistor._original might not exist. (RhBug:1130878) (Ales Kozumplik) +- pycomp: drop to_ord(). (Ales Kozumplik) +- refactor: crypto.keyids_from_pubring() using _extract_signing_subkey(). (Ales Kozumplik) +- fix: another 32-bit hex() problem in crypto. (Ales Kozumplik) +- remove: pgpmsg.py. (Ales Kozumplik) +- replace the whole of pgpmsg.py with gpgme and a dummy context. (Ales Kozumplik) +- cosmetic: sort methods of Repo according to the coding standard. (Ales Kozumplik) +- Fix dnf.crypto.keyinfo2keyid(). (Ales Kozumplik) +- util: get rid of an inconvenient 'default_handle' constant. (Ales Kozumplik) +- simplify misc.import_key_to_pubring()'s signature. (Ales Kozumplik) +- cleanup: header of dnf.yum.pgpmsg. (Ales Kozumplik) +- crypto: add crypto.retrieve() and drop Base._retrievePublicKey() (Ales Kozumplik) +- cosmetic: order of functions in dnf.crypto. (Ales Kozumplik) +- unicode: fixed locale.format error (RhBug:1130432) (Jan Silhan) +- remove: misc.valid_detached_sig(). (Ales Kozumplik) +- tests: some tests for dnf.crypto. 
(Ales Kozumplik) +- crypto: use pubring_dir() context manager systematically. (Ales Kozumplik) +- Drop unused argument from getgpgkeyinfo(). (Ales Kozumplik) +- remove: Base._log_key_import(). (Ales Kozumplik) +- doc: cosmetic: conf_ref: maintain alphabetical order of the options. (Ales Kozumplik) +- crypto: document crypto options for repo. (Ales Kozumplik) +- crypto: fixup procgpgkey() to work with Py3 bytes. (Ales Kozumplik) +- dnf.util.urlopen(): do not create unicode streams for Py3 and bytes for Py2 by default. (Ales Kozumplik) +- lint: delinting of the repo_gpgcheck patchset. (Ales Kozumplik) +- Add CLI parts to let the user confirm key imports. (RhBug:1118236) (Ales Kozumplik) +- gpg: make key decoding work under Py3. (Ales Kozumplik) +- crypto: add dnf.crypto and fix things up so untrusted repo keys can be imported. (Ales Kozumplik) +- transifex update (Jan Silhan) +- syntax: fixed indentation (Jan Silhan) +- packaging: pygpgme is a requirement. (Ales Kozumplik) +- remove: support for gpgcakey gets dropped for now. (Ales Kozumplik) +- repo: smarter _DetailedLibrepoError construction. (Ales Kozumplik) +- repo: nicer error message on librepo's perform() failure. (Ales Kozumplik) +- get_best_selector returns empty selector instead of None (Jan Silhan) +- packaging: add automatic's systemd unit files. (RhBug:1109915) (Ales Kozumplik) +- automatic: handle 'security' update_cmd. (Ales Kozumplik) + +* Tue Aug 12 2014 Aleš Kozumplík - 0.6.0-1 +- lint: fix convention violations in the new source files (Radek Holy) +- Add "updateinfo [] [] security" command. (RhBug:850912) (Radek Holy) +- Add "updateinfo [] [] bugfix" command. (Radek Holy) +- Add "updateinfo [] [] enhancement" command. (Radek Holy) +- Add "updateinfo [] [] [...]" command. (Radek Holy) +- Add "updateinfo [] [] [...]" command. (Radek Holy) +- Add "updateinfo [] all" command. (Radek Holy) +- Add "updateinfo [] updates" command. (Radek Holy) +- Add "updateinfo [] installed" command. 
(Radek Holy) +- Add "-v updateinfo info" command. (Radek Holy) +- Add "updateinfo info" command. (Radek Holy) +- Add "updateinfo list" command. (Radek Holy) +- Add "updateinfo available" command. (Radek Holy) +- Add "updateinfo summary" command. (Radek Holy) +- Add basic updateinfo command. (Radek Holy) +- test: add updateinfo to the testing repository (Radek Holy) +- test: support adding directory repos to Base stubs (Radek Holy) +- test: really don't break other tests with the DRPM fixture (Radek Holy) +- Load UpdateInfo.xml during the sack preparation. (Radek Holy) +- Add Repo.updateinfo_fn. (Radek Holy) +- lint: add Selector calls to false positives, it's a hawkey type. (Ales Kozumplik) +- removed recursive calling of ucd in DownloadError (Jan Silhan) +- does not throw error when selector is empty (RhBug:1127206) (Jan Silhan) +- remove etc/version-groups.conf, not used. (Ales Kozumplik) +- lint: dnf.conf.parser (Ales Kozumplik) +- rename: dnf.conf.parser.varReplace()->substitute() (Ales Kozumplik) +- pycomp: add urlparse/urllib.parser. (Ales Kozumplik) +- move: dnf.yum.parser -> dnf.conf.parser. (Ales Kozumplik) +- packaging: add dnf-automatic subpackage. (Ales Kozumplik) +- doc: properly list the authors. (Ales Kozumplik) +- automatic: add documentation, including dnf.automatic(8) man page. (Ales Kozumplik) +- dnf-automatic: tool supplying the yum-cron functionality. (Ales Kozumplik) +- doc: cosmetic: fixed indent in proxy directive (Jan Silhan) +- include directive support added (RhBug:1055910) (Jan Silhan) +- refactor: move MultiCallList to util. (Ales Kozumplik) +- cli: do not output that extra starting newline in list_transaction(). (Ales Kozumplik) +- refactor: extract CLI cachedir magic to cli.cachedir_fit. (Ales Kozumplik) +- transifex update (Jan Silhan) +- move: test_output to tests/cli. (Ales Kozumplik) +- refactor: move Term into its own module. (Ales Kozumplik) +- refactoring: cleanup and linting in dnf.exceptions. 
(Ales Kozumplik) +- lint: test_cli.py (Ales Kozumplik) +- lint: rudimentary cleanups in tests.support. (Ales Kozumplik) +- refactor: loggers are module-level variables. (Ales Kozumplik) +- groups: promote unknown-reason installed packages to 'group' on group install. (RhBug:1116666) (Ales Kozumplik) +- c82267f refactoring droppped plugins.run_transaction(). (Ales Kozumplik) +- cli: sort packages in the transaction summary. (Ales Kozumplik) +- refactor: cli: massively simplify how errors are propagated from do_transaction(). (Ales Kozumplik) +- groups: rearrange things in CLI so user has to confirm the group changes. (Ales Kozumplik) +- groups: committing the persistor data should only happen at one place. (Ales Kozumplik) +- groups: visualizing the groups transactions. (Ales Kozumplik) +- Add dnf.util.get_in() to navigate nested dicts with sequences of keys. (Ales Kozumplik) +- group persistor: generate diffs between old and new DBs. (Ales Kozumplik) +- Better quoting in dnf_pylint. (Ales Kozumplik) +- lint: logging.py. (Ales Kozumplik) +- Do not print tracebacks to the tty on '-d 10' (RhBug:1118272) (Ales Kozumplik) +- search: do not double-report no matches. (Ales Kozumplik) +- refactor: move UpgradeToCommand to its own module. (Ales Kozumplik) + +* Mon Jul 28 2014 Aleš Kozumplík - 0.5.5-1 +- packaging: also add pyliblzma to BuildRequires. (Ales Kozumplik) +- essential cleanup in dnf.yum.misc, removing a couple of functions too. (Ales Kozumplik) +- remove: Base.findDeps and friends. (Ales Kozumplik) +- Make pyliblzma a requriement. (RhBug:1123688) (Ales Kozumplik) +- whole user name can contain non-ascii chars (RhBug:1121280) (Jan Silhan) +- Straighten up the exceptions when getting a packages header. (RhBug:1122900) (Ales Kozumplik) +- tests: refactor: rename test_resource_path() -> resource_path() and use it more. (Ales Kozumplik) +- transifex update (Jan Silhan) +- remove: conf.commands. (Ales Kozumplik) +- proxy username and password, for both CLI and API. 
(RhBug:1120583) (Ales Kozumplik) +- conf: only 'main' is a reserved section name. (Ales Kozumplik) +- refactoring: cleanup a couple of lint warnings in base.py. (Ales Kozumplik) +- refactoring: move repo reading implementation out of dnf.Base. (Ales Kozumplik) +- refactor: repo_setopts is a CLI thing and doesn't belong to Base. (Ales Kozumplik) +- refactor: move cleanup methods to dnf.cli.commands.clean. (Ales Kozumplik) +- depsolving: doesn't install both architectures of pkg by filename (RhBug:1100946) (Jan Silhan) +- refactor: put CleanCommand in its own module. (Ales Kozumplik) +- cli: avoid 'Error: None' output on malformed CLI commands. (Ales Kozumplik) +- remove the special SIGQUIT handler. (Ales Kozumplik) +- api: In Repo(), cachedir is a required argument. (Ales Kozumplik) +- api: better describe how Repos should be created, example. (RhBug:1117789) (Ales Kozumplik) +- Base._conf lasts the lifetime of Base and can be passed via constructor. (Ales Kozumplik) +- doc: faq: having Yum and DNF installed at the same time. (Ales Kozumplik) +- remove: protected_packages config option, it has been ignored. (Ales Kozumplik) +- fix: misleading error message when no repo is enabled. (Ales Kozumplik) + +* Wed Jul 16 2014 Aleš Kozumplík - 0.5.4-1 +- pkg name from rpm transaction callback is in Unicode (RhBug:1118796) (Jan Silhan) +- packaging: python3-dnf depends on dnf. (RhBug:1119032) (Ales Kozumplik) +- Ship /usr/bin/dnf-3 to run DNF under Py3. (RhBug:1117678) (Ales Kozumplik) +- packaging: own /etc/dnf/plugins. (RhBug:1118178) (Ales Kozumplik) +- fix: pluginconfpath is a list. (Ales Kozumplik) +- cosmetic: use classmethod as a decorator in config.py. (Ales Kozumplik) +- cleanup: imports in dnf.cli.output (Ales Kozumplik) +- lint: straightforward lint fixes in dnf.cli.output. (Ales Kozumplik) +- Repo.__setattr__ has to use the parsed value. (Ales Kozumplik) +- Repo priorities. 
(RhBug:1048973) (Ales Kozumplik) +- repo: simplify how things are propagated to repo.hawkey_repo. (Ales Kozumplik) +- refactor: concentrate Repo.hawkey_repo construction in Repo.__init__(). (Ales Kozumplik) +- bash-completion: Update command and option lists, sort in same order as --help (Ville Skyttä) +- bash-completion: Use grep -E instead of deprecated egrep (Ville Skyttä) +- output: fixed identation of info command output (Jan Silhan) +- i18n: calculates right width of asian utf-8 strings (RhBug:1116544) (Jan Silhan) +- transifex update + renamed po files to Fedora conventions (Jan Silhan) +- remove: CLI: --randomwait (Ales Kozumplik) +- cli: fix: --installroot has to be used with --releasever (RhBug:1117293) (Ales Kozumplik) +- Base.reset(goal=True) also resets the group persistor (RhBug:1116839) (Ales Kozumplik) +- tests: fix failing DistroSync.test_distro_sync(). (Ales Kozumplik) +- logging: RPM transaction markers are too loud. (Ales Kozumplik) +- logging: silence drpm a bit. (Ales Kozumplik) +- logging: put timing functionality into one place. (Ales Kozumplik) +- repolist: fix traceback with disabled repos. (RhBug:1116845) (Ales Kozumplik) +- refactor: cleanups in repolist. (Ales Kozumplik) +- lint: remove some unused imports. (Ales Kozumplik) +- cli: break out the repolsit command into a separate module. (Ales Kozumplik) +- does not crash with non-ascii user name (RhBug:1108908) (Jan Silhan) +- doc: document 'pluginpath' configuration option. (RhBug:1117102) (Ales Kozumplik) +- Spelling fixes (Ville Skyttä) +- cli: Fix software name in --version help (Ville Skyttä) +- doc: ip_resolve documented at two places. remove one. (Ales Kozumplik) + +* Thu Jul 3 2014 Aleš Kozumplík - 0.5.3-1 +- packaging: bump hawkey dep to 0.4.17. (Ales Kozumplik) +- api: remove Base.select_group(). (Ales Kozumplik) +- tests: cleanup our base test case classes a bit. (Ales Kozumplik) +- Add DNF itself among the protected packages. 
(Ales Kozumplik) +- api: plugins: add the resolved() hook. (Ales Kozumplik) +- api: expose Transaction introspecting in the API. (RhBug:1067156) (Ales Kozumplik) +- api: add basic documentation for dnf.package.Package. (Ales Kozumplik) +- tests: cosmetic: conf.protected_packages is ignored, drop it in FakeConf. (Ales Kozumplik) +- cli: simplify exception handling more. (Ales Kozumplik) +- Fixed a minor typo in user_faq - 'intall' should be 'install' (Martin Preisler) +- fixed encoding of parsed config line (RhBug:1110800) (Jan Silhan) +- syntax: replaced tab with spaces (Jan Silhan) +- doc: acknowledge the existence of plugins on the man page (RhBug:1112669) (Ales Kozumplik) +- improve the 'got root?' message of why a transaction couldn't start. (RhBug:1111569) (Ales Kozumplik) +- traceback in Base.do_transaction. to_utf8() is gone since 06fb280. (Ales Kozumplik) +- fix traceback from broken string formatting in _retrievePublicKey(). (RhBug:1111997) (Ales Kozumplik) +- doc: replace Yum with DNF in command_ref.rst (Viktor Ashirov) +- Fix a missing s in the title (mscherer) +- api: add dnf.rpm.detect_releasever() (Ales Kozumplik) +- Detect distroverpkg from 'system-release(release)' (RhBug:1047049) (Ales Kozumplik) +- bulid: add dnf/conf to cmake. (Ales Kozumplik) +- lint: clean up most lint messages in dnf.yum.config (Ales Kozumplik) +- remove: couple of dead-code methods in dnf.yum.config. (Ales Kozumplik) +- api: document client's responsibility to preset the substitutions. (RhBug:1104757) (Ales Kozumplik) +- move: rpmUtils -> rpm. (Ales Kozumplik) +- refactor: move yumvar out into its proper module dnf.conf.substitutions. (Ales Kozumplik) +- refactor: turn dnf.conf into a package. (Ales Kozumplik) +- doc: api_base.rst pointing to nonexistent method. (Ales Kozumplik) +- remove: some logging from Transaction.populate_rpm_ts(). (Ales Kozumplik) +- Update cli_vs_yum.rst (James Pearson) +- api: doc: queries relation specifiers, with an example. 
(RhBug:1105009) (Ales Kozumplik) +- doc: phrasing in ip_resolve documentation. (Ales Kozumplik) +- cli: refactored transferring cmdline options to conf (Jan Silhan) +- cli: added -4/-6 option for using ipv4/ipv6 connection (RhBug:1093420) (Jan Silhan) +- cosmetic: empty set inicialization (Jan Silhan) +- repo: improve the RepoError message to include URL. (Ales Kozumplik) +- remove: dnf.yum.config.writeRawRepoFile(). (Ales Kozumplik) +- remove: bunch of (now) blank config options. (Ales Kozumplik) +- removed unique function (Jan Silhan) +- tests: mock.assert_has_calls() enforces its iterable arguments in py3.4. (Ales Kozumplik) +- logging: improve how repolist logs the total number of packages. (Ales Kozumplik) +- logging: Base.close() should not log to the terminal. (Ales Kozumplik) + +* Wed May 28 2014 Aleš Kozumplík - 0.5.2-1 +- doc: packaging: add license block to each .rst. (Ales Kozumplik) +- cosmetic: replaced yum with dnf in comment (Jan Silhan) +- takes non-ascii cmd line input (RhBug:1092777) (Jan Silhan) +- replaced 'unicode' conversion functions with 'ucd' (RhBug:1095861) (Jan Silhan) +- using write_to_file py2/py3 compatibility write function (Jan Silhan) +- encoding: all encode methods are using utf-8 coding instead of default ascii (Jan Silhan) +- fixed rpmbuild warning of missing file (Jan Silhan) +- transifex update (Jan Silhan) +- fixed typos in comments (Jan Silhan) +- Drop --debugrepodata and susetags generation with it. (Ales Kozumplik) +- doc: document --debugsolver. (Ales Kozumplik) +- fix: 'dnf repo-pkgs' failures (RhBug:1092006) (Radek Holy) +- lint: make dnf_pylint take '-s' that suppresses line/column numbers. (Ales Kozumplik) +- doc: cli_vs_yum: we do not promote installs to the obsoleting package. (RhBug:1096506) (Ales Kozumplik) +- dealing with installonlies, we always need RPMPROB_FILTER_OLDPACKAGE (RhBug:1095580) (Ales Kozumplik) +- transifex update (Jan Silhan) +- arch: recognize noarch as noarch's basearch. 
(RhBug:1094594) (Ales Kozumplik) +- pylint: clean up dnf.repo. (Ales Kozumplik) +- sslverify: documentation and bumped librepo require. (Ales Kozumplik) +- repos: support sslverify setting. (RhBug:1076045) (Ales Kozumplik) +- search: exact matches should propagate higher. (RhBug:1093888) (Ales Kozumplik) +- refactor: concentrate specific search functionality in commands.search. (Ales Kozumplik) +- refactor: SearchCommand in its own file. (Ales Kozumplik) +- pylint: fix around one hundred pylint issues in dnf.base. (Ales Kozumplik) +- pylint: add simple pylint script (Ales Kozumplik) +- autoerase: write out the debugdata used to calculate redundant packages. (Ales Kozumplik) +- cosmetic: fix pylint comment in test_group.py. (Ales Kozumplik) +- refactor: err_mini_usage() is public. (Ales Kozumplik) +- refactor: fix several pylint errors in dnf.cli.commands.group. (Ales Kozumplik) +- fix: 'dnf remove' is deprecated so autoremove should be autoerase. (Ales Kozumplik) +- doc: command_ref: remove the deprecated aliases from the initial list. (Ales Kozumplik) +- Add autoremove command. (RhBug:963345) (Ales Kozumplik) +- refactor: Base.push_userinstalled() is public. (Ales Kozumplik) +- Remove sudo from dnf-completion.bash RhBug:1073457 (Elad Alfassa) +- exclude switch takes as a parameter (Jan Silhan) +- using nevra glob query during list command (RhBug:1083679) (Jan Silhan) +- removed rpm.RPMPROB_FILTER_REPLACEOLDFILES filter flag (Jan Silhan) +- test: changed tests according to new distro-sync behavior (Jan Silhan) +- packaging: cosmetic: copyright years in bin/dnf. (Ales Kozumplik) +- bin/dnf: run the python interpreter with -OO. (Ales Kozumplik) + +* Fri May 2 2014 Aleš Kozumplík - 0.5.1-1 +- drpm: output stats (RhBug:1065882) (Ales Kozumplik) +- refactor: architectures. (Ales Kozumplik) +- cli: be lot less verbose about dep processing. (Ales Kozumplik) +- groups: do not error out if group install/remove produces no RPM transaction. 
(Ales Kozumplik) +- fix: do not traceback on comps remove operations if proper pkg reasons can not be found. (Ales Kozumplik) +- fix: tracebacks in 'group remove ...' (Ales Kozumplik) +- groups: move all the logic of persistor saving from main.py to Base. (Ales Kozumplik) +- groups: auto-saving the groups persistor. (RhBug:1089864) (Ales Kozumplik) +- transifex update (Jan Silhan) +- remove: profiling code from cli.main. (Ales Kozumplik) +- remove: removal of dead code (Miroslav Suchý) +- doc: changes to rhbug.py to work on readthedocs.org. (Ales Kozumplik) +- doc: build the documentation without any dependencies (on DNF or anything else). (Ales Kozumplik) +- doc: make clear where one should expect bin/dnf (Miroslav Suchý) +- abrt: disable abrt for 'dnf makecache timer' run from systemd.service. (RhBug:1081753) (Ales Kozumplik) +- remove: stray itertools import from group.py. (Ales Kozumplik) + +* Wed Apr 23 2014 Aleš Kozumplík - 0.5.0-1 +- doc: fix formatting in api_cli.rst. (Ales Kozumplik) +- doc: document operation of 'group upgrade'. (Ales Kozumplik) +- comps: ensure only packages of 'group' reason get deleted on 'group erase'. (Ales Kozumplik) +- comps: store 'group' reason when installing a group-membering package. (Ales Kozumplik) +- Override Goal.get_reason(). (Ales Kozumplik) +- Add dnf.goal.Goal deriving from hawkey.Goal. (Ales Kozumplik) +- fix: encoding of yumdb directory names in py3. (Ales Kozumplik) +- tests: clean up the functions that load seeded comps a bit. (Ales Kozumplik) +- remove: cli._*aybeYouMeant(). (Ales Kozumplik) +- simplify groups/envs API methods in Base a lot. (Ales Kozumplik) +- tests: add test for Base._translate_comps_pkg_types() (Ales Kozumplik) +- refactor: move the group listing etc. methods() away from Base into GroupCommand. (Ales Kozumplik) +- api: add group.upgrade opration to Base and CLI (RhBug:1029022) (Ales Kozumplik) +- remove: OriginalGroupPersistor. (Ales Kozumplik) +- groups: store format version of the groups db. 
(Ales Kozumplik) +- groups: saving the persistent data. (Ales Kozumplik) +- refactor: extract out the transactioning part of _main(). (Ales Kozumplik) +- groups: Integrate the redone components with Base. (Ales Kozumplik) +- Add comps Solver. (Ales Kozumplik) +- groups: redo the GroupPersistor class. (Ales Kozumplik) +- doc: faq: why we don't check for root. (RhBug:1088166) (Ales Kozumplik) +- cosmetic: reordered import statements (Jan Silhan) +- added --refresh option (RhBug:1064226) (Jan Silhan) +- added forgotten import (Jan Silhan) +- fixed import errors after yum/i18n.py removal (Jan Silhan) +- removed to_utf8 from yum/i18n.py (Jan Silhan) +- removed to_str from yum/i18n.py (Jan Silhan) +- removed utf8_text_fill from yum/i18n.py (Jan Silhan) +- removed utf8_width from yum/i18n.py (Jan Silhan) +- removed utf8_width_fill from yum/i18n.py (Jan Silhan) +- removed to_unicode from yum/i18n.py (Jan Silhan) +- make all strings unicode_literals implicitly (Jan Silhan) +- moved _, P_ to dnf/i18n.py (Jan Silhan) +- removed utf8_valid from yum/i18n.py (Jan Silhan) +- removed str_eq from yum/i18n.py (Jan Silhan) +- removed exception2msg from yum/i18n.py (Jan Silhan) +- removed dummy_wrapper from yum/i18n.py (Jan Silhan) +- cosmetics: leave around the good things from 660c3e5 (documentation, UT). (Ales Kozumplik) +- Revert "fix: provides are not recognized for erase command. (RhBug:1087063)" (Ales Kozumplik) +- fix: provides are not recognized for erase command. (RhBug:1087063) (Ales Kozumplik) +- test: fix UsageTest test, so it work without dnf is installed on the system PEP8 cleanup (Tim Lauridsen) +- cleanup: getSummary() and getUsage() can be dropped entirely now. (Ales Kozumplik) +- test: use Command.usage & Command.summary API in unittest (Tim Lauridsen) +- show plugin commands in separate block api: add new public Command.usage & Command.summary API cleanup: make Commands (Tim Lauridsen) +- tests: move libcomps test to a separate test file. 
(Ales Kozumplik) +- refactor: put DistoSyncCommand into its own file (Tim Lauridsen) +- refactor: commands.group: _split_extcmd is a static method. (Ales Kozumplik) +- GroupsCommand: make the way comps are searched more robust. (RhBug:1051869) (Ales Kozumplik) +- tests: move GroupCommand tests to a more proper place. (Ales Kozumplik) +- fix leak: Base.__del__ causes GC-uncollectable circles. (Ales Kozumplik) +- gruops: 'list' and similar commands should run without root. (RhBug:1080331) (Ales Kozumplik) +- refactor: conf is given to Output on instantiation. (Ales Kozumplik) +- remove: Command.done_command_once and Command.hidden. (Ales Kozumplik) +- [doc] improve documentation of '--best' (RhBug:1084553) (Ales Kozumplik) +- api: Command.base and Command.cli are API attributes. (Ales Kozumplik) +- demands: similarly to 78661a4, commands should set the exit success_exit_status directly. (Ales Kozumplik) +- demands: commands requiring resolving dymamically need to set the demand now. (Ales Kozumplik) +- doc: typo in group doc. (RhBug:1084139) (Ales Kozumplik) +- api: Base.resolve() takes allow_erasing. (RhBug:1073859) (Ales Kozumplik) +- refactor: OptionParser._checkAbsInstallRoot is static. (Ales Kozumplik) +- option_parser: remove base dependency. (Ales Kozumplik) +- move: dnf.cli.cli.OptionParser -> dnf.cli.option_parser.OptionParser. (Ales Kozumplik) +- doc: 'clean packages' incorrectly mentions we do not delete cached packages. (RhBug:1083767) (Ales Kozumplik) +- fix: TypeError in dnf history info (RHBug: #1082230) (Tim Lauridsen) +- Start new version: 0.5.0. (Ales Kozumplik) +- remove: instance attrs of Base, namely cacheonly. (Ales Kozumplik) +- tests: remove: support.MockCli. (Ales Kozumplik) +- tests: fix locale independence. (Radek Holy) +- cleanups in cli.OptionParser. (Ales Kozumplik) +- fix: PendingDeprecationWarning from RPM in gpgKeyCheck(). 
(Ales Kozumplik) +- api: add Cli.demands.root_user (RhBug:1062889) (Ales Kozumplik) +- api: add Cli.demands and Command.config() to the API (RhBug:1062884) (Ales Kozumplik) +- Integrate DemandSheet into CLI. (Ales Kozumplik) +- Command.configure() takes the command arguments like run(). (Ales Kozumplik) +- Add dnf.cli.demand.DemandSheet. (Ales Kozumplik) +- remove: dead code for deplist, version and check-rpmdb commands. (Ales Kozumplik) +- sync with transifex (Jan Silhan) +- removed _enc method that did nothing without specspo (Jan Silhan) +- fixed local reinstall error (Jan Silhan) +- Fix Term.MODE setting under Python 3 in case of incapable tty stdout. (Radek Holy) +- tests: move Term tests to better file. (Radek Holy) +- refactor: move ReinstallCommand in its own module. (Ales Kozumplik) +- rename: yumbase (case insensitive) -> base. (Ales Kozumplik) +- fixed py3 error thrown by search command (Jan Silhan) +- fixed wrong named variable (Jan Silhan) +- fixed local downgrade error (Jan Silhan) +- doc: fix Package references that are ambiguous now. (Ales Kozumplik) +- fix: resource leak in yum.misc.checksum() under py3. (Ales Kozumplik) +- fix: leak: couple of files objects left open. (Ales Kozumplik) +- fix PendingDepreaction warning from rpm in _getsysver(). (Ales Kozumplik) +- repo: Repo.cachedir is not a list. (Ales Kozumplik) +- api: add Base.package_install et al. and Base.add_remote_rpm(). (RhBug:1079519) (Ales Kozumplik) +- tests: fix tests broken under foreign locale after 32818b2. (Ales Kozumplik) +- refactor: move install, downgrade and upgrade commands into separate modules. (Ales Kozumplik) +- tests: refactor: make Term tests more isolated. (Radek Holy) +- tests: fix terminfo capability independence. (Radek Holy) +- api: explain that Base is a context manager with a close(). (Ales Kozumplik) +- cosmetic: move stuff around in comps. (Ales Kozumplik) +- api: groups: add comps.Package, add group.package_iter(). 
(RhBug:1079932) (Ales Kozumplik) +- fixed installation of conflicted packages (RhBug:1061780) (Jan Silhan) +- removed never executed code based on _ts_saved_file variable (Jan Silhan) +- added logrotate script and ownership of log files to dnf (RhBug:1064211) (Jan Silhan) +- fixed: highlight characters broken under py3 (RhBug:1076884) (Jan Silhan) +- remove: base.deselectGroup(). it is not used. (Ales Kozumplik) +- tests: fix broken InstallMultilib.test_install_src_fails(). (Ales Kozumplik) +- groups: support manipulation with environments (RhBug:1063666) (Ales Kozumplik) +- add dnf.util.partition(). (Ales Kozumplik) +- refactor: RepoPersistor: use the global logger instead of an instance variable. (Ales Kozumplik) +- groups: besides installed groups also store persistently the environments. (Ales Kozumplik) +- rename: persistor.Groups -> ClonableDict. (Ales Kozumplik) +- doc: cli_vs_yum: typography in bandwidth limiting section. (Ales Kozumplik) +- doc: cli_vs_yum: we do not partially allow operations that install .srpm. (RhBug:1080489) (Ales Kozumplik) +- refactor: imports order in cli/commands/__init__.py. (Ales Kozumplik) +- refactor: groups: make all commands use _patterns2groups(). (Ales Kozumplik) +- kernel: remove kernel-source from const.INSTALLONLYPKGS. (Ales Kozumplik) +- build: 0.4.19-1 (Ales Kozumplik) +- New version: 0.4.19 (Ales Kozumplik) +- downloads: bump number of downloaded files on a skip. (RhBug:1079621) (Ales Kozumplik) +- packaging: add dnf.cli.commands to the installation. (Ales Kozumplik) +- refactor: put GroupCommand into its separate module. (Ales Kozumplik) +- rename: make cli.commands a subpackage. (Ales Kozumplik) +- AUTHORS: added Albert. (Ales Kozumplik) +- test: fixed CacheTest.test_noroot() when running as root (Albert Uchytil) +- AUTHORS: added Tim. 
(Ales Kozumplik) +- fixes TypeError: '_DownloadErrors' object is not iterable (RhBug:1078832) (Tim Lauridsen) +- fixed not including .mo files (Jan Silhan) +- comps: _by_pattern() no longer does the comma splitting. (Ales Kozumplik) + +* Mon Mar 24 2014 Aleš Kozumplík - 0.4.19-1 +- downloads: bump number of downloaded files on a skip. (RhBug:1079621) (Ales Kozumplik) +- packaging: add dnf.cli.commands to the installation. (Ales Kozumplik) +- refactor: put GroupCommand into its separate module. (Ales Kozumplik) +- rename: make cli.commands a subpackage. (Ales Kozumplik) +- AUTHORS: added Albert. (Ales Kozumplik) +- test: fixed CacheTest.test_noroot() when running as root (Albert Uchytil) +- AUTHORS: added Tim. (Ales Kozumplik) +- fixes TypeError: '_DownloadErrors' object is not iterable (RhBug:1078832) (Tim Lauridsen) +- fixed not including .mo files (Jan Silhan) +- comps: _by_pattern() no longer does the comma splitting. (Ales Kozumplik) +- including .mo files correctly (Jan Silhan) +- tests: fix locale independence. (Radek Holy) +- remove: unused trashy methods in dnf.yum.misc. (Ales Kozumplik) +- persistor: do not save Groups if it didn't change (RhBug:1077173) (Ales Kozumplik) +- tests: simplify the traceback logging. (Ales Kozumplik) +- main: log IO errors etc. thrown even during Base.__exit__. (Ales Kozumplik) +- logging: do not log IOError tracebacks in verbose mode. (Ales Kozumplik) +- refactor: move out main._main()'s inner error handlers. 
(Ales Kozumplik) +- added gettext as a build dependency for translation files (Jan Silhan) +- translation: updated .pot file and fetched fresh .po files from transifex (Jan Silhan) +- removed redundant word from persistor translation (Jan Silhan) +- translation: show relative path in generated pot file (Jan Silhan) +- refactor: replaced type comparisons with isinstance (Jan Silhan) +- translation: added mo files generation and including them in rpm package (Jan Silhan) +- removed unused imports in base.py (Jan Silhan) +- doc: typo in Base.group_install(). (Ales Kozumplik) + +* Mon Mar 17 2014 Aleš Kozumplík - 0.4.18-1 +- api: drop items deprecated since 0.4.9 or earlier. (Ales Kozumplik) +- api: deprecate Base.select_group() (Ales Kozumplik) +- doc: document the group marking operations. (Ales Kozumplik) +- api: add Base.group_install() with exclude capability. (Ales Kozumplik) +- groups: recognize 'mark install' instead of 'mark-install'. (Ales Kozumplik) +- Allow installing optional packages from a group. (RhBug:1067136) (Ales Kozumplik) +- groups: add installing groups the object marking style. (Ales Kozumplik) +- groups: add Base.group_remove(). (Ales Kozumplik) +- groups: add support for marking/unmarking groups. (Ales Kozumplik) +- groups: add dnf.persistor.GroupPersistor(), to store the installed groups. (Ales Kozumplik) +- logging: log plugin import tracebacks on the subdebug level. (Ales Kozumplik) +- rename: dnf.persistor.Persistor -> RepoPersistor. (Ales Kozumplik) +- doc: update README and FAQ with the unabbreviated name. (Ales Kozumplik) +- groups: fix grouplist crashes with new libcomps. (Ales Kozumplik) +- Do not terminate for unreadable repository config. (RhBug:1071212) (Ales Kozumplik) +- cli: get rid of ridiculous slashes and the file:// scheme on config read fails. (Ales Kozumplik) +- repo: log more than nothing about a remote repo MD download. (Ales Kozumplik) +- drpm: fallback to .rpm download on drpm rebuild error. 
(RhBug:1071501) (Ales Kozumplik) +- remove: Base.download_packages()' inner function mediasort(). (Ales Kozumplik) +- tests: tidy up the imports, in particular import mock from support. (Ales Kozumplik) +- changed documentation of distro-sync command (Jan Silhan) +- added distro-sync explicit packages support (RhBug:963710) (Jan Silhan) +- renamed testcase to distro_sync_all (Jan Silhan) +- Minor spelling (Arjun Temurnikar) +- i18n: translate repo sync error message. (Ales Kozumplik) +- add support for ppc64le (Dennis Gilmore) +- there is no arch called arm64 it is aarch64 (Dennis Gilmore) + +* Wed Mar 5 2014 Aleš Kozumplík - 0.4.17-1 +- doc: in the faq, warn users who might install rawhide packages on stable. (RhBug:1071677) (Ales Kozumplik) +- cli: better format the download errors report. (Ales Kozumplik) +- drpm: properly report applydeltarpm errors. (RhBug:1071501) (Ales Kozumplik) +- fixed Japanese translatated message (RhBug:1071455) (Jan Silhan) +- generated and synchronized translations with transifex (Jan Silhan) +- added transifex support to cmake (gettext-export, gettext-update) (Jan Silhan) +- api: expose RepoDict.get_matching() and RepoDict.all() (RhBug:1071323) (Ales Kozumplik) +- api: add Repo.set_progress_bar() to the API. (Ales Kozumplik) +- tests: test_cli_progress uses StringIO to check the output. (Ales Kozumplik) +- downloads: fix counting past 100% on mirror failures (RhBug:1070598) (Ales Kozumplik) +- repo: log callback calls to librepo. (Ales Kozumplik) +- Add repository-packages remove-or-reinstall command. (Radek Holy) +- Support negative filtering by new repository name in Base.reinstall. (Radek Holy) +- Support removal N/A packages in Base.reinstall. (Radek Holy) +- Add repository-packages remove command. (Radek Holy) +- refactor: Reduce amount of code in repository-packages subcommands. (Radek Holy) +- Support filtering by repository name in Base.remove. 
(Radek Holy) +- remove: BaseCli.erasePkgs (Radek Holy) +- Add repository-packages reinstall command. (Radek Holy) +- exceptions: improve empty key handling in DownloadError.__str__(). (Ales Kozumplik) +- downloads: fix fatal error message return value from download_payloads() (RhBug:1071518) (Ales Kozumplik) +- fixes problem with TypeError in Base.read_comps() in python3 (RhBug:1070710) (Tim Lauridsen) +- fix read_comps: not throwing exceptions when repo has no repodata (RhBug:1059704) (Jan Silhan) +- not decompressing groups when --cacheonly option is set (RhBug:1058224) (Jan Silhan) +- added forgotten import (Jan Silhan) +- Add repository-packages move-to command. (Radek Holy) +- Add repository-packages reinstall-old command. (Radek Holy) +- Support filtering by repository name in Base.reinstall. (Radek Holy) +- tests: test effects instead of mock calls. (Radek Holy) +- Wrap some recently added long lines. (Radek Holy) +- remove: BaseCli.reinstallPkgs (Radek Holy) +- repos: repos can never expire. (RhBug:1069538) (Ales Kozumplik) +- build: rebuild with 9d95442 (updated summaries_cache). (Ales Kozumplik) +- doc: update summaries_cache. (Ales Kozumplik) + +* Wed Feb 26 2014 Aleš Kozumplík - 0.4.16-1 +- fix: ensure MDPayload always has a valid progress attribute. (RhBug:1069996) (Ales Kozumplik) +- refactor: Move repo-pkgs upgrade-to to a standalone class instead of reusing the UpgradeToCommand. (Radek Holy) +- remove: BaseCli.updatePkgs (Radek Holy) +- refactor: Remove the reference to updatePkgs from UpgradeSubCommand. (Radek Holy) +- refactor: Remove the reference to updatePkgs from UpgradeCommand. (Radek Holy) +- refactor: Move repo-pkgs upgrade to a standalone class instead of reusing the UpgradeCommand. (Radek Holy) +- remove: BaseCli.installPkgs (Radek Holy) +- refactor: Remove the reference to installPkgs from InstallSubCommand. (Radek Holy) +- refactor: Remove the reference to installPkgs from InstallCommand. 
(Radek Holy) +- refactor: Move repo-pkgs install to a standalone class instead of reusing the InstallCommand. (Radek Holy) +- Revert "Support filtering by repository name in install_groupie." (Radek Holy) +- Revert "Support filtering by repository name in Base.select_group." (Radek Holy) +- Drop group filtering by repository name from installPkgs. (Radek Holy) +- Drop "repo-pkgs install @Group" support. (Radek Holy) +- refactor: Move CheckUpdateCommand.check_updates to BaseCli. (Radek Holy) +- refactor: Move repo-pkgs check-update to a standalone class instead of reusing the CheckUpdateCommand. (Radek Holy) +- refactor: Move repo-pkgs list to a standalone class instead of reusing the ListCommand. (Radek Holy) +- tests: Add tests of repo-pkgs info against the documentation. (Radek Holy) +- Fix "repo-pkgs info installed" behavior with respect to the documentation. (Radek Holy) +- refactor: Move MockBase methods to BaseStubMixin. (Radek Holy) +- refactor: Move repo-pkgs info to a standalone class instead of reusing the InfoCommand. (Radek Holy) +- refactor: Move InfoCommand._print_packages to BaseCli.output_packages. (Radek Holy) diff --git a/dnf/CMakeLists.txt b/dnf/CMakeLists.txt new file mode 100644 index 0000000..b423bdd --- /dev/null +++ b/dnf/CMakeLists.txt @@ -0,0 +1,12 @@ +FILE(GLOB dnf_SRCS *.py) +INSTALL (FILES ${dnf_SRCS} DESTINATION ${PYTHON_INSTALL_DIR}/dnf) +# const.py is generated so the glob alone won't see it: +INSTALL (FILES const.py DESTINATION ${PYTHON_INSTALL_DIR}/dnf) + +ADD_SUBDIRECTORY (automatic) +ADD_SUBDIRECTORY (cli) +ADD_SUBDIRECTORY (conf) +ADD_SUBDIRECTORY (module) +ADD_SUBDIRECTORY (rpm) +ADD_SUBDIRECTORY (yum) +ADD_SUBDIRECTORY (db) diff --git a/dnf/__init__.py b/dnf/__init__.py new file mode 100644 index 0000000..4ae303d --- /dev/null +++ b/dnf/__init__.py @@ -0,0 +1,37 @@ +# __init__.py +# The toplevel DNF package. +# +# Copyright (C) 2012-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import unicode_literals +import warnings +import dnf.pycomp + +warnings.filterwarnings('once', category=DeprecationWarning, module=r'^dnf\..*$') + +from dnf.const import VERSION +__version__ = VERSION # :api + +import dnf.base +Base = dnf.base.Base # :api + +import dnf.plugin +Plugin = dnf.plugin.Plugin # :api + +# setup libraries +dnf.pycomp.urlparse.uses_fragment.append("media") diff --git a/dnf/automatic/CMakeLists.txt b/dnf/automatic/CMakeLists.txt new file mode 100644 index 0000000..7229ab6 --- /dev/null +++ b/dnf/automatic/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB automatic_SRCS *.py) +INSTALL (FILES ${automatic_SRCS} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/automatic) diff --git a/dnf/automatic/__init__.py b/dnf/automatic/__init__.py new file mode 100644 index 0000000..c393854 --- /dev/null +++ b/dnf/automatic/__init__.py @@ -0,0 +1,19 @@ +# __init__.py +# dnf.automatic toplevel. +# +# Copyright (C) 2014 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# diff --git a/dnf/automatic/emitter.py b/dnf/automatic/emitter.py new file mode 100644 index 0000000..ac8dba9 --- /dev/null +++ b/dnf/automatic/emitter.py @@ -0,0 +1,167 @@ +# emitter.py +# Emitters for dnf-automatic. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +from dnf.i18n import _ +import logging +import dnf.pycomp +import smtplib +import email.utils +import subprocess + +APPLIED = _("The following updates have been applied on '%s':") +AVAILABLE = _("The following updates are available on '%s':") +DOWNLOADED = _("The following updates were downloaded on '%s':") + +logger = logging.getLogger('dnf') + + +class Emitter(object): + def __init__(self, system_name): + self._applied = False + self._available_msg = None + self._downloaded = False + self._system_name = system_name + self._trans_msg = None + + def _prepare_msg(self): + msg = [] + if self._applied: + msg.append(APPLIED % self._system_name) + msg.append(self._available_msg) + elif self._downloaded: + msg.append(DOWNLOADED % self._system_name) + msg.append(self._available_msg) + elif self._available_msg: + msg.append(AVAILABLE % self._system_name) + msg.append(self._available_msg) + else: + return None + return '\n'.join(msg) + + def notify_applied(self): + assert self._available_msg + self._applied = True + + def notify_available(self, msg): + self._available_msg = msg + + def notify_downloaded(self): + assert self._available_msg + self._downloaded = True + + +class EmailEmitter(Emitter): + def __init__(self, system_name, conf): + super(EmailEmitter, self).__init__(system_name) + self._conf = conf + + def _prepare_msg(self): + if self._applied: + subj = _("Updates applied on '%s'.") % self._system_name + elif self._downloaded: + subj = _("Updates downloaded on '%s'.") % self._system_name + elif self._available_msg: + subj = _("Updates available on '%s'.") % self._system_name + else: + return None, None + return subj, 
super(EmailEmitter, self)._prepare_msg() + + def commit(self): + subj, body = self._prepare_msg() + message = dnf.pycomp.email_mime(body) + message.set_charset('utf-8') + email_from = self._conf.email_from + email_to = self._conf.email_to + message['Date'] = email.utils.formatdate() + message['From'] = email_from + message['Subject'] = subj + message['To'] = ','.join(email_to) + message['Message-ID'] = email.utils.make_msgid() + + # Send the email + try: + smtp = smtplib.SMTP(self._conf.email_host, timeout=300) + smtp.sendmail(email_from, email_to, message.as_string()) + smtp.close() + except smtplib.SMTPException as exc: + msg = _("Failed to send an email via '%s': %s") % ( + self._conf.email_host, exc) + logger.error(msg) + + +class CommandEmitterMixIn(object): + """ + Executes a desired command, and pushes data into its stdin. + Both data and command can be formatted according to user preference. + For this reason, this class expects a {str:str} dictionary as _prepare_msg + return value. + Meant for mixing with Emitter classes, as it does not define any names used + for formatting on its own. 
+ """ + def commit(self): + command_fmt = self._conf.command_format + stdin_fmt = self._conf.stdin_format + msg = self._prepare_msg() + # all strings passed to shell should be quoted to avoid accidental code + # execution + quoted_msg = dict((key, dnf.pycomp.shlex_quote(val)) + for key, val in msg.items()) + command = command_fmt.format(**quoted_msg) + stdin_feed = stdin_fmt.format(**msg).encode('utf-8') + + # Execute the command + subp = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE) + subp.communicate(stdin_feed) + subp.stdin.close() + if subp.wait() != 0: + msg = _("Failed to execute command '%s': returned %d") \ + % (command, subp.returncode) + logger.error(msg) + + +class CommandEmitter(CommandEmitterMixIn, Emitter): + def _prepare_msg(self): + return {'body': super(CommandEmitter, self)._prepare_msg()} + + +class CommandEmailEmitter(CommandEmitterMixIn, EmailEmitter): + def _prepare_msg(self): + subject, body = super(CommandEmailEmitter, self)._prepare_msg() + return {'subject': subject, + 'body': body, + 'email_from': self._conf.email_from, + 'email_to': ' '.join(self._conf.email_to)} + + +class StdIoEmitter(Emitter): + def commit(self): + msg = self._prepare_msg() + print(msg) + + +class MotdEmitter(Emitter): + def commit(self): + msg = self._prepare_msg() + with open('/etc/motd', 'w') as fobj: + fobj.write(msg) + diff --git a/dnf/automatic/main.py b/dnf/automatic/main.py new file mode 100644 index 0000000..4585f54 --- /dev/null +++ b/dnf/automatic/main.py @@ -0,0 +1,286 @@ +# __init__.py +# dnf.automatic CLI +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +from dnf.i18n import _, ucd +import dnf +import dnf.automatic.emitter +import dnf.cli +import dnf.cli.cli +import dnf.cli.output +import dnf.conf +import libdnf.conf +import dnf.const +import dnf.exceptions +import dnf.util +import dnf.logging +import hawkey +import logging +import socket +import argparse +import random +import time + +logger = logging.getLogger('dnf') + + +def build_emitters(conf): + emitters = dnf.util.MultiCallList([]) + system_name = conf.emitters.system_name + emit_via = conf.emitters.emit_via + if emit_via: + for name in emit_via: + if name == 'email': + emitter = dnf.automatic.emitter.EmailEmitter(system_name, conf.email) + emitters.append(emitter) + elif name == 'stdio': + emitter = dnf.automatic.emitter.StdIoEmitter(system_name) + emitters.append(emitter) + elif name == 'motd': + emitter = dnf.automatic.emitter.MotdEmitter(system_name) + emitters.append(emitter) + elif name == 'command_email': + emitter = dnf.automatic.emitter.CommandEmailEmitter(system_name, conf.command_email) + emitters.append(emitter) + else: + raise dnf.exceptions.ConfigError("Unknown emitter option: %s" % name) + return emitters + + +def 
parse_arguments(args): + parser = argparse.ArgumentParser() + parser.add_argument('conf_path', nargs='?', default=dnf.const.CONF_AUTOMATIC_FILENAME) + parser.add_argument('--timer', action='store_true') + parser.add_argument('--installupdates', dest='installupdates', action='store_true') + parser.add_argument('--downloadupdates', dest='downloadupdates', action='store_true') + parser.add_argument('--no-installupdates', dest='installupdates', action='store_false') + parser.add_argument('--no-downloadupdates', dest='downloadupdates', action='store_false') + parser.set_defaults(installupdates=None) + parser.set_defaults(downloadupdates=None) + + return parser.parse_args(args), parser + + +class AutomaticConfig(object): + def __init__(self, filename=None, downloadupdates=None, + installupdates=None): + if not filename: + filename = dnf.const.CONF_AUTOMATIC_FILENAME + self.commands = CommandsConfig() + self.email = EmailConfig() + self.emitters = EmittersConfig() + self.command_email = CommandEmailConfig() + self._parser = None + self._load(filename) + + if downloadupdates: + self.commands.download_updates = True + elif downloadupdates is False: + self.commands.download_updates = False + if installupdates: + self.commands.apply_updates = True + elif installupdates is False: + self.commands.apply_updates = False + + self.commands.imply() + self.filename = filename + + def _load(self, filename): + parser = libdnf.conf.ConfigParser() + try: + parser.read(filename) + except RuntimeError as e: + raise dnf.exceptions.ConfigError('Parsing file "%s" failed: %s' % (filename, e)) + except IOError as e: + logger.warning(e) + + self.commands.populate(parser, 'commands', filename, + libdnf.conf.Option.Priority_AUTOMATICCONFIG) + self.email.populate(parser, 'email', filename, libdnf.conf.Option.Priority_AUTOMATICCONFIG) + self.emitters.populate(parser, 'emitters', filename, + libdnf.conf.Option.Priority_AUTOMATICCONFIG) + self.command_email.populate(parser, 'command_email', filename, 
+ libdnf.conf.Option.Priority_AUTOMATICCONFIG) + self._parser = parser + + def update_baseconf(self, baseconf): + baseconf._populate(self._parser, 'base', self.filename, dnf.conf.PRIO_AUTOMATICCONFIG) + + +class Config(object): + def __init__(self): + self._options = {} + + def add_option(self, name, optionobj): + self._options[name] = optionobj + + def prop_get(obj): + return obj._options[name].getValue() + + def prop_set(obj, val): + obj._options[name].set(libdnf.conf.Option.Priority_RUNTIME, val) + + setattr(type(self), name, property(prop_get, prop_set)) + + def populate(self, parser, section, filename, priority): + """Set option values from an INI file section.""" + if parser.hasSection(section): + for name in parser.options(section): + value = parser.getValue(section, name) + if not value or value == 'None': + value = '' + opt = self._options.get(name, None) + if opt: + try: + opt.set(priority, value) + except RuntimeError as e: + logger.debug(_('Unknown configuration value: %s=%s in %s; %s'), + ucd(name), ucd(value), ucd(filename), str(e)) + else: + logger.debug( + _('Unknown configuration option: %s = %s in %s'), + ucd(name), ucd(value), ucd(filename)) + + +class CommandsConfig(Config): + def __init__(self): + super(CommandsConfig, self).__init__() + self.add_option('apply_updates', libdnf.conf.OptionBool(False)) + self.add_option('base_config_file', libdnf.conf.OptionString('/etc/dnf/dnf.conf')) + self.add_option('download_updates', libdnf.conf.OptionBool(False)) + self.add_option('upgrade_type', libdnf.conf.OptionEnumString('default', + libdnf.conf.VectorString(['default', 'security']))) + self.add_option('random_sleep', libdnf.conf.OptionNumberInt32(300)) + + def imply(self): + if self.apply_updates: + self.download_updates = True + + +class EmailConfig(Config): + def __init__(self): + super(EmailConfig, self).__init__() + self.add_option('email_to', + libdnf.conf.OptionStringList(libdnf.conf.VectorString(["root"]))) + self.add_option('email_from', 
libdnf.conf.OptionString("root")) + self.add_option('email_host', libdnf.conf.OptionString("localhost")) + self.add_option('email_port', libdnf.conf.OptionNumberInt32(25)) + + +class CommandConfig(Config): + _default_command_format = "cat" + _default_stdin_format = "{body}" + + def __init__(self): + super(CommandConfig, self).__init__() + self.add_option('command_format', + libdnf.conf.OptionString(self._default_command_format)) + self.add_option('stdin_format', + libdnf.conf.OptionString(self._default_stdin_format)) + + +class CommandEmailConfig(CommandConfig): + _default_command_format = "mail -s {subject} -r {email_from} {email_to}" + + def __init__(self): + super(CommandEmailConfig, self).__init__() + self.add_option('email_to', + libdnf.conf.OptionStringList(libdnf.conf.VectorString(["root"]))) + self.add_option('email_from', libdnf.conf.OptionString("root")) + + +class EmittersConfig(Config): + def __init__(self): + super(EmittersConfig, self).__init__() + self.add_option('emit_via', libdnf.conf.OptionStringList( + libdnf.conf.VectorString(['email', 'stdio']))) + self.add_option('output_width', libdnf.conf.OptionNumberInt32(80)) + self.add_option('system_name', libdnf.conf.OptionString(socket.gethostname())) + + +def main(args): + (opts, parser) = parse_arguments(args) + + try: + conf = AutomaticConfig(opts.conf_path, opts.downloadupdates, + opts.installupdates) + with dnf.Base() as base: + cli = dnf.cli.Cli(base) + cli._read_conf_file() + # Although dnf-automatic does not use demands, the versionlock + # plugin uses this demand do decide whether it's rules should + # be applied. 
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1746562 + cli.demands.resolving = True + conf.update_baseconf(base.conf) + base.init_plugins(cli=cli) + logger.debug(_('Started dnf-automatic.')) + + if opts.timer: + sleeper = random.randint(0, conf.commands.random_sleep) + logger.debug(_('Sleep for %s seconds'), sleeper) + time.sleep(sleeper) + + base.pre_configure_plugins() + base.read_all_repos() + base.configure_plugins() + base.fill_sack() + upgrade(base, conf.commands.upgrade_type) + base.resolve() + output = dnf.cli.output.Output(base, base.conf) + trans = base.transaction + if not trans: + return 0 + + lst = output.list_transaction(trans, total_width=80) + emitters = build_emitters(conf) + emitters.notify_available(lst) + if not conf.commands.download_updates: + emitters.commit() + return 0 + + base.download_packages(trans.install_set) + emitters.notify_downloaded() + if not conf.commands.apply_updates: + emitters.commit() + return 0 + + base.do_transaction() + emitters.notify_applied() + emitters.commit() + except dnf.exceptions.Error as exc: + logger.error(_('Error: %s'), ucd(exc)) + return 1 + return 0 + + +def upgrade(base, upgrade_type): + if upgrade_type == 'security': + base._update_security_filters.append(base.sack.query().upgrades().filterm( + advisory_type='security')) + base.upgrade_all() + elif upgrade_type == 'default': + base.upgrade_all() + else: + raise dnf.exceptions.Error( + 'Unsupported upgrade_type "{}", only "default" and "security" supported'.format( + upgrade_type)) diff --git a/dnf/base.py b/dnf/base.py new file mode 100644 index 0000000..8091ca0 --- /dev/null +++ b/dnf/base.py @@ -0,0 +1,2537 @@ +# Copyright 2005 Duke University +# Copyright (C) 2012-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +Supplies the Base class. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import argparse +import dnf +import libdnf.transaction + +from dnf.comps import CompsQuery +from dnf.i18n import _, P_, ucd +from dnf.util import _parse_specs +from dnf.db.history import SwdbInterface +from dnf.yum import misc +from functools import reduce +try: + from collections.abc import Sequence +except ImportError: + from collections import Sequence +import datetime +import dnf.callback +import dnf.comps +import dnf.conf +import dnf.conf.read +import dnf.crypto +import dnf.dnssec +import dnf.drpm +import dnf.exceptions +import dnf.goal +import dnf.history +import dnf.lock +import dnf.logging +# WITH_MODULES is used by ansible (lib/ansible/modules/packaging/os/dnf.py) +try: + import dnf.module.module_base + WITH_MODULES = True +except ImportError: + WITH_MODULES = False +import dnf.persistor +import dnf.plugin +import dnf.query +import dnf.repo +import dnf.repodict +import dnf.rpm.connection +import dnf.rpm.miscutils +import dnf.rpm.transaction +import dnf.sack +import dnf.selector +import dnf.subject +import dnf.transaction +import dnf.util +import dnf.yum.rpmtrans +import functools +import hawkey +import itertools +import logging +import math +import os +import operator +import re +import rpm +import time +import shutil + + +logger = logging.getLogger("dnf") + + +class Base(object): + + def 
__init__(self, conf=None): + # :api + self._closed = False + self._conf = conf or self._setup_default_conf() + self._goal = None + self._repo_persistor = None + self._sack = None + self._transaction = None + self._priv_ts = None + self._comps = None + self._comps_trans = dnf.comps.TransactionBunch() + self._history = None + self._tempfiles = set() + self._trans_tempfiles = set() + self._ds_callback = dnf.callback.Depsolve() + self._logging = dnf.logging.Logging() + self._repos = dnf.repodict.RepoDict() + self._rpm_probfilter = set([rpm.RPMPROB_FILTER_OLDPACKAGE]) + self._plugins = dnf.plugin.Plugins() + self._trans_success = False + self._trans_install_set = False + self._tempfile_persistor = None + self._update_security_filters = [] + self._allow_erasing = False + self._repo_set_imported_gpg_keys = set() + self.output = None + + def __enter__(self): + return self + + def __exit__(self, *exc_args): + self.close() + + def __del__(self): + self.close() + + def _add_tempfiles(self, files): + if self._transaction: + self._trans_tempfiles.update(files) + elif self.conf.destdir: + pass + else: + self._tempfiles.update(files) + + def _add_repo_to_sack(self, repo): + repo.load() + mdload_flags = dict(load_filelists=True, + load_presto=repo.deltarpm, + load_updateinfo=True) + if repo.load_metadata_other: + mdload_flags["load_other"] = True + try: + self._sack.load_repo(repo._repo, build_cache=True, **mdload_flags) + except hawkey.Exception as e: + logger.debug(_("loading repo '{}' failure: {}").format(repo.id, e)) + raise dnf.exceptions.RepoError( + _("Loading repository '{}' has failed").format(repo.id)) + + @staticmethod + def _setup_default_conf(): + conf = dnf.conf.Conf() + subst = conf.substitutions + if 'releasever' not in subst: + subst['releasever'] = \ + dnf.rpm.detect_releasever(conf.installroot) + return conf + + def _setup_modular_excludes(self): + hot_fix_repos = [i.id for i in self.repos.iter_enabled() if i.module_hotfixes] + try: + solver_errors = 
self.sack.filter_modules( + self._moduleContainer, hot_fix_repos, self.conf.installroot, + self.conf.module_platform_id, update_only=False, debugsolver=self.conf.debug_solver) + except hawkey.Exception as e: + raise dnf.exceptions.Error(ucd(e)) + if solver_errors: + logger.warning( + dnf.module.module_base.format_modular_solver_errors(solver_errors[0])) + + def _setup_excludes_includes(self, only_main=False): + disabled = set(self.conf.disable_excludes) + if 'all' in disabled and WITH_MODULES: + self._setup_modular_excludes() + return + repo_includes = [] + repo_excludes = [] + # first evaluate repo specific includes/excludes + if not only_main: + for r in self.repos.iter_enabled(): + if r.id in disabled: + continue + if len(r.includepkgs) > 0: + incl_query = self.sack.query().filterm(empty=True) + for incl in set(r.includepkgs): + subj = dnf.subject.Subject(incl) + incl_query = incl_query.union(subj.get_best_query( + self.sack, with_nevra=True, with_provides=False, with_filenames=False)) + incl_query.filterm(reponame=r.id) + repo_includes.append((incl_query.apply(), r.id)) + excl_query = self.sack.query().filterm(empty=True) + for excl in set(r.excludepkgs): + subj = dnf.subject.Subject(excl) + excl_query = excl_query.union(subj.get_best_query( + self.sack, with_nevra=True, with_provides=False, with_filenames=False)) + excl_query.filterm(reponame=r.id) + if excl_query: + repo_excludes.append((excl_query, r.id)) + + # then main (global) includes/excludes because they can mask + # repo specific settings + if 'main' not in disabled: + include_query = self.sack.query().filterm(empty=True) + if len(self.conf.includepkgs) > 0: + for incl in set(self.conf.includepkgs): + subj = dnf.subject.Subject(incl) + include_query = include_query.union(subj.get_best_query( + self.sack, with_nevra=True, with_provides=False, with_filenames=False)) + exclude_query = self.sack.query().filterm(empty=True) + for excl in set(self.conf.excludepkgs): + subj = dnf.subject.Subject(excl) + 
exclude_query = exclude_query.union(subj.get_best_query( + self.sack, with_nevra=True, with_provides=False, with_filenames=False)) + if len(self.conf.includepkgs) > 0: + self.sack.add_includes(include_query) + self.sack.set_use_includes(True) + if exclude_query: + self.sack.add_excludes(exclude_query) + + if repo_includes: + for query, repoid in repo_includes: + self.sack.add_includes(query) + self.sack.set_use_includes(True, repoid) + + if repo_excludes: + for query, repoid in repo_excludes: + self.sack.add_excludes(query) + + if not only_main and WITH_MODULES: + self._setup_modular_excludes() + + def _store_persistent_data(self): + if self._repo_persistor and not self.conf.cacheonly: + expired = [r.id for r in self.repos.iter_enabled() + if (r.metadata and r._repo.isExpired())] + self._repo_persistor.expired_to_add.update(expired) + self._repo_persistor.save() + + if self._tempfile_persistor: + self._tempfile_persistor.save() + + @property + def comps(self): + # :api + return self._comps + + @property + def conf(self): + # :api + return self._conf + + @property + def repos(self): + # :api + return self._repos + + @repos.deleter + def repos(self): + # :api + self._repos = None + + @property + @dnf.util.lazyattr("_priv_rpmconn") + def _rpmconn(self): + return dnf.rpm.connection.RpmConnection(self.conf.installroot) + + @property + def sack(self): + # :api + return self._sack + + @property + def _moduleContainer(self): + if self.sack is None: + raise dnf.exceptions.Error("Sack was not initialized") + if self.sack._moduleContainer is None: + self.sack._moduleContainer = libdnf.module.ModulePackageContainer( + False, self.conf.installroot, self.conf.substitutions["arch"], self.conf.persistdir) + return self.sack._moduleContainer + + @property + def transaction(self): + # :api + return self._transaction + + @transaction.setter + def transaction(self, value): + # :api + if self._transaction: + raise ValueError('transaction already set') + self._transaction = value + + 
def _activate_persistor(self): + self._repo_persistor = dnf.persistor.RepoPersistor(self.conf.cachedir) + + def init_plugins(self, disabled_glob=(), enable_plugins=(), cli=None): + # :api + """Load plugins and run their __init__().""" + if self.conf.plugins: + self._plugins._load(self.conf, disabled_glob, enable_plugins) + self._plugins._run_init(self, cli) + + def pre_configure_plugins(self): + # :api + """Run plugins pre_configure() method.""" + self._plugins._run_pre_config() + + def configure_plugins(self): + # :api + """Run plugins configure() method.""" + self._plugins._run_config() + + def update_cache(self, timer=False): + # :api + + period = self.conf.metadata_timer_sync + persistor = self._repo_persistor + if timer: + if dnf.util.on_metered_connection(): + msg = _('Metadata timer caching disabled ' + 'when running on metered connection.') + logger.info(msg) + return False + if dnf.util.on_ac_power() is False: + msg = _('Metadata timer caching disabled ' + 'when running on a battery.') + logger.info(msg) + return False + if period <= 0: + msg = _('Metadata timer caching disabled.') + logger.info(msg) + return False + since_last_makecache = persistor.since_last_makecache() + if since_last_makecache is not None and since_last_makecache < period: + logger.info(_('Metadata cache refreshed recently.')) + return False + for repo in self.repos.values(): + repo._repo.setMaxMirrorTries(1) + + if not self.repos._any_enabled(): + logger.info(_('There are no enabled repositories in "{}".').format( + '", "'.join(self.conf.reposdir))) + return False + + for r in self.repos.iter_enabled(): + (is_cache, expires_in) = r._metadata_expire_in() + if expires_in is None: + logger.info(_('%s: will never be expired and will not be refreshed.'), r.id) + elif not is_cache or expires_in <= 0: + logger.debug(_('%s: has expired and will be refreshed.'), r.id) + r._repo.expire() + elif timer and expires_in < period: + # expires within the checking period: + msg = _("%s: metadata will 
expire after %d seconds and will be refreshed now") + logger.debug(msg, r.id, expires_in) + r._repo.expire() + else: + logger.debug(_('%s: will expire after %d seconds.'), r.id, + expires_in) + + if timer: + persistor.reset_last_makecache = True + self.fill_sack(load_system_repo=False, load_available_repos=True) # performs the md sync + logger.info(_('Metadata cache created.')) + return True + + def fill_sack(self, load_system_repo=True, load_available_repos=True): + # :api + """Prepare the Sack and the Goal objects. """ + timer = dnf.logging.Timer('sack setup') + self.reset(sack=True, goal=True) + self._sack = dnf.sack._build_sack(self) + lock = dnf.lock.build_metadata_lock(self.conf.cachedir, self.conf.exit_on_lock) + with lock: + if load_system_repo is not False: + try: + # FIXME: If build_cache=True, @System.solv is incorrectly updated in install- + # remove loops + self._sack.load_system_repo(build_cache=False) + except IOError: + if load_system_repo != 'auto': + raise + if load_available_repos: + error_repos = [] + mts = 0 + age = time.time() + # Iterate over installed GPG keys and check their validity using DNSSEC + if self.conf.gpgkey_dns_verification: + dnf.dnssec.RpmImportedKeys.check_imported_keys_validity() + for r in self.repos.iter_enabled(): + try: + self._add_repo_to_sack(r) + if r._repo.getTimestamp() > mts: + mts = r._repo.getTimestamp() + if r._repo.getAge() < age: + age = r._repo.getAge() + logger.debug(_("%s: using metadata from %s."), r.id, + dnf.util.normalize_time( + r._repo.getMaxTimestamp())) + except dnf.exceptions.RepoError as e: + r._repo.expire() + if r.skip_if_unavailable is False: + raise + logger.warning("Error: %s", e) + error_repos.append(r.id) + r.disable() + if error_repos: + logger.warning( + _("Ignoring repositories: %s"), ', '.join(error_repos)) + if self.repos._any_enabled(): + if age != 0 and mts != 0: + logger.info(_("Last metadata expiration check: %s ago on %s."), + datetime.timedelta(seconds=int(age)), + 
dnf.util.normalize_time(mts)) + else: + self.repos.all().disable() + conf = self.conf + self._sack._configure(conf.installonlypkgs, conf.installonly_limit) + self._setup_excludes_includes() + timer() + self._goal = dnf.goal.Goal(self._sack) + self._plugins.run_sack() + return self._sack + + def _finalize_base(self): + self._tempfile_persistor = dnf.persistor.TempfilePersistor( + self.conf.cachedir) + + if not self.conf.keepcache: + self._clean_packages(self._tempfiles) + if self._trans_success: + self._trans_tempfiles.update( + self._tempfile_persistor.get_saved_tempfiles()) + self._tempfile_persistor.empty() + if self._trans_install_set: + self._clean_packages(self._trans_tempfiles) + else: + self._tempfile_persistor.tempfiles_to_add.update( + self._trans_tempfiles) + + if self._tempfile_persistor.tempfiles_to_add: + logger.info(_("The downloaded packages were saved in cache " + "until the next successful transaction.")) + logger.info(_("You can remove cached packages by executing " + "'%s'."), "{prog} clean packages".format(prog=dnf.util.MAIN_PROG)) + + # Do not trigger the lazy creation: + if self._history is not None: + self.history.close() + self._store_persistent_data() + self._closeRpmDB() + self._trans_success = False + + def close(self): + # :api + """Close all potential handles and clean cache. + + Typically the handles are to data sources and sinks. 
+ + """ + + if self._closed: + return + logger.log(dnf.logging.DDEBUG, 'Cleaning up.') + self._closed = True + self._finalize_base() + self.reset(sack=True, repos=True, goal=True) + + def read_all_repos(self, opts=None): + # :api + """Read repositories from the main conf file and from .repo files.""" + + reader = dnf.conf.read.RepoReader(self.conf, opts) + for repo in reader: + try: + self.repos.add(repo) + except dnf.exceptions.ConfigError as e: + logger.warning(e) + + def reset(self, sack=False, repos=False, goal=False): + # :api + """Make the Base object forget about various things.""" + if sack: + self._sack = None + if repos: + self._repos = dnf.repodict.RepoDict() + if goal: + self._goal = None + if self._sack is not None: + self._goal = dnf.goal.Goal(self._sack) + if self._sack and self._moduleContainer: + # sack must be set to enable operations on moduleContainer + self._moduleContainer.rollback() + if self._history is not None: + self.history.close() + self._comps_trans = dnf.comps.TransactionBunch() + self._transaction = None + + def _closeRpmDB(self): + """Closes down the instances of rpmdb that could be open.""" + del self._ts + + _TS_FLAGS_TO_RPM = {'noscripts': rpm.RPMTRANS_FLAG_NOSCRIPTS, + 'notriggers': rpm.RPMTRANS_FLAG_NOTRIGGERS, + 'nodocs': rpm.RPMTRANS_FLAG_NODOCS, + 'test': rpm.RPMTRANS_FLAG_TEST, + 'justdb': rpm.RPMTRANS_FLAG_JUSTDB, + 'nocontexts': rpm.RPMTRANS_FLAG_NOCONTEXTS, + 'nocrypto': rpm.RPMTRANS_FLAG_NOFILEDIGEST} + if hasattr(rpm, 'RPMTRANS_FLAG_NOCAPS'): + # Introduced in rpm-4.14 + _TS_FLAGS_TO_RPM['nocaps'] = rpm.RPMTRANS_FLAG_NOCAPS + + _TS_VSFLAGS_TO_RPM = {'nocrypto': rpm._RPMVSF_NOSIGNATURES | + rpm._RPMVSF_NODIGESTS} + + @property + def goal(self): + return self._goal + + @property + def _ts(self): + """Set up the RPM transaction set that will be used + for all the work.""" + if self._priv_ts is not None: + return self._priv_ts + self._priv_ts = dnf.rpm.transaction.TransactionWrapper( + self.conf.installroot) + 
self._priv_ts.setFlags(0) # reset everything. + for flag in self.conf.tsflags: + rpm_flag = self._TS_FLAGS_TO_RPM.get(flag) + if rpm_flag is None: + logger.critical(_('Invalid tsflag in config file: %s'), flag) + continue + self._priv_ts.addTsFlag(rpm_flag) + vs_flag = self._TS_VSFLAGS_TO_RPM.get(flag) + if vs_flag is not None: + self._priv_ts.pushVSFlags(vs_flag) + + if not self.conf.diskspacecheck: + self._rpm_probfilter.add(rpm.RPMPROB_FILTER_DISKSPACE) + + if self.conf.ignorearch: + self._rpm_probfilter.add(rpm.RPMPROB_FILTER_IGNOREARCH) + + probfilter = reduce(operator.or_, self._rpm_probfilter, 0) + self._priv_ts.setProbFilter(probfilter) + return self._priv_ts + + @_ts.deleter + def _ts(self): + """Releases the RPM transaction set. """ + if self._priv_ts is None: + return + self._priv_ts.close() + del self._priv_ts + self._priv_ts = None + + def read_comps(self, arch_filter=False): + # :api + """Create the groups object to access the comps metadata.""" + timer = dnf.logging.Timer('loading comps') + self._comps = dnf.comps.Comps() + + logger.log(dnf.logging.DDEBUG, 'Getting group metadata') + for repo in self.repos.iter_enabled(): + if not repo.enablegroups: + continue + if not repo.metadata: + continue + comps_fn = repo._repo.getCompsFn() + if not comps_fn: + continue + + logger.log(dnf.logging.DDEBUG, + 'Adding group file from repository: %s', repo.id) + if repo._repo.getSyncStrategy() == dnf.repo.SYNC_ONLY_CACHE: + decompressed = misc.calculate_repo_gen_dest(comps_fn, + 'groups.xml') + if not os.path.exists(decompressed): + # root privileges are needed for comps decompression + continue + else: + decompressed = misc.repo_gen_decompress(comps_fn, 'groups.xml') + + try: + self._comps._add_from_xml_filename(decompressed) + except dnf.exceptions.CompsError as e: + msg = _('Failed to add groups file for repository: %s - %s') + logger.critical(msg, repo.id, e) + + if arch_filter: + self._comps._i.arch_filter( + [self._conf.substitutions['basearch']]) + timer() + 
return self._comps + + def _getHistory(self): + """auto create the history object that to access/append the transaction + history information. """ + if self._history is None: + releasever = self.conf.releasever + self._history = SwdbInterface(self.conf.persistdir, releasever=releasever) + return self._history + + history = property(fget=lambda self: self._getHistory(), + fset=lambda self, value: setattr( + self, "_history", value), + fdel=lambda self: setattr(self, "_history", None), + doc="DNF SWDB Interface Object") + + def _goal2transaction(self, goal): + ts = self.history.rpm + all_obsoleted = set(goal.list_obsoleted()) + installonly_query = self._get_installonly_query() + + for pkg in goal.list_downgrades(): + obs = goal.obsoleted_by_package(pkg) + downgraded = obs[0] + self._ds_callback.pkg_added(downgraded, 'dd') + self._ds_callback.pkg_added(pkg, 'd') + ts.add_downgrade(pkg, downgraded, obs[1:]) + for pkg in goal.list_reinstalls(): + self._ds_callback.pkg_added(pkg, 'r') + obs = goal.obsoleted_by_package(pkg) + nevra_pkg = str(pkg) + # reinstall could obsolete multiple packages with the same NEVRA or different NEVRA + # Set the package with the same NEVRA as reinstalled + obsoletes = [] + for obs_pkg in obs: + if str(obs_pkg) == nevra_pkg: + obsoletes.insert(0, obs_pkg) + else: + obsoletes.append(obs_pkg) + reinstalled = obsoletes[0] + ts.add_reinstall(pkg, reinstalled, obsoletes[1:]) + for pkg in goal.list_installs(): + self._ds_callback.pkg_added(pkg, 'i') + obs = goal.obsoleted_by_package(pkg) + # Skip obsoleted packages that are not part of all_obsoleted, + # they are handled as upgrades/downgrades. + # Also keep RPMs with the same name - they're not always in all_obsoleted. 
+ obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name] + + reason = goal.get_reason(pkg) + + if pkg in installonly_query: + reason_installonly = ts.get_reason(pkg) + if libdnf.transaction.TransactionItemReasonCompare( + reason, reason_installonly) == -1: + reason = reason_installonly + + # inherit the best reason from obsoleted packages + for obsolete in obs: + reason_obsolete = ts.get_reason(obsolete) + if libdnf.transaction.TransactionItemReasonCompare(reason, reason_obsolete) == -1: + reason = reason_obsolete + + ts.add_install(pkg, obs, reason) + cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od') + dnf.util.mapall(cb, obs) + for pkg in goal.list_upgrades(): + obs = goal.obsoleted_by_package(pkg) + upgraded = None + for i in obs: + # try to find a package with matching name as the upgrade + if i.name == pkg.name: + upgraded = i + break + if upgraded is None: + # no matching name -> pick the first one + upgraded = obs.pop(0) + else: + obs.remove(upgraded) + # Skip obsoleted packages that are not part of all_obsoleted, + # they are handled as upgrades/downgrades. + # Also keep RPMs with the same name - they're not always in all_obsoleted. + obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name] + + cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od') + dnf.util.mapall(cb, obs) + if pkg in installonly_query: + ts.add_install(pkg, obs) + else: + ts.add_upgrade(pkg, upgraded, obs) + self._ds_callback.pkg_added(upgraded, 'ud') + self._ds_callback.pkg_added(pkg, 'u') + for pkg in goal.list_erasures(): + self._ds_callback.pkg_added(pkg, 'e') + reason = goal.get_reason(pkg) + ts.add_erase(pkg, reason) + return ts + + def _query_matches_installed(self, q): + """ See what packages in the query match packages (also in older + versions, but always same architecture) that are already installed. + + Unlike in case of _sltr_matches_installed(), it is practical here + to know even the packages in the original query that can still be + installed. 
+ """ + inst = q.installed() + inst_per_arch = inst._na_dict() + avail_per_arch = q.available()._na_dict() + avail_l = [] + inst_l = [] + for na in avail_per_arch: + if na in inst_per_arch: + inst_l.append(inst_per_arch[na][0]) + else: + avail_l.append(avail_per_arch[na]) + return inst_l, avail_l + + def _sltr_matches_installed(self, sltr): + """ See if sltr matches a patches that is (in older version or different + architecture perhaps) already installed. + """ + inst = self.sack.query().installed().filterm(pkg=sltr.matches()) + return list(inst) + + def iter_userinstalled(self): + """Get iterator over the packages installed by the user.""" + return (pkg for pkg in self.sack.query().installed() + if self.history.user_installed(pkg)) + + def _run_hawkey_goal(self, goal, allow_erasing): + ret = goal.run( + allow_uninstall=allow_erasing, force_best=self.conf.best, + ignore_weak_deps=(not self.conf.install_weak_deps)) + if self.conf.debug_solver: + goal.write_debugdata('./debugdata/rpms') + return ret + + def resolve(self, allow_erasing=False): + # :api + """Build the transaction set.""" + exc = None + self._finalize_comps_trans() + + timer = dnf.logging.Timer('depsolve') + self._ds_callback.start() + goal = self._goal + if goal.req_has_erase(): + goal.push_userinstalled(self.sack.query().installed(), + self.history) + elif not self.conf.upgrade_group_objects_upgrade: + # exclude packages installed from groups + # these packages will be marked to installation + # which could prevent them from upgrade, downgrade + # to prevent "conflicting job" error it's not applied + # to "remove" and "reinstall" commands + + solver = self._build_comps_solver() + solver._exclude_packages_from_installed_groups(self) + + goal.add_protected(self.sack.query().filterm( + name=self.conf.protected_packages)) + if not self._run_hawkey_goal(goal, allow_erasing): + if self.conf.debuglevel >= 6: + goal.log_decisions() + msg = dnf.util._format_resolve_problems(goal.problem_rules()) + exc = 
dnf.exceptions.DepsolveError(msg) + else: + self._transaction = self._goal2transaction(goal) + + self._ds_callback.end() + timer() + + got_transaction = self._transaction is not None and \ + len(self._transaction) > 0 + if got_transaction: + msg = self._transaction._rpm_limitations() + if msg: + exc = dnf.exceptions.Error(msg) + + if exc is not None: + raise exc + + self._plugins.run_resolved() + + # auto-enable module streams based on installed RPMs + new_pkgs = self._goal.list_installs() + new_pkgs += self._goal.list_upgrades() + new_pkgs += self._goal.list_downgrades() + new_pkgs += self._goal.list_reinstalls() + self.sack.set_modules_enabled_by_pkgset(self._moduleContainer, new_pkgs) + + return got_transaction + + def do_transaction(self, display=()): + # :api + if not isinstance(display, Sequence): + display = [display] + display = \ + [dnf.yum.rpmtrans.LoggingTransactionDisplay()] + list(display) + + if not self.transaction: + # packages are not changed, but comps and modules changes need to be committed + self._moduleContainer.save() + self._moduleContainer.updateFailSafeData() + if self._history and (self._history.group or self._history.env): + cmdline = None + if hasattr(self, 'args') and self.args: + cmdline = ' '.join(self.args) + elif hasattr(self, 'cmds') and self.cmds: + cmdline = ' '.join(self.cmds) + old = self.history.last() + if old is None: + rpmdb_version = self.sack._rpmdb_version() + else: + rpmdb_version = old.end_rpmdb_version + + self.history.beg(rpmdb_version, [], [], cmdline) + self.history.end(rpmdb_version) + self._plugins.run_pre_transaction() + self._plugins.run_transaction() + self._trans_success = True + return + + tid = None + logger.info(_('Running transaction check')) + lock = dnf.lock.build_rpmdb_lock(self.conf.persistdir, + self.conf.exit_on_lock) + with lock: + self.transaction._populate_rpm_ts(self._ts) + + msgs = self._run_rpm_check() + if msgs: + msg = _('Error: transaction check vs depsolve:') + logger.error(msg) + for msg 
in msgs: + logger.error(msg) + raise dnf.exceptions.TransactionCheckError(msg) + + logger.info(_('Transaction check succeeded.')) + + timer = dnf.logging.Timer('transaction test') + logger.info(_('Running transaction test')) + + self._ts.order() # order the transaction + self._ts.clean() # release memory not needed beyond this point + + testcb = dnf.yum.rpmtrans.RPMTransaction(self, test=True) + tserrors = self._ts.test(testcb) + + if len(tserrors) > 0: + for msg in testcb.messages(): + logger.critical(_('RPM: {}').format(msg)) + errstring = _('Transaction test error:') + '\n' + for descr in tserrors: + errstring += ' %s\n' % ucd(descr) + + summary = self._trans_error_summary(errstring) + if summary: + errstring += '\n' + summary + + raise dnf.exceptions.Error(errstring) + del testcb + + logger.info(_('Transaction test succeeded.')) + timer() + + # save module states on disk right before entering rpm transaction, + # because we want system in recoverable state if transaction gets interrupted + self._moduleContainer.save() + self._moduleContainer.updateFailSafeData() + + # unset the sigquit handler + timer = dnf.logging.Timer('transaction') + # setup our rpm ts callback + cb = dnf.yum.rpmtrans.RPMTransaction(self, displays=display) + if self.conf.debuglevel < 2: + for display_ in cb.displays: + display_.output = False + + self._plugins.run_pre_transaction() + + logger.info(_('Running transaction')) + tid = self._run_transaction(cb=cb) + timer() + self._plugins.unload_removed_plugins(self.transaction) + self._plugins.run_transaction() + + return tid + + def _trans_error_summary(self, errstring): + """Parse the error string for 'interesting' errors which can + be grouped, such as disk space issues. + + :param errstring: the error string + :return: a string containing a summary of the errors + """ + summary = '' + # do disk space report first + p = re.compile(r'needs (\d+)(K|M)B(?: more space)? 
on the (\S+) filesystem') + disk = {} + for m in p.finditer(errstring): + size_in_mb = int(m.group(1)) if m.group(2) == 'M' else math.ceil( + int(m.group(1)) / 1024.0) + if m.group(3) not in disk: + disk[m.group(3)] = size_in_mb + if disk[m.group(3)] < size_in_mb: + disk[m.group(3)] = size_in_mb + + if disk: + summary += _('Disk Requirements:') + "\n" + for k in disk: + summary += " " + P_( + 'At least {0}MB more space needed on the {1} filesystem.', + 'At least {0}MB more space needed on the {1} filesystem.', + disk[k]).format(disk[k], k) + '\n' + + if not summary: + return None + + summary = _('Error Summary') + '\n-------------\n' + summary + + return summary + + def _record_history(self): + return self.conf.history_record and \ + not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST) + + def _run_transaction(self, cb): + """ + Perform the RPM transaction. + + :return: history database transaction ID or None + """ + + tid = None + if self._record_history(): + using_pkgs_pats = list(self.conf.history_record_packages) + installed_query = self.sack.query().installed() + using_pkgs = installed_query.filter(name=using_pkgs_pats).run() + rpmdbv = self.sack._rpmdb_version() + lastdbv = self.history.last() + if lastdbv is not None: + lastdbv = lastdbv.end_rpmdb_version + + if lastdbv is None or rpmdbv != lastdbv: + logger.debug(_("RPMDB altered outside of {prog}.").format( + prog=dnf.util.MAIN_PROG_UPPER)) + + cmdline = None + if hasattr(self, 'args') and self.args: + cmdline = ' '.join(self.args) + elif hasattr(self, 'cmds') and self.cmds: + cmdline = ' '.join(self.cmds) + + tid = self.history.beg(rpmdbv, using_pkgs, [], cmdline) + + if self.conf.comment: + # write out user provided comment to history info + # TODO: + # self._store_comment_in_history(tid, self.conf.comment) + pass + + if self.conf.reset_nice: + onice = os.nice(0) + if onice: + try: + os.nice(-onice) + except: + onice = 0 + + logger.log(dnf.logging.DDEBUG, 'RPM transaction start.') + errors = 
self._ts.run(cb.callback, '') + logger.log(dnf.logging.DDEBUG, 'RPM transaction over.') + # ts.run() exit codes are, hmm, "creative": None means all ok, empty + # list means some errors happened in the transaction and non-empty + # list that there were errors preventing the ts from starting... + if self.conf.reset_nice: + try: + os.nice(onice) + except: + pass + dnf.util._sync_rpm_trans_with_swdb(self._ts, self._transaction) + + if errors is None: + pass + elif len(errors) == 0: + # If there is no failing element it means that some "global" error + # occurred (like rpm failed to obtain the transaction lock). Just pass + # the rpm logs on to the user and raise an Error. + # If there are failing elements the problem is related to those + # elements and the Error is raised later, after saving the failure + # to the history and printing out the transaction table to user. + failed = [el for el in self._ts if el.Failed()] + if not failed: + for msg in cb.messages(): + logger.critical(_('RPM: {}').format(msg)) + msg = _('Could not run transaction.') + raise dnf.exceptions.Error(msg) + else: + logger.critical(_("Transaction couldn't start:")) + for e in errors: + logger.critical(ucd(e[0])) + if self._record_history() and not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST): + self.history.end(rpmdbv) + msg = _("Could not run transaction.") + raise dnf.exceptions.Error(msg) + + for i in ('ts_all_fn', 'ts_done_fn'): + if hasattr(cb, i): + fn = getattr(cb, i) + try: + misc.unlink_f(fn) + except (IOError, OSError): + msg = _('Failed to remove transaction file %s') + logger.critical(msg, fn) + + # keep install_set status because _verify_transaction will clean it + self._trans_install_set = bool(self._transaction.install_set) + + # sync up what just happened versus what is in the rpmdb + if not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST): + self._verify_transaction(cb.verify_tsi_package) + + return tid + + def _verify_transaction(self, verify_pkg_cb=None): + transaction_items = [ + 
tsi for tsi in self.transaction + if tsi.action != libdnf.transaction.TransactionItemAction_REASON_CHANGE] + total = len(transaction_items) + + def display_banner(pkg, count): + count += 1 + if verify_pkg_cb is not None: + verify_pkg_cb(pkg, count, total) + return count + + timer = dnf.logging.Timer('verify transaction') + count = 0 + + rpmdb_sack = dnf.sack.rpmdb_sack(self) + + # mark group packages that are installed on the system as installed in the db + q = rpmdb_sack.query().installed() + names = set([i.name for i in q]) + for ti in self.history.group: + g = ti.getCompsGroupItem() + for p in g.getPackages(): + if p.getName() in names: + p.setInstalled(True) + p.save() + + # TODO: installed groups in environments + + # Post-transaction verification is no longer needed, + # because DNF trusts error codes returned by RPM. + # Verification banner is displayed to preserve UX. + # TODO: drop in future DNF + for tsi in transaction_items: + count = display_banner(tsi.pkg, count) + + rpmdbv = rpmdb_sack._rpmdb_version() + self.history.end(rpmdbv) + + timer() + self._trans_success = True + + def _download_remote_payloads(self, payloads, drpm, progress, callback_total): + lock = dnf.lock.build_download_lock(self.conf.cachedir, self.conf.exit_on_lock) + with lock: + beg_download = time.time() + est_remote_size = sum(pload.download_size for pload in payloads) + total_drpm = len( + [payload for payload in payloads if isinstance(payload, dnf.drpm.DeltaPayload)]) + # compatibility part for tools that do not accept total_drpms keyword + if progress.start.__code__.co_argcount == 4: + progress.start(len(payloads), est_remote_size, total_drpms=total_drpm) + else: + progress.start(len(payloads), est_remote_size) + errors = dnf.repo._download_payloads(payloads, drpm) + + if errors._irrecoverable: + raise dnf.exceptions.DownloadError(errors._irrecoverable) + + remote_size = sum(errors._bandwidth_used(pload) + for pload in payloads) + saving = dnf.repo._update_saving((0, 0), 
payloads, + errors._recoverable) + + retries = self.conf.retries + forever = retries == 0 + while errors._recoverable and (forever or retries > 0): + if retries > 0: + retries -= 1 + + msg = _("Some packages were not downloaded. Retrying.") + logger.info(msg) + + remaining_pkgs = [pkg for pkg in errors._recoverable] + payloads = \ + [dnf.repo._pkg2payload(pkg, progress, dnf.repo.RPMPayload) + for pkg in remaining_pkgs] + est_remote_size = sum(pload.download_size + for pload in payloads) + progress.start(len(payloads), est_remote_size) + errors = dnf.repo._download_payloads(payloads, drpm) + + if errors._irrecoverable: + raise dnf.exceptions.DownloadError(errors._irrecoverable) + + remote_size += \ + sum(errors._bandwidth_used(pload) for pload in payloads) + saving = dnf.repo._update_saving(saving, payloads, {}) + + if errors._recoverable: + msg = dnf.exceptions.DownloadError.errmap2str( + errors._recoverable) + logger.info(msg) + + if callback_total is not None: + callback_total(remote_size, beg_download) + + (real, full) = saving + if real != full: + if real < full: + msg = _("Delta RPMs reduced %.1f MB of updates to %.1f MB " + "(%d.1%% saved)") + elif real > full: + msg = _("Failed Delta RPMs increased %.1f MB of updates to %.1f MB " + "(%d.1%% wasted)") + percent = 100 - real / full * 100 + logger.info(msg, full / 1024 ** 2, real / 1024 ** 2, percent) + + def download_packages(self, pkglist, progress=None, callback_total=None): + # :api + """Download the packages specified by the given list of packages. + + `pkglist` is a list of packages to download, `progress` is an optional + DownloadProgress instance, `callback_total` an optional callback to + output messages about the download operation. 
+ + """ + remote_pkgs, local_pkgs = self._select_remote_pkgs(pkglist) + if remote_pkgs: + if progress is None: + progress = dnf.callback.NullDownloadProgress() + drpm = dnf.drpm.DeltaInfo(self.sack.query().installed(), + progress, self.conf.deltarpm_percentage) + self._add_tempfiles([pkg.localPkg() for pkg in remote_pkgs]) + payloads = [dnf.repo._pkg2payload(pkg, progress, drpm.delta_factory, + dnf.repo.RPMPayload) + for pkg in remote_pkgs] + self._download_remote_payloads(payloads, drpm, progress, callback_total) + + if self.conf.destdir: + for pkg in local_pkgs: + if pkg.baseurl: + location = os.path.join(pkg.baseurl.replace("file://", ""), + pkg.location.lstrip("/")) + else: + location = os.path.join(pkg.repo.pkgdir, pkg.location.lstrip("/")) + shutil.copy(location, self.conf.destdir) + + def add_remote_rpms(self, path_list, strict=True, progress=None): + # :api + pkgs = [] + if not path_list: + return pkgs + pkgs_error = [] + for path in path_list: + if not os.path.exists(path) and '://' in path: + # download remote rpm to a tempfile + path = dnf.util._urlopen_progress(path, self.conf, progress) + self._add_tempfiles([path]) + try: + pkgs.append(self.sack.add_cmdline_package(path)) + except IOError as e: + logger.warning(e) + pkgs_error.append(path) + self._setup_excludes_includes(only_main=True) + if pkgs_error and strict: + raise IOError(_("Could not open: {}").format(' '.join(pkgs_error))) + return pkgs + + def _sig_check_pkg(self, po): + """Verify the GPG signature of the given package object. + + :param po: the package object to verify the signature of + :return: (result, error_string) + where result is:: + + 0 = GPG signature verifies ok or verification is not required. + 1 = GPG verification failed but installation of the right GPG key + might help. + 2 = Fatal GPG verification error, give up. 
+ """ + if po._from_cmdline: + check = self.conf.localpkg_gpgcheck + hasgpgkey = 0 + else: + repo = self.repos[po.repoid] + check = repo.gpgcheck + hasgpgkey = not not repo.gpgkey + + if check: + root = self.conf.installroot + ts = dnf.rpm.transaction.initReadOnlyTransaction(root) + sigresult = dnf.rpm.miscutils.checkSig(ts, po.localPkg()) + localfn = os.path.basename(po.localPkg()) + del ts + if sigresult == 0: + result = 0 + msg = '' + + elif sigresult == 1: + if hasgpgkey: + result = 1 + else: + result = 2 + msg = _('Public key for %s is not installed') % localfn + + elif sigresult == 2: + result = 2 + msg = _('Problem opening package %s') % localfn + + elif sigresult == 3: + if hasgpgkey: + result = 1 + else: + result = 2 + result = 1 + msg = _('Public key for %s is not trusted') % localfn + + elif sigresult == 4: + result = 2 + msg = _('Package %s is not signed') % localfn + + else: + result = 0 + msg = '' + + return result, msg + + def _clean_packages(self, packages): + for fn in packages: + if not os.path.exists(fn): + continue + try: + misc.unlink_f(fn) + except OSError: + logger.warning(_('Cannot remove %s'), fn) + continue + else: + logger.log(dnf.logging.DDEBUG, + _('%s removed'), fn) + + def _do_package_lists(self, pkgnarrow='all', patterns=None, showdups=None, + ignore_case=False, reponame=None): + """Return a :class:`misc.GenericHolder` containing + lists of package objects. The contents of the lists are + specified in various ways by the arguments. + + :param pkgnarrow: a string specifying which types of packages + lists to produces, such as updates, installed, available, + etc. 
+ :param patterns: a list of names or wildcards specifying + packages to list + :param showdups: whether to include duplicate packages in the + lists + :param ignore_case: whether to ignore case when searching by + package names + :param reponame: limit packages list to the given repository + :return: a :class:`misc.GenericHolder` instance with the + following lists defined:: + + available = list of packageObjects + installed = list of packageObjects + upgrades = tuples of packageObjects (updating, installed) + extras = list of packageObjects + obsoletes = tuples of packageObjects (obsoleting, installed) + recent = list of packageObjects + """ + if showdups is None: + showdups = self.conf.showdupesfromrepos + if patterns is None: + return self._list_pattern( + pkgnarrow, patterns, showdups, ignore_case, reponame) + + assert not dnf.util.is_string_type(patterns) + list_fn = functools.partial( + self._list_pattern, pkgnarrow, showdups=showdups, + ignore_case=ignore_case, reponame=reponame) + if patterns is None or len(patterns) == 0: + return list_fn(None) + yghs = map(list_fn, patterns) + return reduce(lambda a, b: a.merge_lists(b), yghs) + + def _list_pattern(self, pkgnarrow, pattern, showdups, ignore_case, + reponame=None): + def is_from_repo(package): + """Test whether given package originates from the repository.""" + if reponame is None: + return True + return self.history.repo(package) == reponame + + def pkgs_from_repo(packages): + """Filter out the packages which do not originate from the repo.""" + return (package for package in packages if is_from_repo(package)) + + def query_for_repo(query): + """Filter out the packages which do not originate from the repo.""" + if reponame is None: + return query + return query.filter(reponame=reponame) + + ygh = misc.GenericHolder(iter=pkgnarrow) + + installed = [] + available = [] + reinstall_available = [] + old_available = [] + updates = [] + obsoletes = [] + obsoletesTuples = [] + recent = [] + extras = [] + 
autoremove = [] + + # do the initial pre-selection + ic = ignore_case + q = self.sack.query() + if pattern is not None: + subj = dnf.subject.Subject(pattern, ignore_case=ic) + q = subj.get_best_query(self.sack, with_provides=False) + + # list all packages - those installed and available: + if pkgnarrow == 'all': + dinst = {} + ndinst = {} # Newest versions by name.arch + for po in q.installed(): + dinst[po.pkgtup] = po + if showdups: + continue + key = (po.name, po.arch) + if key not in ndinst or po > ndinst[key]: + ndinst[key] = po + installed = list(pkgs_from_repo(dinst.values())) + + avail = query_for_repo(q) + if not showdups: + avail = avail.latest() + for pkg in avail: + if showdups: + if pkg.pkgtup in dinst: + reinstall_available.append(pkg) + else: + available.append(pkg) + else: + key = (pkg.name, pkg.arch) + if pkg.pkgtup in dinst: + reinstall_available.append(pkg) + elif key not in ndinst or pkg.evr_gt(ndinst[key]): + available.append(pkg) + else: + old_available.append(pkg) + + # produce the updates list of tuples + elif pkgnarrow == 'upgrades': + updates = query_for_repo(q).upgrades() + # reduce a query to security upgrades if they are specified + updates = self._merge_update_filters(updates) + # reduce a query to latest packages + updates = updates.latest().run() + + # installed only + elif pkgnarrow == 'installed': + installed = list(pkgs_from_repo(q.installed())) + + # available in a repository + elif pkgnarrow == 'available': + if showdups: + avail = query_for_repo(q).available() + installed_dict = q.installed()._na_dict() + for avail_pkg in avail: + key = (avail_pkg.name, avail_pkg.arch) + installed_pkgs = installed_dict.get(key, []) + same_ver = [pkg for pkg in installed_pkgs + if pkg.evr == avail_pkg.evr] + if len(same_ver) > 0: + reinstall_available.append(avail_pkg) + else: + available.append(avail_pkg) + else: + # we will only look at the latest versions of packages: + available_dict = query_for_repo( + q).available().latest()._na_dict() + 
installed_dict = q.installed().latest()._na_dict() + for (name, arch) in available_dict: + avail_pkg = available_dict[(name, arch)][0] + inst_pkg = installed_dict.get((name, arch), [None])[0] + if not inst_pkg or avail_pkg.evr_gt(inst_pkg): + available.append(avail_pkg) + elif avail_pkg.evr_eq(inst_pkg): + reinstall_available.append(avail_pkg) + else: + old_available.append(avail_pkg) + + # packages to be removed by autoremove + elif pkgnarrow == 'autoremove': + autoremove_q = query_for_repo(q)._unneeded(self.history.swdb) + autoremove = autoremove_q.run() + + # not in a repo but installed + elif pkgnarrow == 'extras': + extras = [pkg for pkg in q.extras() if is_from_repo(pkg)] + + # obsoleting packages (and what they obsolete) + elif pkgnarrow == 'obsoletes': + inst = q.installed() + obsoletes = query_for_repo( + self.sack.query()).filter(obsoletes=inst) + # reduce a query to security upgrades if they are specified + obsoletes = self._merge_update_filters(obsoletes, warning=False) + obsoletesTuples = [] + for new in obsoletes: + obsoleted_reldeps = new.obsoletes + obsoletesTuples.extend( + [(new, old) for old in + inst.filter(provides=obsoleted_reldeps)]) + + # packages recently added to the repositories + elif pkgnarrow == 'recent': + avail = q.available() + if not showdups: + avail = avail.latest() + recent = query_for_repo(avail)._recent(self.conf.recent) + + ygh.installed = installed + ygh.available = available + ygh.reinstall_available = reinstall_available + ygh.old_available = old_available + ygh.updates = updates + ygh.obsoletes = obsoletes + ygh.obsoletesTuples = obsoletesTuples + ygh.recent = recent + ygh.extras = extras + ygh.autoremove = autoremove + + return ygh + + def _add_comps_trans(self, trans): + self._comps_trans += trans + return len(trans) + + def _remove_if_unneeded(self, query): + """ + Mark to remove packages that are not required by any user installed package (reason group + or user) + :param query: dnf.query.Query() object + """ + query 
= query.installed() + if not query: + return + + unneeded_pkgs = query._safe_to_remove(self.history.swdb, debug_solver=False) + unneeded_pkgs_history = query.filter( + pkg=[i for i in query if self.history.group.is_removable_pkg(i.name)]) + pkg_with_dependent_pkgs = unneeded_pkgs_history.difference(unneeded_pkgs) + + # mark packages with dependent packages as a dependency to allow removal with dependent + # package + for pkg in pkg_with_dependent_pkgs: + self.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_DEPENDENCY) + unneeded_pkgs = unneeded_pkgs.intersection(unneeded_pkgs_history) + + remove_packages = query.intersection(unneeded_pkgs) + if remove_packages: + for pkg in remove_packages: + self._goal.erase(pkg, clean_deps=self.conf.clean_requirements_on_remove) + + def _finalize_comps_trans(self): + trans = self._comps_trans + basearch = self.conf.substitutions['basearch'] + + def trans_upgrade(query, remove_query, comps_pkg): + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=query) + self._goal.upgrade(select=sltr) + return remove_query + + def trans_install(query, remove_query, comps_pkg, strict): + if self.conf.multilib_policy == "all": + if not comps_pkg.requires: + self._install_multiarch(query, strict=strict) + else: + # it installs only one arch for conditional packages + installed_query = query.installed().apply() + self._report_already_installed(installed_query) + sltr = dnf.selector.Selector(self.sack) + sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires)) + self._goal.install(select=sltr, optional=not strict) + + else: + sltr = dnf.selector.Selector(self.sack) + if comps_pkg.requires: + sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires)) + else: + if self.conf.obsoletes: + query = query.union(self.sack.query().filterm(obsoletes=query)) + sltr.set(pkg=query) + self._goal.install(select=sltr, optional=not strict) + return remove_query + + def trans_remove(query, remove_query, 
comps_pkg): + remove_query = remove_query.union(query) + return remove_query + + remove_query = self.sack.query().filterm(empty=True) + attr_fn = ((trans.install, functools.partial(trans_install, strict=True)), + (trans.install_opt, functools.partial(trans_install, strict=False)), + (trans.upgrade, trans_upgrade), + (trans.remove, trans_remove)) + + for (attr, fn) in attr_fn: + for comps_pkg in attr: + query_args = {'name': comps_pkg.name} + if (comps_pkg.basearchonly): + query_args.update({'arch': basearch}) + q = self.sack.query().filterm(**query_args).apply() + q.filterm(arch__neq="src") + if not q: + package_string = comps_pkg.name + if comps_pkg.basearchonly: + package_string += '.' + basearch + logger.warning(_('No match for group package "{}"').format(package_string)) + continue + remove_query = fn(q, remove_query, comps_pkg) + self._goal.group_members.add(comps_pkg.name) + + self._remove_if_unneeded(remove_query) + + def _build_comps_solver(self): + def reason_fn(pkgname): + q = self.sack.query().installed().filterm(name=pkgname) + if not q: + return None + try: + return self.history.rpm.get_reason(q[0]) + except AttributeError: + return libdnf.transaction.TransactionItemReason_UNKNOWN + + return dnf.comps.Solver(self.history, self._comps, reason_fn) + + def environment_install(self, env_id, types, exclude=None, strict=True, exclude_groups=None): + # :api + assert dnf.util.is_string_type(env_id) + solver = self._build_comps_solver() + types = self._translate_comps_pkg_types(types) + trans = dnf.comps.install_or_skip(solver._environment_install, + env_id, types, exclude or set(), + strict, exclude_groups) + if not trans: + return 0 + return self._add_comps_trans(trans) + + def environment_remove(self, env_id): + # :api + assert dnf.util.is_string_type(env_id) + solver = self._build_comps_solver() + trans = solver._environment_remove(env_id) + return self._add_comps_trans(trans) + + _COMPS_TRANSLATION = { + 'default': dnf.comps.DEFAULT, + 'mandatory': 
dnf.comps.MANDATORY, + 'optional': dnf.comps.OPTIONAL, + 'conditional': dnf.comps.CONDITIONAL + } + + @staticmethod + def _translate_comps_pkg_types(pkg_types): + ret = 0 + for (name, enum) in Base._COMPS_TRANSLATION.items(): + if name in pkg_types: + ret |= enum + return ret + + def group_install(self, grp_id, pkg_types, exclude=None, strict=True): + # :api + """Installs packages of selected group + :param exclude: list of package name glob patterns + that will be excluded from install set + :param strict: boolean indicating whether group packages that + exist but are non-installable due to e.g. dependency + issues should be skipped (False) or cause transaction to + fail to resolve (True) + """ + def _pattern_to_pkgname(pattern): + if dnf.util.is_glob_pattern(pattern): + q = self.sack.query().filterm(name__glob=pattern) + return map(lambda p: p.name, q) + else: + return (pattern,) + + assert dnf.util.is_string_type(grp_id) + exclude_pkgnames = None + if exclude: + nested_excludes = [_pattern_to_pkgname(p) for p in exclude] + exclude_pkgnames = itertools.chain.from_iterable(nested_excludes) + + solver = self._build_comps_solver() + pkg_types = self._translate_comps_pkg_types(pkg_types) + trans = dnf.comps.install_or_skip(solver._group_install, + grp_id, pkg_types, exclude_pkgnames, + strict) + if not trans: + return 0 + if strict: + instlog = trans.install + else: + instlog = trans.install_opt + logger.debug(_("Adding packages from group '%s': %s"), + grp_id, instlog) + return self._add_comps_trans(trans) + + def env_group_install(self, patterns, types, strict=True, exclude=None, exclude_groups=None): + q = CompsQuery(self.comps, self.history, CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS, + CompsQuery.AVAILABLE) + cnt = 0 + done = True + for pattern in patterns: + try: + res = q.get(pattern) + except dnf.exceptions.CompsError as err: + logger.error(ucd(err)) + done = False + continue + for group_id in res.groups: + if not exclude_groups or group_id not in 
exclude_groups: + cnt += self.group_install(group_id, types, exclude=exclude, strict=strict) + for env_id in res.environments: + cnt += self.environment_install(env_id, types, exclude=exclude, strict=strict, + exclude_groups=exclude_groups) + if not done and strict: + raise dnf.exceptions.Error(_('Nothing to do.')) + return cnt + + def group_remove(self, grp_id): + # :api + assert dnf.util.is_string_type(grp_id) + solver = self._build_comps_solver() + trans = solver._group_remove(grp_id) + return self._add_comps_trans(trans) + + def env_group_remove(self, patterns): + q = CompsQuery(self.comps, self.history, + CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS, + CompsQuery.INSTALLED) + try: + res = q.get(*patterns) + except dnf.exceptions.CompsError as err: + logger.error("Warning: %s", ucd(err)) + raise dnf.exceptions.Error(_('No groups marked for removal.')) + cnt = 0 + for env in res.environments: + cnt += self.environment_remove(env) + for grp in res.groups: + cnt += self.group_remove(grp) + return cnt + + def env_group_upgrade(self, patterns): + q = CompsQuery(self.comps, self.history, + CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS, + CompsQuery.INSTALLED) + cnt = 0 + done = True + for pattern in patterns: + try: + res = q.get(pattern) + except dnf.exceptions.CompsError as err: + logger.error(ucd(err)) + done = False + continue + for env in res.environments: + try: + cnt += self.environment_upgrade(env) + except dnf.exceptions.CompsError as err: + logger.error(ucd(err)) + continue + for grp in res.groups: + try: + cnt += self.group_upgrade(grp) + except dnf.exceptions.CompsError as err: + logger.error(ucd(err)) + continue + if not done: + raise dnf.exceptions.Error(_('Nothing to do.')) + if not cnt: + msg = _('No group marked for upgrade.') + raise dnf.cli.CliError(msg) + + def environment_upgrade(self, env_id): + # :api + assert dnf.util.is_string_type(env_id) + solver = self._build_comps_solver() + trans = solver._environment_upgrade(env_id) + return 
self._add_comps_trans(trans) + + def group_upgrade(self, grp_id): + # :api + assert dnf.util.is_string_type(grp_id) + solver = self._build_comps_solver() + trans = solver._group_upgrade(grp_id) + return self._add_comps_trans(trans) + + def _gpg_key_check(self): + """Checks for the presence of GPG keys in the rpmdb. + + :return: 0 if there are no GPG keys in the rpmdb, and 1 if + there are keys + """ + gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum' + if os.path.exists(gpgkeyschecked): + return 1 + + installroot = self.conf.installroot + myts = dnf.rpm.transaction.initReadOnlyTransaction(root=installroot) + myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)) + idx = myts.dbMatch('name', 'gpg-pubkey') + keys = len(idx) + del idx + del myts + + if keys == 0: + return 0 + else: + mydir = os.path.dirname(gpgkeyschecked) + if not os.path.exists(mydir): + os.makedirs(mydir) + + fo = open(gpgkeyschecked, 'w') + fo.close() + del fo + return 1 + + def _install_multiarch(self, query, reponame=None, strict=True): + already_inst, available = self._query_matches_installed(query) + self._report_already_installed(already_inst) + for packages in available: + sltr = dnf.selector.Selector(self.sack) + q = self.sack.query().filterm(pkg=packages) + if self.conf.obsoletes: + q = q.union(self.sack.query().filterm(obsoletes=q)) + sltr = sltr.set(pkg=q) + if reponame is not None: + sltr = sltr.set(reponame=reponame) + self._goal.install(select=sltr, optional=(not strict)) + return len(available) + + def _categorize_specs(self, install, exclude): + """ + Categorize :param install and :param exclude list into two groups each (packages and groups) + + :param install: list of specs, whether packages ('foo') or groups/modules ('@bar') + :param exclude: list of specs, whether packages ('foo') or groups/modules ('@bar') + :return: categorized install and exclude specs (stored in argparse.Namespace class) + + To access packages use: specs.pkg_specs, + to access 
groups use: specs.grp_specs + """ + install_specs = argparse.Namespace() + exclude_specs = argparse.Namespace() + _parse_specs(install_specs, install) + _parse_specs(exclude_specs, exclude) + + return install_specs, exclude_specs + + def _exclude_package_specs(self, exclude_specs): + glob_excludes = [exclude for exclude in exclude_specs.pkg_specs + if dnf.util.is_glob_pattern(exclude)] + excludes = [exclude for exclude in exclude_specs.pkg_specs + if exclude not in glob_excludes] + + exclude_query = self.sack.query().filter(name=excludes) + glob_exclude_query = self.sack.query().filter(name__glob=glob_excludes) + + self.sack.add_excludes(exclude_query) + self.sack.add_excludes(glob_exclude_query) + + def _expand_groups(self, group_specs): + groups = set() + q = CompsQuery(self.comps, self.history, + CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS, + CompsQuery.AVAILABLE | CompsQuery.INSTALLED) + + for pattern in group_specs: + try: + res = q.get(pattern) + except dnf.exceptions.CompsError as err: + logger.error("Warning: Module or %s", ucd(err)) + continue + + groups.update(res.groups) + groups.update(res.environments) + + for environment_id in res.environments: + environment = self.comps._environment_by_id(environment_id) + for group in environment.groups_iter(): + groups.add(group.id) + + return list(groups) + + def _install_groups(self, group_specs, excludes, skipped, strict=True): + for group_spec in group_specs: + try: + types = self.conf.group_package_types + + if '/' in group_spec: + split = group_spec.split('/') + group_spec = split[0] + types = split[1].split(',') + + self.env_group_install([group_spec], types, strict, excludes.pkg_specs, + excludes.grp_specs) + except dnf.exceptions.Error: + skipped.append("@" + group_spec) + + def install_specs(self, install, exclude=None, reponame=None, strict=True, forms=None): + # :api + if exclude is None: + exclude = [] + no_match_group_specs = [] + error_group_specs = [] + no_match_pkg_specs = [] + error_pkg_specs = [] 
+ install_specs, exclude_specs = self._categorize_specs(install, exclude) + + self._exclude_package_specs(exclude_specs) + for spec in install_specs.pkg_specs: + try: + self.install(spec, reponame=reponame, strict=strict, forms=forms) + except dnf.exceptions.MarkingError as e: + logger.error(str(e)) + no_match_pkg_specs.append(spec) + no_match_module_specs = [] + module_depsolv_errors = () + if WITH_MODULES and install_specs.grp_specs: + try: + module_base = dnf.module.module_base.ModuleBase(self) + module_base.install(install_specs.grp_specs, strict) + except dnf.exceptions.MarkingErrors as e: + if e.no_match_group_specs: + for e_spec in e.no_match_group_specs: + no_match_module_specs.append(e_spec) + if e.error_group_specs: + for e_spec in e.error_group_specs: + error_group_specs.append("@" + e_spec) + module_depsolv_errors = e.module_depsolv_errors + + else: + no_match_module_specs = install_specs.grp_specs + + if no_match_module_specs: + self.read_comps(arch_filter=True) + exclude_specs.grp_specs = self._expand_groups(exclude_specs.grp_specs) + self._install_groups(no_match_module_specs, exclude_specs, no_match_group_specs, strict) + + if no_match_group_specs or error_group_specs or no_match_pkg_specs or error_pkg_specs \ + or module_depsolv_errors: + raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_group_specs, + error_group_specs=error_group_specs, + no_match_pkg_specs=no_match_pkg_specs, + error_pkg_specs=error_pkg_specs, + module_depsolv_errors=module_depsolv_errors) + + def install(self, pkg_spec, reponame=None, strict=True, forms=None): + # :api + """Mark package(s) given by pkg_spec and reponame for installation.""" + + subj = dnf.subject.Subject(pkg_spec) + solution = subj.get_best_solution(self.sack, forms=forms, with_src=False) + + if self.conf.multilib_policy == "all" or subj._is_arch_specified(solution): + q = solution['query'] + if reponame is not None: + q.filterm(reponame=reponame) + if not q: + 
self._raise_package_not_found_error(pkg_spec, forms, reponame) + return self._install_multiarch(q, reponame=reponame, strict=strict) + + elif self.conf.multilib_policy == "best": + sltrs = subj._get_best_selectors(self, + forms=forms, + obsoletes=self.conf.obsoletes, + reponame=reponame, + reports=True, + solution=solution) + if not sltrs: + self._raise_package_not_found_error(pkg_spec, forms, reponame) + + for sltr in sltrs: + self._goal.install(select=sltr, optional=(not strict)) + return 1 + return 0 + + def package_downgrade(self, pkg, strict=False): + # :api + if pkg._from_system: + msg = 'downgrade_package() for an installed package.' + raise NotImplementedError(msg) + + q = self.sack.query().installed().filterm(name=pkg.name, arch=[pkg.arch, "noarch"]) + if not q: + msg = _("Package %s not installed, cannot downgrade it.") + logger.warning(msg, pkg.name) + raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name) + elif sorted(q)[0] > pkg: + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=[pkg]) + self._goal.install(select=sltr, optional=(not strict)) + return 1 + else: + msg = _("Package %s of lower version already installed, " + "cannot downgrade it.") + logger.warning(msg, pkg.name) + return 0 + + def package_install(self, pkg, strict=True): + # :api + q = self.sack.query()._nevra(pkg.name, pkg.evr, pkg.arch) + already_inst, available = self._query_matches_installed(q) + if pkg in already_inst: + self._report_already_installed([pkg]) + elif pkg not in itertools.chain.from_iterable(available): + raise dnf.exceptions.PackageNotFoundError(_('No match for argument: %s'), pkg.location) + else: + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=[pkg]) + self._goal.install(select=sltr, optional=(not strict)) + return 1 + + def package_reinstall(self, pkg): + if self.sack.query().installed().filterm(name=pkg.name, evr=pkg.evr, arch=pkg.arch): + self._goal.install(pkg) + return 1 + msg = _("Package %s not installed, 
cannot reinstall it.") + logger.warning(msg, str(pkg)) + raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name) + + def package_remove(self, pkg): + self._goal.erase(pkg) + return 1 + + def package_upgrade(self, pkg): + # :api + if pkg._from_system: + msg = 'upgrade_package() for an installed package.' + raise NotImplementedError(msg) + + if pkg.arch == 'src': + msg = _("File %s is a source package and cannot be updated, ignoring.") + logger.info(msg, pkg.location) + return 0 + + q = self.sack.query().installed().filterm(name=pkg.name, arch=[pkg.arch, "noarch"]) + if not q: + msg = _("Package %s not installed, cannot update it.") + logger.warning(msg, pkg.name) + raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name) + elif sorted(q)[-1] < pkg: + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=[pkg]) + self._goal.upgrade(select=sltr) + return 1 + else: + msg = _("The same or higher version of %s is already installed, " + "cannot update it.") + logger.warning(msg, pkg.name) + return 0 + + def _upgrade_internal(self, query, obsoletes, reponame, pkg_spec=None): + installed = self.sack.query().installed() + q = query.intersection(self.sack.query().filterm(name=[pkg.name for pkg in installed])) + if obsoletes: + obsoletes = self.sack.query().available().filterm( + obsoletes=q.installed().union(q.upgrades())) + # add obsoletes into transaction + q = q.union(obsoletes) + if reponame is not None: + q.filterm(reponame=reponame) + q = self._merge_update_filters(q, pkg_spec=pkg_spec) + if q: + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=q) + self._goal.upgrade(select=sltr) + return 1 + + + def upgrade(self, pkg_spec, reponame=None): + # :api + subj = dnf.subject.Subject(pkg_spec) + solution = subj.get_best_solution(self.sack) + q = solution["query"] + if q: + wildcard = dnf.util.is_glob_pattern(pkg_spec) + # wildcard shouldn't print not installed packages + # only solution with 
nevra.name provide packages with same name + if not wildcard and solution['nevra'] and solution['nevra'].name: + installed = self.sack.query().installed() + pkg_name = solution['nevra'].name + installed.filterm(name=pkg_name).apply() + if not installed: + msg = _('Package %s available, but not installed.') + logger.warning(msg, pkg_name) + raise dnf.exceptions.PackagesNotInstalledError( + _('No match for argument: %s') % pkg_spec, pkg_spec) + if solution['nevra'].arch and not dnf.util.is_glob_pattern(solution['nevra'].arch): + if not installed.filter(arch=solution['nevra'].arch): + msg = _('Package %s available, but installed for different architecture.') + logger.warning(msg, "{}.{}".format(pkg_name, solution['nevra'].arch)) + obsoletes = self.conf.obsoletes and solution['nevra'] \ + and solution['nevra'].has_just_name() + return self._upgrade_internal(q, obsoletes, reponame, pkg_spec) + raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg_spec, pkg_spec) + + def upgrade_all(self, reponame=None): + # :api + # provide only available packages to solver to trigger targeted upgrade + # possibilities will be ignored + # usage of selected packages will unify dnf behavior with other upgrade functions + return self._upgrade_internal( + self.sack.query(), self.conf.obsoletes, reponame, pkg_spec=None) + + def distro_sync(self, pkg_spec=None): + if pkg_spec is None: + self._goal.distupgrade_all() + else: + subject = dnf.subject.Subject(pkg_spec) + solution = subject.get_best_solution(self.sack, with_src=False) + solution["query"].filterm(reponame__neq=hawkey.SYSTEM_REPO_NAME) + sltrs = subject._get_best_selectors(self, solution=solution, + obsoletes=self.conf.obsoletes, reports=True) + if not sltrs: + logger.info(_('No package %s installed.'), pkg_spec) + return 0 + for sltr in sltrs: + self._goal.distupgrade(select=sltr) + return 1 + + def autoremove(self, forms=None, pkg_specs=None, grp_specs=None, filenames=None): + # :api + """Removes all 'leaf' 
packages from the system that were originally + installed as dependencies of user-installed packages but which are + no longer required by any such package.""" + + if any([grp_specs, pkg_specs, filenames]): + pkg_specs += filenames + done = False + # Remove groups. + if grp_specs and forms: + for grp_spec in grp_specs: + msg = _('Not a valid form: %s') + logger.warning(msg, grp_spec) + elif grp_specs: + self.read_comps(arch_filter=True) + if self.env_group_remove(grp_specs): + done = True + + for pkg_spec in pkg_specs: + try: + self.remove(pkg_spec, forms=forms) + except dnf.exceptions.MarkingError as e: + logger.info(str(e)) + else: + done = True + + if not done: + logger.warning(_('No packages marked for removal.')) + + else: + pkgs = self.sack.query()._unneeded(self.history.swdb, + debug_solver=self.conf.debug_solver) + for pkg in pkgs: + self.package_remove(pkg) + + def remove(self, pkg_spec, reponame=None, forms=None): + # :api + """Mark the specified package for removal.""" + + matches = dnf.subject.Subject(pkg_spec).get_best_query(self.sack, forms=forms) + installed = [ + pkg for pkg in matches.installed() + if reponame is None or + self.history.repo(pkg) == reponame] + if not installed: + self._raise_package_not_installed_error(pkg_spec, forms, reponame) + + clean_deps = self.conf.clean_requirements_on_remove + for pkg in installed: + self._goal.erase(pkg, clean_deps=clean_deps) + return len(installed) + + def reinstall(self, pkg_spec, old_reponame=None, new_reponame=None, + new_reponame_neq=None, remove_na=False): + subj = dnf.subject.Subject(pkg_spec) + q = subj.get_best_query(self.sack) + installed_pkgs = [ + pkg for pkg in q.installed() + if old_reponame is None or + self.history.repo(pkg) == old_reponame] + + available_q = q.available() + if new_reponame is not None: + available_q.filterm(reponame=new_reponame) + if new_reponame_neq is not None: + available_q.filterm(reponame__neq=new_reponame_neq) + available_nevra2pkg = 
dnf.query._per_nevra_dict(available_q) + + if not installed_pkgs: + raise dnf.exceptions.PackagesNotInstalledError( + 'no package matched', pkg_spec, available_nevra2pkg.values()) + + cnt = 0 + clean_deps = self.conf.clean_requirements_on_remove + for installed_pkg in installed_pkgs: + try: + available_pkg = available_nevra2pkg[ucd(installed_pkg)] + except KeyError: + if not remove_na: + continue + self._goal.erase(installed_pkg, clean_deps=clean_deps) + else: + self._goal.install(available_pkg) + cnt += 1 + + if cnt == 0: + raise dnf.exceptions.PackagesNotAvailableError( + 'no package matched', pkg_spec, installed_pkgs) + + return cnt + + def downgrade(self, pkg_spec): + # :api + """Mark a package to be downgraded. + + This is equivalent to first removing the currently installed package, + and then installing an older version. + + """ + return self.downgrade_to(pkg_spec) + + def downgrade_to(self, pkg_spec, strict=False): + """Downgrade to specific version if specified otherwise downgrades + to one version lower than the package installed. 
+ """ + subj = dnf.subject.Subject(pkg_spec) + q = subj.get_best_query(self.sack) + if not q: + msg = _('No match for argument: %s') % pkg_spec + raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec) + done = 0 + available_pkgs = q.available() + available_pkg_names = list(available_pkgs._name_dict().keys()) + q_installed = self.sack.query().installed().filterm(name=available_pkg_names) + if len(q_installed) == 0: + msg = _('Packages for argument %s available, but not installed.') % pkg_spec + raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec, available_pkgs) + for pkg_name in q_installed._name_dict().keys(): + downgrade_pkgs = available_pkgs.downgrades().filter(name=pkg_name) + if not downgrade_pkgs: + msg = _("Package %s of lowest version already installed, cannot downgrade it.") + logger.warning(msg, pkg_name) + continue + sltr = dnf.selector.Selector(self.sack) + sltr.set(pkg=downgrade_pkgs) + self._goal.install(select=sltr, optional=(not strict)) + done = 1 + return done + + def provides(self, provides_spec): + providers = self.sack.query().filterm(file__glob=provides_spec) + if providers: + return providers, [provides_spec] + providers = dnf.query._by_provides(self.sack, provides_spec) + if providers: + return providers, [provides_spec] + if provides_spec.startswith('/bin/') or provides_spec.startswith('/sbin/'): + # compatibility for packages that didn't do UsrMove + binary_provides = ['/usr' + provides_spec] + elif provides_spec.startswith('/'): + # provides_spec is a file path + return providers, [provides_spec] + else: + # suppose that provides_spec is a command, search in /usr/sbin/ + binary_provides = [prefix + provides_spec + for prefix in ['/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/']] + return self.sack.query().filterm(file__glob=binary_provides), binary_provides + + def _history_undo_operations(self, operations, first_trans, rollback=False, strict=True): + """Undo the operations on packages by their NEVRAs. 
+ + :param operations: a NEVRAOperations to be undone + :param first_trans: first transaction id being undone + :param rollback: True if transaction is performing a rollback + :param strict: if True, raise an exception on any errors + """ + + # map actions to their opposites + action_map = { + libdnf.transaction.TransactionItemAction_DOWNGRADE: None, + libdnf.transaction.TransactionItemAction_DOWNGRADED: libdnf.transaction.TransactionItemAction_UPGRADE, + libdnf.transaction.TransactionItemAction_INSTALL: libdnf.transaction.TransactionItemAction_REMOVE, + libdnf.transaction.TransactionItemAction_OBSOLETE: None, + libdnf.transaction.TransactionItemAction_OBSOLETED: libdnf.transaction.TransactionItemAction_INSTALL, + libdnf.transaction.TransactionItemAction_REINSTALL: None, + # reinstalls are skipped as they are considered as no-operation from history perspective + libdnf.transaction.TransactionItemAction_REINSTALLED: None, + libdnf.transaction.TransactionItemAction_REMOVE: libdnf.transaction.TransactionItemAction_INSTALL, + libdnf.transaction.TransactionItemAction_UPGRADE: None, + libdnf.transaction.TransactionItemAction_UPGRADED: libdnf.transaction.TransactionItemAction_DOWNGRADE, + libdnf.transaction.TransactionItemAction_REASON_CHANGE: None, + } + + failed = False + for ti in operations.packages(): + try: + action = action_map[ti.action] + except KeyError: + raise RuntimeError(_("Action not handled: {}".format(action))) + + if action is None: + continue + + if action == libdnf.transaction.TransactionItemAction_REMOVE: + query = self.sack.query().installed().filterm(nevra_strict=str(ti)) + if not query: + logger.error(_('No package %s installed.'), ucd(str(ti))) + failed = True + continue + else: + query = self.sack.query().filterm(nevra_strict=str(ti)) + if not query: + logger.error(_('No package %s available.'), ucd(str(ti))) + failed = True + continue + + if action == libdnf.transaction.TransactionItemAction_REMOVE: + for pkg in query: + self._goal.erase(pkg) + 
else: + selector = dnf.selector.Selector(self.sack) + selector.set(pkg=query) + self._goal.install(select=selector, optional=(not strict)) + + if strict and failed: + raise dnf.exceptions.PackageNotFoundError(_('no package matched')) + + def _merge_update_filters(self, q, pkg_spec=None, warning=True): + """ + Merge Queries in _update_filters and return intersection with q Query + @param q: Query + @return: Query + """ + if not self._update_security_filters or not q: + return q + merged_queries = self._update_security_filters[0] + for query in self._update_security_filters[1:]: + merged_queries = merged_queries.union(query) + + self._update_security_filters = [merged_queries] + merged_queries = q.intersection(merged_queries) + if not merged_queries: + if warning: + q = q.upgrades() + count = len(q._name_dict().keys()) + if pkg_spec is None: + msg1 = _("No security updates needed, but {} update " + "available").format(count) + msg2 = _("No security updates needed, but {} updates " + "available").format(count) + logger.warning(P_(msg1, msg2, count)) + else: + msg1 = _('No security updates needed for "{}", but {} ' + 'update available').format(pkg_spec, count) + msg2 = _('No security updates needed for "{}", but {} ' + 'updates available').format(pkg_spec, count) + logger.warning(P_(msg1, msg2, count)) + return merged_queries + + def _get_key_for_package(self, po, askcb=None, fullaskcb=None): + """Retrieve a key for a package. If needed, use the given + callback to prompt whether the key should be imported. + + :param po: the package object to retrieve the key of + :param askcb: Callback function to use to ask permission to + import a key. The arguments *askck* should take are the + package object, the userid of the key, and the keyid + :param fullaskcb: Callback function to use to ask permission to + import a key. This differs from *askcb* in that it gets + passed a dictionary so that we can expand the values passed. 
+ :raises: :class:`dnf.exceptions.Error` if there are errors + retrieving the keys + """ + repo = self.repos[po.repoid] + key_installed = repo.id in self._repo_set_imported_gpg_keys + keyurls = [] if key_installed else repo.gpgkey + + def _prov_key_data(msg): + msg += _('. Failing package is: %s') % (po) + '\n ' + msg += _('GPG Keys are configured as: %s') % \ + (', '.join(repo.gpgkey)) + return msg + + user_cb_fail = False + self._repo_set_imported_gpg_keys.add(repo.id) + for keyurl in keyurls: + keys = dnf.crypto.retrieve(keyurl, repo) + + for info in keys: + # Check if key is already installed + if misc.keyInstalled(self._ts, info.rpm_id, info.timestamp) >= 0: + msg = _('GPG key at %s (0x%s) is already installed') + logger.info(msg, keyurl, info.short_id) + continue + + # DNS Extension: create a key object, pass it to the verification class + # and print its result as an advice to the user. + if self.conf.gpgkey_dns_verification: + dns_input_key = dnf.dnssec.KeyInfo.from_rpm_key_object(info.userid, + info.raw_key) + dns_result = dnf.dnssec.DNSSECKeyVerification.verify(dns_input_key) + logger.info(dnf.dnssec.nice_user_msg(dns_input_key, dns_result)) + + # Try installing/updating GPG key + info.url = keyurl + dnf.crypto.log_key_import(info) + rc = False + if self.conf.assumeno: + rc = False + elif self.conf.assumeyes: + # DNS Extension: We assume, that the key is trusted in case it is valid, + # its existence is explicitly denied or in case the domain is not signed + # and therefore there is no way to know for sure (this is mainly for + # backward compatibility) + # FAQ: + # * What is PROVEN_NONEXISTENCE? + # In DNSSEC, your domain does not need to be signed, but this state + # (not signed) has to be proven by the upper domain. e.g. when example.com. + # is not signed, com. servers have to sign the message, that example.com. + # does not have any signing key (KSK to be more precise). 
+ if self.conf.gpgkey_dns_verification: + if dns_result in (dnf.dnssec.Validity.VALID, + dnf.dnssec.Validity.PROVEN_NONEXISTENCE): + rc = True + logger.info(dnf.dnssec.any_msg(_("The key has been approved."))) + else: + rc = False + logger.info(dnf.dnssec.any_msg(_("The key has been rejected."))) + else: + rc = True + + # grab the .sig/.asc for the keyurl, if it exists if it + # does check the signature on the key if it is signed by + # one of our ca-keys for this repo or the global one then + # rc = True else ask as normal. + + elif fullaskcb: + rc = fullaskcb({"po": po, "userid": info.userid, + "hexkeyid": info.short_id, + "keyurl": keyurl, + "fingerprint": info.fingerprint, + "timestamp": info.timestamp}) + elif askcb: + rc = askcb(po, info.userid, info.short_id) + + if not rc: + user_cb_fail = True + continue + + # Import the key + # If rpm.RPMTRANS_FLAG_TEST in self._ts, gpg keys cannot be imported successfully + # therefore the flag was removed for import operation + test_flag = self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST) + if test_flag: + orig_flags = self._ts.getTsFlags() + self._ts.setFlags(orig_flags - rpm.RPMTRANS_FLAG_TEST) + result = self._ts.pgpImportPubkey(misc.procgpgkey(info.raw_key)) + if test_flag: + self._ts.setFlags(orig_flags) + if result != 0: + msg = _('Key import failed (code %d)') % result + raise dnf.exceptions.Error(_prov_key_data(msg)) + logger.info(_('Key imported successfully')) + key_installed = True + + if not key_installed and user_cb_fail: + raise dnf.exceptions.Error(_("Didn't install any keys")) + + if not key_installed: + msg = _('The GPG keys listed for the "%s" repository are ' + 'already installed but they are not correct for this ' + 'package.\n' + 'Check that the correct key URLs are configured for ' + 'this repository.') % repo.name + raise dnf.exceptions.Error(_prov_key_data(msg)) + + # Check if the newly installed keys helped + result, errmsg = self._sig_check_pkg(po) + if result != 0: + if keyurls: + msg = _("Import 
of key(s) didn't help, wrong key(s)?") + logger.info(msg) + errmsg = ucd(errmsg) + raise dnf.exceptions.Error(_prov_key_data(errmsg)) + + def _run_rpm_check(self): + results = [] + self._ts.check() + for prob in self._ts.problems(): + # Newer rpm (4.8.0+) has problem objects, older have just strings. + # Should probably move to using the new objects, when we can. For + # now just be compatible. + results.append(ucd(prob)) + + return results + + def urlopen(self, url, repo=None, mode='w+b', **kwargs): + # :api + """ + Open the specified absolute url, return a file object + which respects proxy setting even for non-repo downloads + """ + return dnf.util._urlopen(url, self.conf, repo, mode, **kwargs) + + def _get_installonly_query(self, q=None): + if q is None: + q = self._sack.query() + installonly = q.filter(provides=self.conf.installonlypkgs) + return installonly + + def _report_icase_hint(self, pkg_spec): + subj = dnf.subject.Subject(pkg_spec, ignore_case=True) + solution = subj.get_best_solution(self.sack, with_nevra=True, + with_provides=False, with_filenames=False) + if solution['query'] and solution['nevra'] and solution['nevra'].name and \ + pkg_spec != solution['query'][0].name: + logger.info(_(" * Maybe you meant: {}").format(solution['query'][0].name)) + + def _select_remote_pkgs(self, install_pkgs): + """ Check checksum of packages from local repositories and returns list packages from remote + repositories that will be downloaded. Packages from commandline are skipped. 
+ + :param install_pkgs: list of packages + :return: list of remote pkgs + """ + def _verification_of_packages(pkg_list, logger_msg): + all_packages_verified = True + for pkg in pkg_list: + pkg_successfully_verified = False + try: + pkg_successfully_verified = pkg.verifyLocalPkg() + except Exception as e: + logger.critical(str(e)) + if pkg_successfully_verified is not True: + logger.critical(logger_msg.format(pkg, pkg.reponame)) + all_packages_verified = False + + return all_packages_verified + + remote_pkgs = [] + local_repository_pkgs = [] + for pkg in install_pkgs: + if pkg._is_local_pkg(): + if pkg.reponame != hawkey.CMDLINE_REPO_NAME: + local_repository_pkgs.append(pkg) + else: + remote_pkgs.append(pkg) + + msg = _('Package "{}" from local repository "{}" has incorrect checksum') + if not _verification_of_packages(local_repository_pkgs, msg): + raise dnf.exceptions.Error( + _("Some packages from local repository have incorrect checksum")) + + if self.conf.cacheonly: + msg = _('Package "{}" from repository "{}" has incorrect checksum') + if not _verification_of_packages(remote_pkgs, msg): + raise dnf.exceptions.Error( + _('Some packages have invalid cache, but cannot be downloaded due to ' + '"--cacheonly" option')) + remote_pkgs = [] + + return remote_pkgs, local_repository_pkgs + + def _report_already_installed(self, packages): + for pkg in packages: + _msg_installed(pkg) + + def _raise_package_not_found_error(self, pkg_spec, forms, reponame): + all_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES) + subject = dnf.subject.Subject(pkg_spec) + solution = subject.get_best_solution( + self.sack, forms=forms, with_src=False, query=all_query) + if reponame is not None: + solution['query'].filterm(reponame=reponame) + if not solution['query']: + raise dnf.exceptions.PackageNotFoundError(_('No match for argument'), pkg_spec) + else: + with_regular_query = self.sack.query(flags=hawkey.IGNORE_REGULAR_EXCLUDES) + with_regular_query = 
solution['query'].intersection(with_regular_query) + # Modular filtering is applied on a package set that already has regular excludes + # filtered out. So if a package wasn't filtered out by regular excludes, it must have + # been filtered out by modularity. + if with_regular_query: + msg = _('All matches were filtered out by exclude filtering for argument') + else: + msg = _('All matches were filtered out by modular filtering for argument') + raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec) + + def _raise_package_not_installed_error(self, pkg_spec, forms, reponame): + all_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES).installed() + subject = dnf.subject.Subject(pkg_spec) + solution = subject.get_best_solution( + self.sack, forms=forms, with_src=False, query=all_query) + + if not solution['query']: + raise dnf.exceptions.PackagesNotInstalledError(_('No match for argument'), pkg_spec) + if reponame is not None: + installed = [pkg for pkg in solution['query'] if self.history.repo(pkg) == reponame] + else: + installed = solution['query'] + if not installed: + msg = _('All matches were installed from a different repository for argument') + else: + msg = _('All matches were filtered out by exclude filtering for argument') + raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec) + + +def _msg_installed(pkg): + name = ucd(pkg) + msg = _('Package %s is already installed.') + logger.info(msg, name) diff --git a/dnf/callback.py b/dnf/callback.py new file mode 100644 index 0000000..bf50ccf --- /dev/null +++ b/dnf/callback.py @@ -0,0 +1,125 @@ +# callbacks.py +# Abstract interfaces to communicate progress on tasks. +# +# Copyright (C) 2014-2015 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import unicode_literals +import dnf.yum.rpmtrans + +import dnf.transaction + +PKG_DOWNGRADE = dnf.transaction.PKG_DOWNGRADE # :api +PKG_DOWNGRADED = dnf.transaction.PKG_DOWNGRADED # :api +PKG_INSTALL = dnf.transaction.PKG_INSTALL # :api +PKG_OBSOLETE = dnf.transaction.PKG_OBSOLETE # :api +PKG_OBSOLETED = dnf.transaction.PKG_OBSOLETED # :api +PKG_REINSTALL = dnf.transaction.PKG_REINSTALL # :api +PKG_REINSTALLED = dnf.transaction.PKG_REINSTALLED # :api +PKG_REMOVE = dnf.transaction.PKG_ERASE # :api +PKG_ERASE = PKG_REMOVE # deprecated, use PKG_REMOVE instead +PKG_UPGRADE = dnf.transaction.PKG_UPGRADE # :api +PKG_UPGRADED = dnf.transaction.PKG_UPGRADED # :api + +PKG_CLEANUP = dnf.transaction.PKG_CLEANUP # :api +PKG_VERIFY = dnf.transaction.PKG_VERIFY # :api +PKG_SCRIPTLET = dnf.transaction.PKG_SCRIPTLET # :api + +TRANS_PREPARATION = dnf.transaction.TRANS_PREPARATION # :api +TRANS_POST = dnf.transaction.TRANS_POST # :api + +STATUS_OK = None # :api +STATUS_FAILED = 1 # :api +STATUS_ALREADY_EXISTS = 2 # :api +STATUS_MIRROR = 3 # :api +STATUS_DRPM = 4 # :api + + +class KeyImport(object): + def _confirm(self, id, userid, fingerprint, url, timestamp): + """Ask the user if the key should be imported.""" + return False + + +class Payload(object): + # :api + + def 
__init__(self, progress): + self.progress = progress + + def __str__(self): + """Nice, human-readable representation. :api""" + pass + + @property + def download_size(self): + """Total size of the download. :api""" + pass + + +class DownloadProgress(object): + # :api + + def end(self, payload, status, msg): + """Communicate the information that `payload` has finished downloading. + + :api, `status` is a constant denoting the type of outcome, `err_msg` is an + error message in case the outcome was an error. + + """ + pass + + def message(self, msg): + pass + + def progress(self, payload, done): + """Update the progress display. :api + + `payload` is the payload this call reports progress for, `done` is how + many bytes of this payload are already downloaded. + + """ + + pass + + def start(self, total_files, total_size, total_drpms=0): + """Start new progress metering. :api + + `total_files` the number of files that will be downloaded, + `total_size` total size of all files. + + """ + + pass + + +class NullDownloadProgress(DownloadProgress): + pass + + +class Depsolve(object): + def start(self): + pass + + def pkg_added(self, pkg, mode): + pass + + def end(self): + pass + + +TransactionProgress = dnf.yum.rpmtrans.TransactionDisplay # :api diff --git a/dnf/cli/CMakeLists.txt b/dnf/cli/CMakeLists.txt new file mode 100644 index 0000000..a32305b --- /dev/null +++ b/dnf/cli/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB cli_SRCS *.py) +INSTALL (FILES ${cli_SRCS} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/cli) +# completion_helper.py is generated so the glob alone won't see it: +INSTALL (FILES completion_helper.py DESTINATION ${PYTHON_INSTALL_DIR}/dnf/cli) + +ADD_SUBDIRECTORY (commands) diff --git a/dnf/cli/__init__.py b/dnf/cli/__init__.py new file mode 100644 index 0000000..0454c25 --- /dev/null +++ b/dnf/cli/__init__.py @@ -0,0 +1,31 @@ +# __init__.py +# DNF cli subpackage. +# +# Copyright (C) 2012-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +import dnf.exceptions + + +class CliError(dnf.exceptions.Error): + """CLI Exception. :api""" + pass + + +from dnf.cli.cli import Cli # :api +from dnf.cli.commands import Command # :api diff --git a/dnf/cli/aliases.py b/dnf/cli/aliases.py new file mode 100644 index 0000000..0b3ba8f --- /dev/null +++ b/dnf/cli/aliases.py @@ -0,0 +1,204 @@ +# aliases.py +# Resolving aliases in CLI arguments. +# +# Copyright (C) 2018 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. 
You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ + +import collections +import dnf.cli +from dnf.conf.config import PRIO_DEFAULT +import dnf.exceptions +import libdnf.conf +import logging +import os +import os.path + +logger = logging.getLogger('dnf') + +ALIASES_DROPIN_DIR = '/etc/dnf/aliases.d/' +ALIASES_CONF_PATH = os.path.join(ALIASES_DROPIN_DIR, 'ALIASES.conf') +ALIASES_USER_PATH = os.path.join(ALIASES_DROPIN_DIR, 'USER.conf') + + +class AliasesConfig(object): + def __init__(self, path): + self._path = path + self._parser = libdnf.conf.ConfigParser() + self._parser.read(self._path) + + @property + def enabled(self): + option = libdnf.conf.OptionBool(True) + try: + option.set(PRIO_DEFAULT, self._parser.getData()["main"]["enabled"]) + except IndexError: + pass + return option.getValue() + + @property + def aliases(self): + result = collections.OrderedDict() + section = "aliases" + if not self._parser.hasSection(section): + return result + for key in self._parser.options(section): + value = self._parser.getValue(section, key) + if not value: + continue + result[key] = value.split() + return result + + +class Aliases(object): + def __init__(self): + self.aliases = collections.OrderedDict() + self.conf = None + self.enabled = True + + if self._disabled_by_environ(): + self.enabled = False + return + + self._load_main() + + if not self.enabled: + return + + self._load_aliases() + + def _disabled_by_environ(self): + option = libdnf.conf.OptionBool(True) + try: + 
option.set(PRIO_DEFAULT, os.environ['DNF_DISABLE_ALIASES']) + return option.getValue() + except KeyError: + return False + except RuntimeError: + logger.warning( + _('Unexpected value of environment variable: ' + 'DNF_DISABLE_ALIASES=%s'), os.environ['DNF_DISABLE_ALIASES']) + return True + + def _load_conf(self, path): + try: + return AliasesConfig(path) + except RuntimeError as e: + raise dnf.exceptions.ConfigError( + _('Parsing file "%s" failed: %s') % (path, e)) + except IOError as e: + raise dnf.exceptions.ConfigError( + _('Cannot read file "%s": %s') % (path, e)) + + def _load_main(self): + try: + self.conf = self._load_conf(ALIASES_CONF_PATH) + self.enabled = self.conf.enabled + except dnf.exceptions.ConfigError as e: + logger.debug(_('Config error: %s'), e) + + def _load_aliases(self, filenames=None): + if filenames is None: + try: + filenames = self._dropin_dir_filenames() + except dnf.exceptions.ConfigError: + return + for filename in filenames: + try: + conf = self._load_conf(filename) + self.aliases.update(conf.aliases) + except dnf.exceptions.ConfigError as e: + logger.warning(_('Config error: %s'), e) + + def _dropin_dir_filenames(self): + # Get default aliases config filenames: + # all files from ALIASES_DROPIN_DIR, + # and ALIASES_USER_PATH as the last one (-> override all others) + ignored_filenames = [os.path.basename(ALIASES_CONF_PATH), + os.path.basename(ALIASES_USER_PATH)] + + def _ignore_filename(filename): + return filename in ignored_filenames or\ + filename.startswith('.') or\ + not filename.endswith(('.conf', '.CONF')) + + filenames = [] + try: + if not os.path.exists(ALIASES_DROPIN_DIR): + os.mkdir(ALIASES_DROPIN_DIR) + for fn in os.listdir(ALIASES_DROPIN_DIR): + if _ignore_filename(fn): + continue + filenames.append(os.path.join(ALIASES_DROPIN_DIR, fn)) + except (IOError, OSError) as e: + raise dnf.exceptions.ConfigError(e) + if os.path.exists(ALIASES_USER_PATH): + filenames.append(ALIASES_USER_PATH) + return filenames + + def 
_resolve(self, args): + stack = [] + self.prefix_options = [] + + def store_prefix(args): + num = 0 + for arg in args: + if arg and arg[0] != '-': + break + num += 1 + + self.prefix_options += args[:num] + + return args[num:] + + def subresolve(args): + suffix = store_prefix(args) + + if (not suffix or # Current alias on stack is resolved + suffix[0] not in self.aliases or # End resolving + suffix[0].startswith('\\')): # End resolving + try: + stack.pop() + except IndexError: + pass + return suffix + + if suffix[0] in stack: # Infinite recursion detected + raise dnf.exceptions.Error( + _('Aliases contain infinite recursion')) + + # Next word must be an alias + stack.append(suffix[0]) + current_alias_result = subresolve(self.aliases[suffix[0]]) + if current_alias_result: # We reached non-alias or '\' + return current_alias_result + suffix[1:] + else: # Need to resolve aliases in the rest + return subresolve(suffix[1:]) + + suffix = subresolve(args) + return self.prefix_options + suffix + + def resolve(self, args): + if self.enabled: + try: + args = self._resolve(args) + except dnf.exceptions.Error as e: + logger.error(_('%s, using original arguments.'), e) + return args diff --git a/dnf/cli/cli.py b/dnf/cli/cli.py new file mode 100644 index 0000000..80df950 --- /dev/null +++ b/dnf/cli/cli.py @@ -0,0 +1,1162 @@ +# Copyright 2005 Duke University +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Written by Seth Vidal + +""" +Command line interface yum class and related. +""" + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import unicode_literals + +try: + from collections.abc import Sequence +except ImportError: + from collections import Sequence +import datetime +import logging +import operator +import os +import random +import rpm +import sys +import time + +import hawkey +import libdnf.transaction + +from . import output +from dnf.cli import CliError +from dnf.i18n import ucd, _ +import dnf +import dnf.cli.aliases +import dnf.cli.commands +import dnf.cli.commands.alias +import dnf.cli.commands.autoremove +import dnf.cli.commands.check +import dnf.cli.commands.clean +import dnf.cli.commands.deplist +import dnf.cli.commands.distrosync +import dnf.cli.commands.downgrade +import dnf.cli.commands.group +import dnf.cli.commands.install +import dnf.cli.commands.makecache +import dnf.cli.commands.mark +import dnf.cli.commands.module +import dnf.cli.commands.reinstall +import dnf.cli.commands.remove +import dnf.cli.commands.repolist +import dnf.cli.commands.repoquery +import dnf.cli.commands.search +import dnf.cli.commands.shell +import dnf.cli.commands.swap +import dnf.cli.commands.updateinfo +import dnf.cli.commands.upgrade +import dnf.cli.commands.upgrademinimal +import dnf.cli.demand +import dnf.cli.format +import dnf.cli.option_parser +import dnf.conf +import dnf.conf.substitutions +import dnf.const +import dnf.db.history +import dnf.exceptions +import dnf.logging +import dnf.persistor +import dnf.plugin +import dnf.rpm +import dnf.sack +import dnf.transaction +import dnf.util +import dnf.yum.misc + +logger = logging.getLogger('dnf') + + +def _add_pkg_simple_list_lens(data, pkg, indent=''): 
+ """ Get the length of each pkg's column. Add that to data. + This "knows" about simpleList and printVer. """ + na = len(pkg.name) + 1 + len(pkg.arch) + len(indent) + ver = len(pkg.evr) + rid = len(pkg._from_repo) + for (d, v) in (('na', na), ('ver', ver), ('rid', rid)): + data[d].setdefault(v, 0) + data[d][v] += 1 + + +def _list_cmd_calc_columns(output, ypl): + """ Work out the dynamic size of the columns to pass to fmtColumns. """ + data = {'na' : {}, 'ver' : {}, 'rid' : {}} + for lst in (ypl.installed, ypl.available, ypl.extras, ypl.autoremove, + ypl.updates, ypl.recent): + for pkg in lst: + _add_pkg_simple_list_lens(data, pkg) + if len(ypl.obsoletes) > 0: + for (npkg, opkg) in ypl.obsoletesTuples: + _add_pkg_simple_list_lens(data, npkg) + _add_pkg_simple_list_lens(data, opkg, indent=" " * 4) + + data = [data['na'], data['ver'], data['rid']] + columns = output.calcColumns(data, remainder_column=1) + return (-columns[0], -columns[1], -columns[2]) + + +def print_versions(pkgs, base, output): + def sm_ui_time(x): + return time.strftime("%c", time.gmtime(x)) + + rpmdb_sack = dnf.sack.rpmdb_sack(base) + done = False + for pkg in rpmdb_sack.query().installed().filterm(name=pkgs): + if done: + print("") + done = True + if pkg.epoch == '0': + ver = '%s-%s.%s' % (pkg.version, pkg.release, pkg.arch) + else: + ver = '%s:%s-%s.%s' % (pkg.epoch, + pkg.version, pkg.release, pkg.arch) + name = output.term.bold(pkg.name) + print(_(" Installed: %s-%s at %s") %(name, ver, + sm_ui_time(pkg.installtime))) + print(_(" Built : %s at %s") % (pkg.packager if pkg.packager else "", + sm_ui_time(pkg.buildtime))) + # :hawkey, no changelist information yet + # print(_(" Committed: %s at %s") % (pkg.committer, + # sm_ui_date(pkg.committime))) + + +def report_module_switch(switchedModules): + msg1 = _("The operation would result in switching of module '{0}' stream '{1}' to " + "stream '{2}'") + for moduleName, streams in switchedModules.items(): + logger.warning(msg1.format(moduleName, 
streams[0], streams[1])) + + +class BaseCli(dnf.Base): + """This is the base class for yum cli.""" + + def __init__(self, conf=None): + conf = conf or dnf.conf.Conf() + super(BaseCli, self).__init__(conf=conf) + self.output = output.Output(self, self.conf) + + def do_transaction(self, display=()): + """Take care of package downloading, checking, user + confirmation and actually running the transaction. + + :param display: `rpm.callback.TransactionProgress` object(s) + :return: history database transaction ID or None + """ + if dnf.base.WITH_MODULES: + switchedModules = dict(self._moduleContainer.getSwitchedStreams()) + if switchedModules: + report_module_switch(switchedModules) + msg = _("It is not possible to switch enabled streams of a module.\n" + "It is recommended to remove all installed content from the module, and " + "reset the module using '{prog} module reset ' command. After " + "you reset the module, you can install the other stream.").format( + prog=dnf.util.MAIN_PROG) + raise dnf.exceptions.Error(msg) + + trans = self.transaction + pkg_str = self.output.list_transaction(trans) + if pkg_str: + logger.info(pkg_str) + + if trans: + # Check which packages have to be downloaded + install_pkgs = [] + rmpkgs = [] + install_only = True + for tsi in trans: + if tsi.action in dnf.transaction.FORWARD_ACTIONS: + install_pkgs.append(tsi.pkg) + elif tsi.action in dnf.transaction.BACKWARD_ACTIONS: + install_only = False + rmpkgs.append(tsi.pkg) + + # Close the connection to the rpmdb so that rpm doesn't hold the + # SIGINT handler during the downloads. 
+ del self._ts + + # report the total download size to the user + if not install_pkgs: + self.output.reportRemoveSize(rmpkgs) + else: + self.output.reportDownloadSize(install_pkgs, install_only) + + if trans or self._moduleContainer.isChanged() or \ + (self._history and (self._history.group or self._history.env)): + # confirm with user + if self.conf.downloadonly: + logger.info(_("{prog} will only download packages for the transaction.").format( + prog=dnf.util.MAIN_PROG_UPPER)) + elif 'test' in self.conf.tsflags: + logger.info(_("{prog} will only download packages, install gpg keys, and check the " + "transaction.").format(prog=dnf.util.MAIN_PROG_UPPER)) + if self._promptWanted(): + if self.conf.assumeno or not self.output.userconfirm(): + raise CliError(_("Operation aborted.")) + else: + logger.info(_('Nothing to do.')) + return + + if trans: + if install_pkgs: + logger.info(_('Downloading Packages:')) + try: + total_cb = self.output.download_callback_total_cb + self.download_packages(install_pkgs, self.output.progress, total_cb) + except dnf.exceptions.DownloadError as e: + specific = dnf.cli.format.indent_block(ucd(e)) + errstr = _('Error downloading packages:') + '\n%s' % specific + # setting the new line to prevent next chars being eaten up + # by carriage returns + print() + raise dnf.exceptions.Error(errstr) + # Check GPG signatures + self.gpgsigcheck(install_pkgs) + + if self.conf.downloadonly: + return + + if not isinstance(display, Sequence): + display = [display] + display = [output.CliTransactionDisplay()] + list(display) + tid = super(BaseCli, self).do_transaction(display) + + # display last transaction (which was closed during do_transaction()) + if tid is not None: + trans = self.history.old([tid])[0] + trans = dnf.db.group.RPMTransaction(self.history, trans._trans) + else: + trans = None + + if trans: + msg = self.output.post_transaction_output(trans) + logger.info(msg) + for tsi in trans: + if tsi.state == 
libdnf.transaction.TransactionItemState_ERROR: + raise dnf.exceptions.Error(_('Transaction failed')) + + return tid + + def gpgsigcheck(self, pkgs): + """Perform GPG signature verification on the given packages, + installing keys if possible. + + :param pkgs: a list of package objects to verify the GPG + signatures of + :raises: Will raise :class:`Error` if there's a problem + """ + error_messages = [] + for po in pkgs: + result, errmsg = self._sig_check_pkg(po) + + if result == 0: + # Verified ok, or verify not req'd + continue + + elif result == 1: + ay = self.conf.assumeyes and not self.conf.assumeno + if (not sys.stdin or not sys.stdin.isatty()) and not ay: + raise dnf.exceptions.Error(_('Refusing to automatically import keys when running ' \ + 'unattended.\nUse "-y" to override.')) + + # the callback here expects to be able to take options which + # userconfirm really doesn't... so fake it + fn = lambda x, y, z: self.output.userconfirm() + try: + self._get_key_for_package(po, fn) + except dnf.exceptions.Error as e: + error_messages.append(str(e)) + + else: + # Fatal error + error_messages.append(errmsg) + + if error_messages: + for msg in error_messages: + logger.critical(msg) + raise dnf.exceptions.Error(_("GPG check FAILED")) + + def latest_changelogs(self, package): + """Return list of changelogs for package newer then installed version""" + newest = None + # find the date of the newest changelog for installed version of package + # stored in rpmdb + for mi in self._rpmconn.readonly_ts.dbMatch('name', package.name): + changelogtimes = mi[rpm.RPMTAG_CHANGELOGTIME] + if changelogtimes: + newest = datetime.date.fromtimestamp(changelogtimes[0]) + break + chlogs = [chlog for chlog in package.changelogs + if newest is None or chlog['timestamp'] > newest] + return chlogs + + def format_changelog(self, changelog): + """Return changelog formatted as in spec file""" + chlog_str = '* %s %s\n%s\n' % ( + changelog['timestamp'].strftime("%a %b %d %X %Y"), + 
dnf.i18n.ucd(changelog['author']), + dnf.i18n.ucd(changelog['text'])) + return chlog_str + + def print_changelogs(self, packages): + # group packages by src.rpm to avoid showing duplicate changelogs + bysrpm = dict() + for p in packages: + # there are packages without source_name, use name then. + bysrpm.setdefault(p.source_name or p.name, []).append(p) + for source_name in sorted(bysrpm.keys()): + bin_packages = bysrpm[source_name] + print(_("Changelogs for {}").format(', '.join([str(pkg) for pkg in bin_packages]))) + for chl in self.latest_changelogs(bin_packages[0]): + print(self.format_changelog(chl)) + + def check_updates(self, patterns=(), reponame=None, print_=True, changelogs=False): + """Check updates matching given *patterns* in selected repository.""" + ypl = self.returnPkgLists('upgrades', patterns, reponame=reponame) + if self.conf.obsoletes or self.conf.verbose: + typl = self.returnPkgLists('obsoletes', patterns, reponame=reponame) + ypl.obsoletes = typl.obsoletes + ypl.obsoletesTuples = typl.obsoletesTuples + + if print_: + columns = _list_cmd_calc_columns(self.output, ypl) + if len(ypl.updates) > 0: + local_pkgs = {} + highlight = self.output.term.MODE['bold'] + if highlight: + # Do the local/remote split we get in "yum updates" + for po in sorted(ypl.updates): + local = po.localPkg() + if os.path.exists(local) and po.verifyLocalPkg(): + local_pkgs[(po.name, po.arch)] = po + + cul = self.conf.color_update_local + cur = self.conf.color_update_remote + self.output.listPkgs(ypl.updates, '', outputType='list', + highlight_na=local_pkgs, columns=columns, + highlight_modes={'=' : cul, 'not in' : cur}) + if changelogs: + self.print_changelogs(ypl.updates) + + if len(ypl.obsoletes) > 0: + print(_('Obsoleting Packages')) + # The tuple is (newPkg, oldPkg) ... 
so sort by new + for obtup in sorted(ypl.obsoletesTuples, + key=operator.itemgetter(0)): + self.output.updatesObsoletesList(obtup, 'obsoletes', + columns=columns) + + return ypl.updates or ypl.obsoletes + + def distro_sync_userlist(self, userlist): + """ Upgrade or downgrade packages to match the latest versions available + in the enabled repositories. + + :return: (exit_code, [ errors ]) + + exit_code is:: + 0 = we're done, exit + 1 = we've errored, exit with error string + 2 = we've got work yet to do, onto the next stage + """ + oldcount = self._goal.req_length() + if len(userlist) == 0: + self.distro_sync() + else: + for pkg_spec in userlist: + self.distro_sync(pkg_spec) + + cnt = self._goal.req_length() - oldcount + if cnt <= 0 and not self._goal.req_has_distupgrade_all(): + msg = _('No packages marked for distribution synchronization.') + raise dnf.exceptions.Error(msg) + + def downgradePkgs(self, specs=[], file_pkgs=[], strict=False): + """Attempt to take the user specified list of packages or + wildcards and downgrade them. 
If a complete version number is + specified, attempt to downgrade them to the specified version + + :param specs: a list of names or wildcards specifying packages to downgrade + :param file_pkgs: a list of pkg objects from local files + """ + + oldcount = self._goal.req_length() + for pkg in file_pkgs: + try: + self.package_downgrade(pkg, strict=strict) + continue # it was something on disk and it ended in rpm + # no matter what we don't go looking at repos + except dnf.exceptions.MarkingError as e: + logger.info(_('No match for argument: %s'), + self.output.term.bold(pkg.location)) + # it was something on disk and it ended in rpm + # no matter what we don't go looking at repos + + for arg in specs: + try: + self.downgrade_to(arg, strict=strict) + except dnf.exceptions.PackageNotFoundError as err: + msg = _('No package %s available.') + logger.info(msg, self.output.term.bold(arg)) + except dnf.exceptions.PackagesNotInstalledError as err: + logger.info(_('Packages for argument %s available, but not installed.'), + self.output.term.bold(err.pkg_spec)) + except dnf.exceptions.MarkingError: + assert False + cnt = self._goal.req_length() - oldcount + if cnt <= 0: + raise dnf.exceptions.Error(_('No packages marked for downgrade.')) + + def output_packages(self, basecmd, pkgnarrow='all', patterns=(), reponame=None): + """Output selection *pkgnarrow* of packages matching *patterns* and *repoid*.""" + try: + highlight = self.output.term.MODE['bold'] + ypl = self.returnPkgLists( + pkgnarrow, patterns, installed_available=highlight, reponame=reponame) + except dnf.exceptions.Error as e: + return 1, [str(e)] + else: + update_pkgs = {} + inst_pkgs = {} + local_pkgs = {} + + columns = None + if basecmd == 'list': + # Dynamically size the columns + columns = _list_cmd_calc_columns(self.output, ypl) + + if highlight and ypl.installed: + # If we have installed and available lists, then do the + # highlighting for the installed packages so you can see what's + # available to update, 
an extra, or newer than what we have. + for pkg in (ypl.hidden_available + + ypl.reinstall_available + + ypl.old_available): + key = (pkg.name, pkg.arch) + if key not in update_pkgs or pkg > update_pkgs[key]: + update_pkgs[key] = pkg + + if highlight and ypl.available: + # If we have installed and available lists, then do the + # highlighting for the available packages so you can see what's + # available to install vs. update vs. old. + for pkg in ypl.hidden_installed: + key = (pkg.name, pkg.arch) + if key not in inst_pkgs or pkg > inst_pkgs[key]: + inst_pkgs[key] = pkg + + if highlight and ypl.updates: + # Do the local/remote split we get in "yum updates" + for po in sorted(ypl.updates): + if po.reponame != hawkey.SYSTEM_REPO_NAME: + local_pkgs[(po.name, po.arch)] = po + + # Output the packages: + clio = self.conf.color_list_installed_older + clin = self.conf.color_list_installed_newer + clir = self.conf.color_list_installed_reinstall + clie = self.conf.color_list_installed_extra + rip = self.output.listPkgs(ypl.installed, _('Installed Packages'), basecmd, + highlight_na=update_pkgs, columns=columns, + highlight_modes={'>' : clio, '<' : clin, + '=' : clir, 'not in' : clie}) + clau = self.conf.color_list_available_upgrade + clad = self.conf.color_list_available_downgrade + clar = self.conf.color_list_available_reinstall + clai = self.conf.color_list_available_install + rap = self.output.listPkgs(ypl.available, _('Available Packages'), basecmd, + highlight_na=inst_pkgs, columns=columns, + highlight_modes={'<' : clau, '>' : clad, + '=' : clar, 'not in' : clai}) + raep = self.output.listPkgs(ypl.autoremove, _('Autoremove Packages'), + basecmd, columns=columns) + rep = self.output.listPkgs(ypl.extras, _('Extra Packages'), basecmd, + columns=columns) + cul = self.conf.color_update_local + cur = self.conf.color_update_remote + rup = self.output.listPkgs(ypl.updates, _('Available Upgrades'), basecmd, + highlight_na=local_pkgs, columns=columns, + highlight_modes={'=' : 
cul, 'not in' : cur}) + + # XXX put this into the ListCommand at some point + if len(ypl.obsoletes) > 0 and basecmd == 'list': + # if we've looked up obsolete lists and it's a list request + rop = [0, ''] + print(_('Obsoleting Packages')) + for obtup in sorted(ypl.obsoletesTuples, + key=operator.itemgetter(0)): + self.output.updatesObsoletesList(obtup, 'obsoletes', + columns=columns) + else: + rop = self.output.listPkgs(ypl.obsoletes, _('Obsoleting Packages'), + basecmd, columns=columns) + rrap = self.output.listPkgs(ypl.recent, _('Recently Added Packages'), + basecmd, columns=columns) + if len(patterns) and \ + rrap[0] and rop[0] and rup[0] and rep[0] and rap[0] and \ + raep[0] and rip[0]: + raise dnf.exceptions.Error(_('No matching Packages to list')) + + def returnPkgLists(self, pkgnarrow='all', patterns=None, + installed_available=False, reponame=None): + """Return a :class:`dnf.yum.misc.GenericHolder` object containing + lists of package objects that match the given names or wildcards. + + :param pkgnarrow: a string specifying which types of packages + lists to produce, such as updates, installed, available, etc. 
+ :param patterns: a list of names or wildcards specifying + packages to list + :param installed_available: whether the available package list + is present as .hidden_available when doing all, available, + or installed + :param reponame: limit packages list to the given repository + + :return: a :class:`dnf.yum.misc.GenericHolder` instance with the + following lists defined:: + + available = list of packageObjects + installed = list of packageObjects + upgrades = tuples of packageObjects (updating, installed) + extras = list of packageObjects + obsoletes = tuples of packageObjects (obsoleting, installed) + recent = list of packageObjects + """ + + done_hidden_available = False + done_hidden_installed = False + if installed_available and pkgnarrow == 'installed': + done_hidden_available = True + pkgnarrow = 'all' + elif installed_available and pkgnarrow == 'available': + done_hidden_installed = True + pkgnarrow = 'all' + + ypl = self._do_package_lists( + pkgnarrow, patterns, ignore_case=True, reponame=reponame) + if self.conf.showdupesfromrepos: + for pkg in ypl.reinstall_available: + if not pkg.installed and not done_hidden_available: + ypl.available.append(pkg) + + if installed_available: + ypl.hidden_available = ypl.available + ypl.hidden_installed = ypl.installed + if done_hidden_available: + ypl.available = [] + if done_hidden_installed: + ypl.installed = [] + return ypl + + def provides(self, args): + """Print out a list of packages that provide the given file or + feature. This a cli wrapper to the provides methods in the + rpmdb and pkgsack. 
+ + :param args: the name of a file or feature to search for + :return: (exit_code, [ errors ]) + + exit_code is:: + + 0 = we're done, exit + 1 = we've errored, exit with error string + 2 = we've got work yet to do, onto the next stage + """ + # always in showdups mode + old_sdup = self.conf.showdupesfromrepos + self.conf.showdupesfromrepos = True + + matches = [] + used_search_strings = [] + for spec in args: + query, used_search_string = super(BaseCli, self).provides(spec) + matches.extend(query) + used_search_strings.extend(used_search_string) + for pkg in sorted(matches): + self.output.matchcallback_verbose(pkg, used_search_strings, args) + self.conf.showdupesfromrepos = old_sdup + + if not matches: + raise dnf.exceptions.Error(_('No Matches found')) + + def _promptWanted(self): + # shortcut for the always-off/always-on options + if self.conf.assumeyes and not self.conf.assumeno: + return False + return True + + def _history_get_transactions(self, extcmds): + if not extcmds: + logger.critical(_('No transaction ID given')) + return None + + old = self.history.old(extcmds) + if not old: + logger.critical(_('Not found given transaction ID')) + return None + return old + + def history_get_transaction(self, extcmds): + old = self._history_get_transactions(extcmds) + if old is None: + return None + if len(old) > 1: + logger.critical(_('Found more than one transaction ID!')) + return old[0] + + def history_rollback_transaction(self, extcmd): + """Rollback given transaction.""" + old = self.history_get_transaction((extcmd,)) + if old is None: + return 1, ['Failed history rollback, no transaction'] + last = self.history.last() + if last is None: + return 1, ['Failed history rollback, no last?'] + if old.tid == last.tid: + return 0, ['Rollback to current, nothing to do'] + + mobj = None + for trans in self.history.old(list(range(old.tid + 1, last.tid + 1))): + if trans.altered_lt_rpmdb: + logger.warning(_('Transaction history is incomplete, before %u.'), trans.tid) + 
elif trans.altered_gt_rpmdb: + logger.warning(_('Transaction history is incomplete, after %u.'), trans.tid) + + if mobj is None: + mobj = dnf.db.history.MergedTransactionWrapper(trans) + else: + mobj.merge(trans) + + tm = dnf.util.normalize_time(old.beg_timestamp) + print("Rollback to transaction %u, from %s" % (old.tid, tm)) + print(self.output.fmtKeyValFill(" Undoing the following transactions: ", + ", ".join((str(x) for x in mobj.tids())))) + self.output.historyInfoCmdPkgsAltered(mobj) # :todo + +# history = dnf.history.open_history(self.history) # :todo +# m = libdnf.transaction.MergedTransaction() + +# return + +# operations = dnf.history.NEVRAOperations() +# for id_ in range(old.tid + 1, last.tid + 1): +# operations += history.transaction_nevra_ops(id_) + + try: + self._history_undo_operations(mobj, old.tid + 1, True, strict=self.conf.strict) + except dnf.exceptions.PackagesNotInstalledError as err: + raise + logger.info(_('No package %s installed.'), + self.output.term.bold(ucd(err.pkg_spec))) + return 1, ['A transaction cannot be undone'] + except dnf.exceptions.PackagesNotAvailableError as err: + raise + logger.info(_('No package %s available.'), + self.output.term.bold(ucd(err.pkg_spec))) + return 1, ['A transaction cannot be undone'] + except dnf.exceptions.MarkingError: + raise + assert False + else: + return 2, ["Rollback to transaction %u" % (old.tid,)] + + def history_undo_transaction(self, extcmd): + """Undo given transaction.""" + old = self.history_get_transaction((extcmd,)) + if old is None: + return 1, ['Failed history undo'] + + tm = dnf.util.normalize_time(old.beg_timestamp) + msg = _("Undoing transaction {}, from {}").format(old.tid, ucd(tm)) + logger.info(msg) + self.output.historyInfoCmdPkgsAltered(old) # :todo + + + mobj = dnf.db.history.MergedTransactionWrapper(old) + + try: + self._history_undo_operations(mobj, old.tid, strict=self.conf.strict) + except dnf.exceptions.PackagesNotInstalledError as err: + logger.info(_('No package %s 
installed.'), + self.output.term.bold(ucd(err.pkg_spec))) + return 1, ['An operation cannot be undone'] + except dnf.exceptions.PackagesNotAvailableError as err: + logger.info(_('No package %s available.'), + self.output.term.bold(ucd(err.pkg_spec))) + return 1, ['An operation cannot be undone'] + except dnf.exceptions.MarkingError: + raise + else: + return 2, ["Undoing transaction %u" % (old.tid,)] + +class Cli(object): + def __init__(self, base): + self.base = base + self.cli_commands = {} + self.command = None + self.demands = dnf.cli.demand.DemandSheet() # :api + + self.register_command(dnf.cli.commands.alias.AliasCommand) + self.register_command(dnf.cli.commands.autoremove.AutoremoveCommand) + self.register_command(dnf.cli.commands.check.CheckCommand) + self.register_command(dnf.cli.commands.clean.CleanCommand) + self.register_command(dnf.cli.commands.distrosync.DistroSyncCommand) + self.register_command(dnf.cli.commands.deplist.DeplistCommand) + self.register_command(dnf.cli.commands.downgrade.DowngradeCommand) + self.register_command(dnf.cli.commands.group.GroupCommand) + self.register_command(dnf.cli.commands.install.InstallCommand) + self.register_command(dnf.cli.commands.makecache.MakeCacheCommand) + self.register_command(dnf.cli.commands.mark.MarkCommand) + self.register_command(dnf.cli.commands.module.ModuleCommand) + self.register_command(dnf.cli.commands.reinstall.ReinstallCommand) + self.register_command(dnf.cli.commands.remove.RemoveCommand) + self.register_command(dnf.cli.commands.repolist.RepoListCommand) + self.register_command(dnf.cli.commands.repoquery.RepoQueryCommand) + self.register_command(dnf.cli.commands.search.SearchCommand) + self.register_command(dnf.cli.commands.shell.ShellCommand) + self.register_command(dnf.cli.commands.swap.SwapCommand) + self.register_command(dnf.cli.commands.updateinfo.UpdateInfoCommand) + self.register_command(dnf.cli.commands.upgrade.UpgradeCommand) + 
self.register_command(dnf.cli.commands.upgrademinimal.UpgradeMinimalCommand) + self.register_command(dnf.cli.commands.InfoCommand) + self.register_command(dnf.cli.commands.ListCommand) + self.register_command(dnf.cli.commands.ProvidesCommand) + self.register_command(dnf.cli.commands.CheckUpdateCommand) + self.register_command(dnf.cli.commands.RepoPkgsCommand) + self.register_command(dnf.cli.commands.HelpCommand) + self.register_command(dnf.cli.commands.HistoryCommand) + + def _configure_repos(self, opts): + self.base.read_all_repos(opts) + if opts.repofrompath: + for label, path in opts.repofrompath.items(): + this_repo = self.base.repos.add_new_repo(label, self.base.conf, baseurl=[path]) + this_repo._configure_from_options(opts) + # do not let this repo to be disabled + opts.repos_ed.append((label, "enable")) + + if opts.repo: + opts.repos_ed.insert(0, ("*", "disable")) + opts.repos_ed.extend([(r, "enable") for r in opts.repo]) + + notmatch = set() + + # Process repo enables and disables in order + try: + for (repo, operation) in opts.repos_ed: + repolist = self.base.repos.get_matching(repo) + if not repolist: + if self.base.conf.strict and operation == "enable": + msg = _("Unknown repo: '%s'") + raise dnf.exceptions.RepoError(msg % repo) + notmatch.add(repo) + + if operation == "enable": + repolist.enable() + else: + repolist.disable() + except dnf.exceptions.ConfigError as e: + logger.critical(e) + self.optparser.print_help() + sys.exit(1) + + for repo in notmatch: + logger.warning(_("No repository match: %s"), repo) + + for rid in self.base._repo_persistor.get_expired_repos(): + repo = self.base.repos.get(rid) + if repo: + repo._repo.expire() + + # setup the progress bars/callbacks + (bar, self.base._ds_callback) = self.base.output.setup_progress_callbacks() + self.base.repos.all().set_progress_bar(bar) + key_import = output.CliKeyImport(self.base, self.base.output) + self.base.repos.all()._set_key_import(key_import) + + def _log_essentials(self): + 
logger.debug('{prog} version: %s'.format(prog=dnf.util.MAIN_PROG_UPPER), + dnf.const.VERSION) + logger.log(dnf.logging.DDEBUG, + 'Command: %s', self.cmdstring) + logger.log(dnf.logging.DDEBUG, + 'Installroot: %s', self.base.conf.installroot) + logger.log(dnf.logging.DDEBUG, 'Releasever: %s', + self.base.conf.releasever) + logger.debug("cachedir: %s", self.base.conf.cachedir) + + def _process_demands(self): + demands = self.demands + repos = self.base.repos + + if demands.root_user: + if not dnf.util.am_i_root(): + raise dnf.exceptions.Error(_('This command has to be run under the root user.')) + + if demands.changelogs: + for repo in repos.iter_enabled(): + repo.load_metadata_other = True + + if demands.cacheonly or self.base.conf.cacheonly: + self.base.conf.cacheonly = True + for repo in repos.values(): + repo._repo.setSyncStrategy(dnf.repo.SYNC_ONLY_CACHE) + else: + if demands.freshest_metadata: + for repo in repos.iter_enabled(): + repo._repo.expire() + elif not demands.fresh_metadata: + for repo in repos.values(): + repo._repo.setSyncStrategy(dnf.repo.SYNC_LAZY) + + if demands.sack_activation: + self.base.fill_sack( + load_system_repo='auto' if self.demands.load_system_repo else False, + load_available_repos=self.demands.available_repos) + + def _parse_commands(self, opts, args): + """Check that the requested CLI command exists.""" + + basecmd = opts.command + command_cls = self.cli_commands.get(basecmd) + if command_cls is None: + logger.critical(_('No such command: %s. 
Please use %s --help'), + basecmd, sys.argv[0]) + if self.base.conf.plugins: + logger.critical(_("It could be a {PROG} plugin command, " + "try: \"{prog} install 'dnf-command(%s)'\"").format( + prog=dnf.util.MAIN_PROG, PROG=dnf.util.MAIN_PROG_UPPER), basecmd) + else: + logger.critical(_("It could be a {prog} plugin command, " + "but loading of plugins is currently disabled.").format( + prog=dnf.util.MAIN_PROG_UPPER)) + raise CliError + self.command = command_cls(self) + + logger.log(dnf.logging.DDEBUG, 'Base command: %s', basecmd) + logger.log(dnf.logging.DDEBUG, 'Extra commands: %s', args) + + def configure(self, args, option_parser=None): + """Parse command line arguments, and set up :attr:`self.base.conf` and + :attr:`self.cmds`, as well as logger objects in base instance. + + :param args: a list of command line arguments + :param option_parser: a class for parsing cli options + """ + aliases = dnf.cli.aliases.Aliases() + args = aliases.resolve(args) + + self.optparser = dnf.cli.option_parser.OptionParser() \ + if option_parser is None else option_parser + opts = self.optparser.parse_main_args(args) + + # Just print out the version if that's what the user wanted + if opts.version: + print(dnf.const.VERSION) + print_versions(self.base.conf.history_record_packages, self.base, + self.base.output) + sys.exit(0) + + if opts.quiet: + opts.debuglevel = 0 + opts.errorlevel = 2 + if opts.verbose: + opts.debuglevel = opts.errorlevel = dnf.const.VERBOSE_LEVEL + + # Read up configuration options and initialize plugins + try: + if opts.cacheonly: + self.base.conf._set_value("cachedir", self.base.conf.system_cachedir, + dnf.conf.PRIO_DEFAULT) + self.demands.cacheonly = True + self.base.conf._configure_from_options(opts) + self._read_conf_file(opts.releasever) + if 'arch' in opts: + self.base.conf.arch = opts.arch + self.base.conf._adjust_conf_options() + except (dnf.exceptions.ConfigError, ValueError) as e: + logger.critical(_('Config error: %s'), e) + sys.exit(1) + except 
IOError as e: + e = '%s: %s' % (ucd(str(e)), repr(e.filename)) + logger.critical(_('Config error: %s'), e) + sys.exit(1) + if opts.destdir is not None: + self.base.conf.destdir = opts.destdir + if not self.base.conf.downloadonly and opts.command not in ( + 'download', 'system-upgrade', 'reposync'): + logger.critical(_('--destdir or --downloaddir must be used with --downloadonly ' + 'or download or system-upgrade command.') + ) + sys.exit(1) + if (opts.set_enabled or opts.set_disabled) and opts.command != 'config-manager': + logger.critical( + _('--enable, --set-enabled and --disable, --set-disabled ' + 'must be used with config-manager command.')) + sys.exit(1) + + if opts.sleeptime is not None: + time.sleep(random.randrange(opts.sleeptime * 60)) + + # store the main commands & summaries, before plugins are loaded + self.optparser.add_commands(self.cli_commands, 'main') + # store the plugin commands & summaries + self.base.init_plugins(opts.disableplugin, opts.enableplugin, self) + self.optparser.add_commands(self.cli_commands,'plugin') + + # show help if no command specified + # this is done here, because we first have the full + # usage info after the plugins are loaded. 
+ if not opts.command: + self.optparser.print_help() + sys.exit(0) + + # save our original args out + self.base.args = args + # save out as a nice command string + self.cmdstring = self.optparser.prog + ' ' + for arg in self.base.args: + self.cmdstring += '%s ' % arg + + self._log_essentials() + try: + self._parse_commands(opts, args) + except CliError: + sys.exit(1) + + # show help for dnf --help / --help-cmd + if opts.help: + self.optparser.print_help(self.command) + sys.exit(0) + + opts = self.optparser.parse_command_args(self.command, args) + + if opts.allowerasing: + self.demands.allow_erasing = opts.allowerasing + self.base._allow_erasing = True + if opts.freshest_metadata: + self.demands.freshest_metadata = opts.freshest_metadata + if opts.debugsolver: + self.base.conf.debug_solver = True + if opts.obsoletes: + self.base.conf.obsoletes = True + self.command.pre_configure() + self.base.pre_configure_plugins() + + # with cachedir in place we can configure stuff depending on it: + self.base._activate_persistor() + + self._configure_repos(opts) + + self.base.configure_plugins() + + self.base.conf._configure_from_options(opts) + + self.command.configure() + + if self.base.conf.destdir: + dnf.util.ensure_dir(self.base.conf.destdir) + self.base.repos.all().pkgdir = self.base.conf.destdir + + if self.base.conf.color != 'auto': + self.base.output.term.reinit(color=self.base.conf.color) + + if rpm.expandMacro('%_pkgverify_level') in ('signature', 'all'): + forcing = False + for repo in self.base.repos.iter_enabled(): + if repo.gpgcheck: + continue + repo.gpgcheck = True + forcing = True + if not self.base.conf.localpkg_gpgcheck: + self.base.conf.localpkg_gpgcheck = True + forcing = True + if forcing: + logger.warning( + _("Warning: Enforcing GPG signature check globally " + "as per active RPM security policy (see 'gpgcheck' in " + "dnf.conf(5) for how to squelch this message)" + ) + ) + + def _read_conf_file(self, releasever=None): + timer = 
dnf.logging.Timer('config') + conf = self.base.conf + + # replace remote config path with downloaded file + conf._check_remote_file('config_file_path') + + # search config file inside the installroot first + conf._search_inside_installroot('config_file_path') + + # check whether a config file is requested from command line and the file exists + filename = conf._get_value('config_file_path') + if (conf._get_priority('config_file_path') == dnf.conf.PRIO_COMMANDLINE) and \ + not os.path.isfile(filename): + raise dnf.exceptions.ConfigError(_('Config file "{}" does not exist').format(filename)) + + # read config + conf.read(priority=dnf.conf.PRIO_MAINCONFIG) + + # search reposdir file inside the installroot first + from_root = conf._search_inside_installroot('reposdir') + # Update vars from same root like repos were taken + if conf._get_priority('varsdir') == dnf.conf.PRIO_COMMANDLINE: + from_root = "/" + subst = conf.substitutions + subst.update_from_etc(from_root, varsdir=conf._get_value('varsdir')) + # cachedir, logs, releasever, and gpgkey are taken from or stored in installroot + if releasever is None and conf.releasever is None: + releasever = dnf.rpm.detect_releasever(conf.installroot) + elif releasever == '/': + releasever = dnf.rpm.detect_releasever(releasever) + if releasever is not None: + conf.releasever = releasever + if conf.releasever is None: + logger.warning(_("Unable to detect release version (use '--releasever' to specify " + "release version)")) + + for opt in ('cachedir', 'logdir', 'persistdir'): + conf.prepend_installroot(opt) + + self.base._logging._setup_from_dnf_conf(conf) + + timer() + return conf + + def _populate_update_security_filter(self, opts, query, cmp_type='eq', all=None): + """ + + :param opts: + :param query: base package set for filters + :param cmp_type: string like "eq", "gt", "gte", "lt", "lte" + :param all: + :return: + """ + if (opts is None) and (all is None): + return + filters = [] + if opts.bugfix or all: + key = 
{'advisory_type__' + cmp_type: 'bugfix'} + filters.append(query.filter(**key)) + if opts.enhancement or all: + key = {'advisory_type__' + cmp_type: 'enhancement'} + filters.append(query.filter(**key)) + if opts.newpackage or all: + key = {'advisory_type__' + cmp_type: 'newpackage'} + filters.append(query.filter(**key)) + if opts.security or all: + key = {'advisory_type__' + cmp_type: 'security'} + filters.append(query.filter(**key)) + if opts.advisory: + key = {'advisory__' + cmp_type: opts.advisory} + filters.append(query.filter(**key)) + if opts.bugzilla: + key = {'advisory_bug__' + cmp_type: opts.bugzilla} + filters.append(query.filter(**key)) + if opts.cves: + key = {'advisory_cve__' + cmp_type: opts.cves} + filters.append(query.filter(**key)) + if opts.severity: + key = {'advisory_severity__' + cmp_type: opts.severity} + filters.append(query.filter(**key)) + self.base._update_security_filters = filters + + def redirect_logger(self, stdout=None, stderr=None): + # :api + """ + Change minimal logger level for terminal output to stdout and stderr according to specific + command requirements + @param stdout: logging.INFO, logging.WARNING, ... + @param stderr:logging.INFO, logging.WARNING, ... 
+ """ + if stdout is not None: + self.base._logging.stdout_handler.setLevel(stdout) + if stderr is not None: + self.base._logging.stderr_handler.setLevel(stderr) + + def redirect_repo_progress(self, fo=sys.stderr): + progress = dnf.cli.progress.MultiFileProgressMeter(fo) + self.base.output.progress = progress + self.base.repos.all().set_progress_bar(progress) + + def _check_running_kernel(self): + kernel = self.base.sack.get_running_kernel() + if kernel is None: + return + + q = self.base.sack.query().filterm(provides=kernel.name) + q = q.installed() + q.filterm(advisory_type='security') + + ikpkg = kernel + for pkg in q: + if pkg > ikpkg: + ikpkg = pkg + + if ikpkg > kernel: + print('Security: %s is an installed security update' % ikpkg) + print('Security: %s is the currently running version' % kernel) + + def _option_conflict(self, option_string_1, option_string_2): + print(self.optparser.print_usage()) + raise dnf.exceptions.Error(_("argument {}: not allowed with argument {}".format( + option_string_1, option_string_2))) + + def register_command(self, command_cls): + """Register a Command. :api""" + for name in command_cls.aliases: + if name in self.cli_commands: + raise dnf.exceptions.ConfigError(_('Command "%s" already defined') % name) + self.cli_commands[name] = command_cls + + def run(self): + """Call the base command, and pass it the extended commands or + arguments. 
+ + :return: (exit_code, [ errors ]) + + exit_code is:: + + 0 = we're done, exit + 1 = we've errored, exit with error string + 2 = we've got work yet to do, onto the next stage + """ + self._process_demands() + + # Reports about excludes and includes (but not from plugins) + if self.base.conf.excludepkgs: + logger.debug( + _('Excludes in dnf.conf: ') + ", ".join(sorted(set(self.base.conf.excludepkgs)))) + if self.base.conf.includepkgs: + logger.debug( + _('Includes in dnf.conf: ') + ", ".join(sorted(set(self.base.conf.includepkgs)))) + for repo in self.base.repos.iter_enabled(): + if repo.excludepkgs: + logger.debug(_('Excludes in repo ') + repo.id + ": " + + ", ".join(sorted(set(repo.excludepkgs)))) + if repo.includepkgs: + logger.debug(_('Includes in repo ') + repo.id + ": " + + ", ".join(sorted(set(repo.includepkgs)))) + + return self.command.run() diff --git a/dnf/cli/commands/CMakeLists.txt b/dnf/cli/commands/CMakeLists.txt new file mode 100644 index 0000000..232a315 --- /dev/null +++ b/dnf/cli/commands/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB commands_SRCS *.py) +INSTALL (FILES ${commands_SRCS} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/cli/commands) diff --git a/dnf/cli/commands/__init__.py b/dnf/cli/commands/__init__.py new file mode 100644 index 0000000..2a0726b --- /dev/null +++ b/dnf/cli/commands/__init__.py @@ -0,0 +1,1029 @@ +# Copyright 2006 Duke University +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Written by Seth Vidal + +""" +Classes for subcommands of the yum command line interface. +""" + +from __future__ import print_function +from __future__ import unicode_literals + +import libdnf + +from dnf.cli.option_parser import OptionParser +from dnf.i18n import _, ucd + +import argparse +import dnf.cli +import dnf.cli.demand +import dnf.const +import dnf.exceptions +import dnf.i18n +import dnf.pycomp +import dnf.transaction +import dnf.util +import functools +import logging +import operator +import os + +logger = logging.getLogger('dnf') +_RPM_VERIFY = _("To diagnose the problem, try running: '%s'.") % \ + 'rpm -Va --nofiles --nodigest' +_RPM_REBUILDDB = _("You probably have corrupted RPMDB, running '%s'" + " might fix the issue.") % 'rpm --rebuilddb' + +gpg_msg = \ + _("""You have enabled checking of packages via GPG keys. This is a good thing. +However, you do not have any GPG public keys installed. You need to download +the keys for packages you wish to install and install them. +You can do that by running the command: + rpm --import public.gpg.key + + +Alternatively you can specify the url to the key you would like to use +for a repository in the 'gpgkey' option in a repository section and {prog} +will install it for you. + +For more information contact your distribution or package provider.""") + + +def _checkGPGKey(base, cli): + """Verify that there are gpg keys for the enabled repositories in the + rpm database. + + :param base: a :class:`dnf.Base` object. 
+ :raises: :class:`cli.CliError` + """ + if not base.conf.gpgcheck: + return + if not base._gpg_key_check(): + for repo in base.repos.iter_enabled(): + if (repo.gpgcheck or repo.repo_gpgcheck) and not repo.gpgkey: + logger.critical("\n%s\n", gpg_msg.format(prog=dnf.util.MAIN_PROG_UPPER)) + logger.critical(_("Problem repository: %s"), repo) + raise dnf.cli.CliError + + +def _checkEnabledRepo(base, possible_local_files=()): + """Verify that there is at least one enabled repo. + + :param base: a :class:`dnf.Base` object. + :param possible_local_files: the list of strings that could be a local rpms + :raises: :class:`cli.CliError`: + """ + if base.repos._any_enabled(): + return + + for lfile in possible_local_files: + if lfile.endswith(".rpm") and os.path.exists(lfile): + return + scheme = dnf.pycomp.urlparse.urlparse(lfile)[0] + if scheme in ('http', 'ftp', 'file', 'https'): + return + msg = _('There are no enabled repositories in "{}".').format('", "'.join(base.conf.reposdir)) + raise dnf.cli.CliError(msg) + + +class Command(object): + """Abstract base class for CLI commands.""" + + aliases = [] # :api + summary = "" # :api + opts = None + + def __init__(self, cli): + # :api + self.cli = cli + + @property + def base(self): + # :api + return self.cli.base + + @property + def _basecmd(self): + return self.aliases[0] + + @property + def output(self): + return self.cli.base.output + + def set_argparser(self, parser): + """Define command specific options and arguments. 
#:api""" + pass + + def pre_configure(self): + # :api + """Do any command-specific pre-configuration.""" + pass + + def configure(self): + # :api + """Do any command-specific configuration.""" + pass + + def get_error_output(self, error): + """Get suggestions for resolving the given error.""" + if isinstance(error, dnf.exceptions.TransactionCheckError): + return (_RPM_VERIFY, _RPM_REBUILDDB) + raise NotImplementedError('error not supported yet: %s' % error) + + def run(self): + # :api + """Execute the command.""" + pass + + def run_transaction(self): + """Finalize operations post-transaction.""" + pass + +class InfoCommand(Command): + """A class containing methods needed by the cli to execute the + info command. + """ + + aliases = ('info',) + summary = _('display details about a package or group of packages') + DEFAULT_PKGNARROW = 'all' + pkgnarrows = {'available', 'installed', 'extras', 'updates', 'upgrades', + 'autoremove', 'recent', 'obsoletes', DEFAULT_PKGNARROW} + + @classmethod + def set_argparser(cls, parser): + narrows = parser.add_mutually_exclusive_group() + narrows.add_argument('--all', dest='_packages_action', + action='store_const', const='all', default=None, + help=_("show all packages (default)")) + narrows.add_argument('--available', dest='_packages_action', + action='store_const', const='available', + help=_("show only available packages")) + narrows.add_argument('--installed', dest='_packages_action', + action='store_const', const='installed', + help=_("show only installed packages")) + narrows.add_argument('--extras', dest='_packages_action', + action='store_const', const='extras', + help=_("show only extras packages")) + narrows.add_argument('--updates', dest='_packages_action', + action='store_const', const='upgrades', + help=_("show only upgrades packages")) + narrows.add_argument('--upgrades', dest='_packages_action', + action='store_const', const='upgrades', + help=_("show only upgrades packages")) + narrows.add_argument('--autoremove', 
dest='_packages_action', + action='store_const', const='autoremove', + help=_("show only autoremove packages")) + narrows.add_argument('--recent', dest='_packages_action', + action='store_const', const='recent', + help=_("show only recently changed packages")) + parser.add_argument('packages', nargs='*', metavar=_('PACKAGE'), + choices=cls.pkgnarrows, default=cls.DEFAULT_PKGNARROW, + action=OptionParser.PkgNarrowCallback, + help=_("Package name specification")) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + if self.opts._packages_action: + self.opts.packages_action = self.opts._packages_action + if self.opts.packages_action != 'installed': + demands.available_repos = True + if self.opts.obsoletes: + if self.opts._packages_action: + self.cli._option_conflict("--obsoletes", "--" + self.opts._packages_action) + else: + self.opts.packages_action = 'obsoletes' + if self.opts.packages_action == 'updates': + self.opts.packages_action = 'upgrades' + + def run(self): + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + return self.base.output_packages('info', self.opts.packages_action, + self.opts.packages) + +class ListCommand(InfoCommand): + """A class containing methods needed by the cli to execute the + list command. + """ + + aliases = ('list',) + summary = _('list a package or groups of packages') + + def run(self): + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + return self.base.output_packages('list', self.opts.packages_action, + self.opts.packages) + + +class ProvidesCommand(Command): + """A class containing methods needed by the cli to execute the + provides command. 
+ """ + + aliases = ('provides', 'whatprovides', 'prov') + summary = _('find what package provides the given value') + + @staticmethod + def set_argparser(parser): + parser.add_argument('dependency', nargs='+', metavar=_('PROVIDE'), + help=_("Provide specification to search for")) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.fresh_metadata = False + demands.sack_activation = True + + def run(self): + logger.debug(_("Searching Packages: ")) + return self.base.provides(self.opts.dependency) + +class CheckUpdateCommand(Command): + """A class containing methods needed by the cli to execute the + check-update command. + """ + + aliases = ('check-update', 'check-upgrade') + summary = _('check for available package upgrades') + + @staticmethod + def set_argparser(parser): + parser.add_argument('--changelogs', dest='changelogs', + default=False, action='store_true', + help=_('show changelogs before update')) + parser.add_argument('packages', nargs='*', metavar=_('PACKAGE')) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.plugin_filtering_enabled = True + if self.opts.changelogs: + demands.changelogs = True + _checkEnabledRepo(self.base) + + def run(self): + query = self.base.sack.query().upgrades() + if self.base.conf.obsoletes: + obsoleted = query.union(self.base.sack.query().installed()) + obsoletes = self.base.sack.query().filter(obsoletes=obsoleted) + query = query.union(obsoletes) + self.cli._populate_update_security_filter(self.opts, query, cmp_type="gte") + + found = self.base.check_updates(self.opts.packages, print_=True, + changelogs=self.opts.changelogs) + if found: + self.cli.demands.success_exit_status = 100 + + if self.base.conf.autocheck_running_kernel: + self.cli._check_running_kernel() + + +class RepoPkgsCommand(Command): + """Implementation of the repository-packages command.""" + + class CheckUpdateSubCommand(Command): + 
"""Implementation of the info sub-command.""" + + aliases = ('check-update',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + found = self.base.check_updates(self.opts.pkg_specs, + self.reponame, print_=True) + if found: + self.cli.demands.success_exit_status = 100 + + class InfoSubCommand(Command): + """Implementation of the info sub-command.""" + + aliases = ('info',) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + if self.opts._pkg_specs_action: + self.opts.pkg_specs_action = self.opts._pkg_specs_action + if self.opts.pkg_specs_action != 'installed': + demands.available_repos = True + if self.opts.obsoletes: + if self.opts._pkg_specs_action: + self.cli._option_conflict("--obsoletes", "--" + self.opts._pkg_specs_action) + else: + self.opts.pkg_specs_action = 'obsoletes' + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + self.base.output_packages('info', self.opts.pkg_specs_action, + self.opts.pkg_specs, self.reponame) + + class InstallSubCommand(Command): + """Implementation of the install sub-command.""" + + aliases = ('install',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Install all packages. 
+ try: + self.base.install('*', self.reponame) + except dnf.exceptions.MarkingError: + logger.info(_('No package available.')) + else: + done = True + else: + # Install packages. + for pkg_spec in self.opts.pkg_specs: + try: + self.base.install(pkg_spec, self.reponame) + except dnf.exceptions.MarkingError as e: + msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec)) + logger.info(msg) + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('No packages marked for install.')) + + class ListSubCommand(InfoSubCommand): + """Implementation of the list sub-command.""" + + aliases = ('list',) + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + self.base.output_packages('list', self.opts.pkg_specs_action, + self.opts.pkg_specs, self.reponame) + + class MoveToSubCommand(Command): + """Implementation of the move-to sub-command.""" + + aliases = ('move-to',) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Reinstall all packages. + try: + self.base.reinstall('*', new_reponame=self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + logger.info(_('No package installed.')) + except dnf.exceptions.PackagesNotAvailableError: + logger.info(_('No package available.')) + except dnf.exceptions.MarkingError: + assert False, 'Only the above marking errors are expected.' + else: + done = True + else: + # Reinstall packages. 
+ for pkg_spec in self.opts.pkg_specs: + try: + self.base.reinstall(pkg_spec, new_reponame=self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No match for argument: %s') + logger.info(msg, pkg_spec) + except dnf.exceptions.PackagesNotAvailableError as err: + for pkg in err.packages: + xmsg = '' + pkgrepo = self.base.history.repo(pkg) + if pkgrepo: + xmsg = _(' (from %s)') % pkgrepo + msg = _('Installed package %s%s not available.') + logger.info(msg, self.output.term.bold(pkg), xmsg) + except dnf.exceptions.MarkingError: + assert False, \ + 'Only the above marking errors are expected.' + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('Nothing to do.')) + + class ReinstallOldSubCommand(Command): + """Implementation of the reinstall-old sub-command.""" + + aliases = ('reinstall-old',) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Reinstall all packages. + try: + self.base.reinstall('*', self.reponame, self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No package installed from the repository.') + logger.info(msg) + except dnf.exceptions.PackagesNotAvailableError: + logger.info(_('No package available.')) + except dnf.exceptions.MarkingError: + assert False, 'Only the above marking errors are expected.' + else: + done = True + else: + # Reinstall packages. 
+ for pkg_spec in self.opts.pkg_specs: + try: + self.base.reinstall(pkg_spec, self.reponame, + self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No match for argument: %s') + logger.info(msg, pkg_spec) + except dnf.exceptions.PackagesNotAvailableError as err: + for pkg in err.packages: + xmsg = '' + pkgrepo = self.base.history.repo(pkg) + if pkgrepo: + xmsg = _(' (from %s)') % pkgrepo + msg = _('Installed package %s%s not available.') + logger.info(msg, self.output.term.bold(pkg), xmsg) + except dnf.exceptions.MarkingError: + assert False, \ + 'Only the above marking errors are expected.' + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('Nothing to do.')) + + class ReinstallSubCommand(Command): + """Implementation of the reinstall sub-command.""" + + aliases = ('reinstall',) + + def __init__(self, cli): + """Initialize the command.""" + super(RepoPkgsCommand.ReinstallSubCommand, self).__init__(cli) + self.wrapped_commands = (RepoPkgsCommand.ReinstallOldSubCommand(cli), + RepoPkgsCommand.MoveToSubCommand(cli)) + + def configure(self): + self.cli.demands.available_repos = True + for command in self.wrapped_commands: + command.opts = self.opts + command.reponame = self.reponame + command.configure() + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + for command in self.wrapped_commands: + try: + command.run_on_repo() + except dnf.exceptions.Error: + continue + else: + break + else: + raise dnf.exceptions.Error(_('No packages marked for reinstall.')) + + class RemoveOrDistroSyncSubCommand(Command): + """Implementation of the remove-or-distro-sync sub-command.""" + + aliases = ('remove-or-distro-sync',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def _replace(self, pkg_spec, reponame): + """Synchronize a 
package with another repository or remove it.""" + self.cli.base.sack.disable_repo(reponame) + + subject = dnf.subject.Subject(pkg_spec) + matches = subject.get_best_query(self.cli.base.sack) + history = self.cli.base.history + installed = [ + pkg for pkg in matches.installed() + if history.repo(pkg) == reponame] + if not installed: + raise dnf.exceptions.PackagesNotInstalledError( + 'no package matched', pkg_spec) + available = matches.available() + clean_deps = self.cli.base.conf.clean_requirements_on_remove + for package in installed: + if available.filter(name=package.name, arch=package.arch): + self.cli.base._goal.distupgrade(package) + else: + self.cli.base._goal.erase(package, clean_deps=clean_deps) + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Sync all packages. + try: + self._replace('*', self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No package installed from the repository.') + logger.info(msg) + else: + done = True + else: + # Reinstall packages. + for pkg_spec in self.opts.pkg_specs: + try: + self._replace(pkg_spec, self.reponame) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No match for argument: %s') + logger.info(msg, pkg_spec) + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('Nothing to do.')) + + class RemoveOrReinstallSubCommand(Command): + """Implementation of the remove-or-reinstall sub-command.""" + + aliases = ('remove-or-reinstall',) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Reinstall all packages. 
+ try: + self.base.reinstall('*', old_reponame=self.reponame, + new_reponame_neq=self.reponame, + remove_na=True) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No package installed from the repository.') + logger.info(msg) + except dnf.exceptions.MarkingError: + assert False, 'Only the above marking error is expected.' + else: + done = True + else: + # Reinstall packages. + for pkg_spec in self.opts.pkg_specs: + try: + self.base.reinstall( + pkg_spec, old_reponame=self.reponame, + new_reponame_neq=self.reponame, remove_na=True) + except dnf.exceptions.PackagesNotInstalledError: + msg = _('No match for argument: %s') + logger.info(msg, pkg_spec) + except dnf.exceptions.MarkingError: + assert False, 'Only the above marking error is expected.' + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('Nothing to do.')) + + class RemoveSubCommand(Command): + """Implementation of the remove sub-command.""" + + aliases = ('remove',) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.allow_erasing = True + demands.available_repos = False + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + + done = False + + if not self.opts.pkg_specs: + # Remove all packages. + try: + self.base.remove('*', self.reponame) + except dnf.exceptions.MarkingError: + msg = _('No package installed from the repository.') + logger.info(msg) + else: + done = True + else: + # Remove packages. 
+ for pkg_spec in self.opts.pkg_specs: + try: + self.base.remove(pkg_spec, self.reponame) + except dnf.exceptions.MarkingError as e: + logger.info(str(e)) + else: + done = True + + if not done: + logger.warning(_('No packages marked for removal.')) + + class UpgradeSubCommand(Command): + """Implementation of the upgrade sub-command.""" + + aliases = ('upgrade', 'upgrade-to') + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + + def run_on_repo(self): + """Execute the command with respect to given arguments *cli_args*.""" + _checkGPGKey(self.base, self.cli) + + done = False + + if not self.opts.pkg_specs: + # Update all packages. + self.base.upgrade_all(self.reponame) + done = True + else: + # Update packages. + for pkg_spec in self.opts.pkg_specs: + try: + self.base.upgrade(pkg_spec, self.reponame) + except dnf.exceptions.MarkingError: + logger.info(_('No match for argument: %s'), pkg_spec) + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('No packages marked for upgrade.')) + + SUBCMDS = {CheckUpdateSubCommand, InfoSubCommand, InstallSubCommand, + ListSubCommand, MoveToSubCommand, ReinstallOldSubCommand, + ReinstallSubCommand, RemoveOrDistroSyncSubCommand, + RemoveOrReinstallSubCommand, RemoveSubCommand, + UpgradeSubCommand} + + aliases = ('repository-packages', + 'repo-pkgs', 'repo-packages', 'repository-pkgs') + summary = _('run commands on top of all packages in given repository') + + def __init__(self, cli): + """Initialize the command.""" + super(RepoPkgsCommand, self).__init__(cli) + subcmd_objs = (subcmd(cli) for subcmd in self.SUBCMDS) + self.subcmd = None + self._subcmd_name2obj = { + alias: subcmd for subcmd in subcmd_objs for alias in subcmd.aliases} + + def set_argparser(self, parser): + narrows = parser.add_mutually_exclusive_group() + narrows.add_argument('--all', dest='_pkg_specs_action', + 
action='store_const', const='all', default=None, + help=_("show all packages (default)")) + narrows.add_argument('--available', dest='_pkg_specs_action', + action='store_const', const='available', + help=_("show only available packages")) + narrows.add_argument('--installed', dest='_pkg_specs_action', + action='store_const', const='installed', + help=_("show only installed packages")) + narrows.add_argument('--extras', dest='_pkg_specs_action', + action='store_const', const='extras', + help=_("show only extras packages")) + narrows.add_argument('--updates', dest='_pkg_specs_action', + action='store_const', const='upgrades', + help=_("show only upgrades packages")) + narrows.add_argument('--upgrades', dest='_pkg_specs_action', + action='store_const', const='upgrades', + help=_("show only upgrades packages")) + narrows.add_argument('--autoremove', dest='_pkg_specs_action', + action='store_const', const='autoremove', + help=_("show only autoremove packages")) + narrows.add_argument('--recent', dest='_pkg_specs_action', + action='store_const', const='recent', + help=_("show only recently changed packages")) + + parser.add_argument( + 'reponame', nargs=1, action=OptionParser._RepoCallbackEnable, + metavar=_('REPOID'), help=_("Repository ID")) + subcommand_choices = [subcmd.aliases[0] for subcmd in self.SUBCMDS] + parser.add_argument('subcmd', nargs=1, metavar="SUBCOMMAND", + choices=subcommand_choices, help=", ".join(subcommand_choices)) + DEFAULT_PKGNARROW = 'all' + pkgnarrows = {DEFAULT_PKGNARROW, 'installed', 'available', + 'autoremove', 'extras', 'obsoletes', 'recent', + 'upgrades'} + parser.add_argument('pkg_specs', nargs='*', metavar=_('PACKAGE'), + choices=pkgnarrows, default=DEFAULT_PKGNARROW, + action=OptionParser.PkgNarrowCallback, + help=_("Package specification")) + + def configure(self): + """Verify whether the command can run with given arguments.""" + # Check sub-command. 
+ try: + self.subcmd = self._subcmd_name2obj[self.opts.subcmd[0]] + except (dnf.cli.CliError, KeyError) as e: + self.cli.optparser.print_usage() + raise dnf.cli.CliError + self.subcmd.opts = self.opts + self.subcmd.reponame = self.opts.reponame[0] + self.subcmd.configure() + + def run(self): + """Execute the command with respect to given arguments *extcmds*.""" + self.subcmd.run_on_repo() + +class HelpCommand(Command): + """A class containing methods needed by the cli to execute the + help command. + """ + + aliases = ('help',) + summary = _('display a helpful usage message') + + @staticmethod + def set_argparser(parser): + parser.add_argument('cmd', nargs='?', metavar=_('COMMAND'), + help=_("{prog} command to get help for").format( + prog=dnf.util.MAIN_PROG_UPPER)) + + def run(self): + if (not self.opts.cmd + or self.opts.cmd not in self.cli.cli_commands): + self.cli.optparser.print_help() + else: + command = self.cli.cli_commands[self.opts.cmd] + self.cli.optparser.print_help(command(self)) + +class HistoryCommand(Command): + """A class containing methods needed by the cli to execute the + history command. + """ + + aliases = ('history', 'hist') + summary = _('display, or use, the transaction history') + + _CMDS = ['list', 'info', 'redo', 'undo', 'rollback', 'userinstalled'] + + transaction_ids = set() + merged_transaction_ids = set() + + @staticmethod + def set_argparser(parser): + parser.add_argument('transactions_action', nargs='?', metavar="COMMAND", + help="Available commands: {} (default), {}".format( + HistoryCommand._CMDS[0], + ", ".join(HistoryCommand._CMDS[1:]))) + parser.add_argument('transactions', nargs='*', metavar="TRANSACTION", + help="Transaction ID (, 'last' or 'last-' " + "for one transaction, .. 
" + "for range)") + + def configure(self): + if not self.opts.transactions_action: + # no positional argument given + self.opts.transactions_action = self._CMDS[0] + elif self.opts.transactions_action not in self._CMDS: + # first positional argument is not a command + self.opts.transactions.insert(0, self.opts.transactions_action) + self.opts.transactions_action = self._CMDS[0] + + require_one_transaction_id = False + require_one_transaction_id_msg = _("Found more than one transaction ID.\n" + "'{}' requires one transaction ID or package name." + ).format(self.opts.transactions_action) + demands = self.cli.demands + if self.opts.transactions_action in ['redo', 'undo', 'rollback']: + demands.root_user = True + require_one_transaction_id = True + if not self.opts.transactions: + msg = _('No transaction ID or package name given.') + logger.critical(msg) + raise dnf.cli.CliError(msg) + elif len(self.opts.transactions) > 1: + logger.critical(require_one_transaction_id_msg) + raise dnf.cli.CliError(require_one_transaction_id_msg) + demands.available_repos = True + _checkGPGKey(self.base, self.cli) + else: + demands.fresh_metadata = False + demands.sack_activation = True + if not os.access(self.base.history.path, os.R_OK): + msg = _("You don't have access to the history DB.") + logger.critical(msg) + raise dnf.cli.CliError(msg) + self.transaction_ids = self._args2transaction_ids(self.merged_transaction_ids, + require_one_transaction_id, + require_one_transaction_id_msg) + + def get_error_output(self, error): + """Get suggestions for resolving the given error.""" + if isinstance(error, dnf.exceptions.TransactionCheckError): + if self.opts.transactions_action == 'undo': + id_, = self.opts.transactions + return (_('Cannot undo transaction %s, doing so would result ' + 'in an inconsistent package database.') % id_,) + elif self.opts.transactions_action == 'rollback': + id_, = (self.opts.transactions if self.opts.transactions[0] != 'force' + else self.opts.transactions[1:]) + 
return (_('Cannot rollback transaction %s, doing so would ' + 'result in an inconsistent package database.') % id_,) + + return Command.get_error_output(self, error) + + def _hcmd_redo(self, extcmds): + old = self.base.history_get_transaction(extcmds) + if old is None: + return 1, ['Failed history redo'] + tm = dnf.util.normalize_time(old.beg_timestamp) + print('Repeating transaction %u, from %s' % (old.tid, tm)) + self.output.historyInfoCmdPkgsAltered(old) + + for i in old.packages(): + pkgs = list(self.base.sack.query().filter(nevra=str(i), reponame=i.from_repo)) + if i.action in dnf.transaction.FORWARD_ACTIONS: + if not pkgs: + logger.info(_('No package %s available.'), + self.output.term.bold(ucd(str(i)))) + return 1, ['An operation cannot be redone'] + pkg = pkgs[0] + self.base.install(str(pkg)) + elif i.action == libdnf.transaction.TransactionItemAction_REMOVE: + if not pkgs: + # package was removed already, we can skip removing it again + continue + pkg = pkgs[0] + self.base.remove(str(pkg)) + + self.base.resolve() + self.base.do_transaction() + + def _hcmd_undo(self, extcmds): + try: + return self.base.history_undo_transaction(extcmds[0]) + except dnf.exceptions.Error as err: + return 1, [str(err)] + + def _hcmd_rollback(self, extcmds): + try: + return self.base.history_rollback_transaction(extcmds[0]) + except dnf.exceptions.Error as err: + return 1, [str(err)] + + def _hcmd_userinstalled(self): + """Execute history userinstalled command.""" + pkgs = tuple(self.base.iter_userinstalled()) + return self.output.listPkgs(pkgs, 'Packages installed by user', 'nevra') + + def _args2transaction_ids(self, merged_ids=set(), + require_one_trans_id=False, require_one_trans_id_msg=''): + """Convert commandline arguments to transaction ids""" + + def str2transaction_id(s): + if s == 'last': + s = '0' + elif s.startswith('last-'): + s = s[4:] + transaction_id = int(s) + if transaction_id <= 0: + transaction_id += self.output.history.last().tid + return transaction_id + + 
transaction_ids = set() + for t in self.opts.transactions: + if '..' in t: + try: + begin_transaction_id, end_transaction_id = t.split('..', 2) + except ValueError: + logger.critical( + _("Invalid transaction ID range definition '{}'.\n" + "Use '..'." + ).format(t)) + raise dnf.cli.CliError + cant_convert_msg = _("Can't convert '{}' to transaction ID.\n" + "Use '', 'last', 'last-'.") + try: + begin_transaction_id = str2transaction_id(begin_transaction_id) + except ValueError: + logger.critical(_(cant_convert_msg).format(begin_transaction_id)) + raise dnf.cli.CliError + try: + end_transaction_id = str2transaction_id(end_transaction_id) + except ValueError: + logger.critical(_(cant_convert_msg).format(end_transaction_id)) + raise dnf.cli.CliError + if require_one_trans_id and begin_transaction_id != end_transaction_id: + logger.critical(require_one_trans_id_msg) + raise dnf.cli.CliError + if begin_transaction_id > end_transaction_id: + begin_transaction_id, end_transaction_id = \ + end_transaction_id, begin_transaction_id + merged_ids.add((begin_transaction_id, end_transaction_id)) + transaction_ids.update(range(begin_transaction_id, end_transaction_id + 1)) + else: + try: + transaction_ids.add(str2transaction_id(t)) + except ValueError: + # not a transaction id, assume it's package name + transact_ids_from_pkgname = self.output.history.search([t]) + if transact_ids_from_pkgname: + transaction_ids.update(transact_ids_from_pkgname) + else: + msg = _("No transaction which manipulates package '{}' was found." 
+ ).format(t) + if require_one_trans_id: + logger.critical(msg) + raise dnf.cli.CliError + else: + logger.info(msg) + + return sorted(transaction_ids, reverse=True) + + def run(self): + vcmd = self.opts.transactions_action + + ret = None + if vcmd == 'list' and (self.transaction_ids or not self.opts.transactions): + ret = self.output.historyListCmd(self.transaction_ids) + elif vcmd == 'info' and (self.transaction_ids or not self.opts.transactions): + ret = self.output.historyInfoCmd(self.transaction_ids, self.opts.transactions, + self.merged_transaction_ids) + elif vcmd == 'undo': + ret = self._hcmd_undo(self.transaction_ids) + elif vcmd == 'redo': + ret = self._hcmd_redo(self.transaction_ids) + elif vcmd == 'rollback': + ret = self._hcmd_rollback(self.transaction_ids) + elif vcmd == 'userinstalled': + ret = self._hcmd_userinstalled() + + if ret is None: + return + (code, strs) = ret + if code == 2: + self.cli.demands.resolving = True + elif code != 0: + raise dnf.exceptions.Error(strs[0]) diff --git a/dnf/cli/commands/alias.py b/dnf/cli/commands/alias.py new file mode 100644 index 0000000..10f5886 --- /dev/null +++ b/dnf/cli/commands/alias.py @@ -0,0 +1,187 @@ +# alias.py +# Alias CLI command. +# +# Copyright (C) 2018 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import os.path + +import dnf.cli +import dnf.cli.aliases +from dnf.cli import commands +import dnf.conf +import dnf.exceptions +from dnf.i18n import _ + +logger = logging.getLogger('dnf') + + +class AliasCommand(commands.Command): + aliases = ('alias',) + summary = _('List or create command aliases') + + @staticmethod + def set_argparser(parser): + enable_group = parser.add_mutually_exclusive_group() + enable_group.add_argument( + '--enable-resolving', default=False, action='store_true', + help=_('enable aliases resolving')) + enable_group.add_argument( + '--disable-resolving', default=False, action='store_true', + help=_('disable aliases resolving')) + parser.add_argument("subcommand", nargs='?', default='list', + choices=['add', 'list', 'delete'], + help=_("action to do with aliases")) + parser.add_argument("alias", nargs="*", metavar="command[=result]", + help=_("alias definition")) + + def configure(self): + demands = self.cli.demands + if self.opts.subcommand in ('add', 'delete'): + demands.root_user = True + self.aliases_base = dnf.cli.aliases.Aliases() + self.aliases_base._load_aliases() + self.resolving_enabled = self.aliases_base.enabled + self._update_config_from_options() + + def _update_config_from_options(self): + enabled = None + if self.opts.enable_resolving: + enabled = True + logger.info(_("Aliases are now enabled")) + if self.opts.disable_resolving: + enabled = False + logger.info(_("Aliases are now disabled")) + + if enabled is not None: + if not os.path.exists(dnf.cli.aliases.ALIASES_CONF_PATH): + open(dnf.cli.aliases.ALIASES_CONF_PATH, 'w').close() + 
dnf.conf.BaseConfig.write_raw_configfile( + dnf.cli.aliases.ALIASES_CONF_PATH, + 'main', None, {'enabled': enabled}) + if not self.aliases_base._disabled_by_environ(): + self.aliases_base.enabled = enabled + + def _parse_option_alias(self): + new_aliases = {} + for alias in self.opts.alias: + alias = alias.split('=', 1) + cmd = alias[0].strip() + if len(cmd.split()) != 1: + logger.warning(_("Invalid alias key: %s"), cmd) + continue + if cmd.startswith('-'): + logger.warning(_("Invalid alias key: %s"), cmd) + continue + if len(alias) == 1: + logger.warning(_("Alias argument has no value: %s"), cmd) + continue + new_aliases[cmd] = alias[1].split() + return new_aliases + + def _load_user_aliases(self): + if not os.path.exists(dnf.cli.aliases.ALIASES_USER_PATH): + open(dnf.cli.aliases.ALIASES_USER_PATH, 'w').close() + try: + conf = dnf.cli.aliases.AliasesConfig( + dnf.cli.aliases.ALIASES_USER_PATH) + except dnf.exceptions.ConfigError as e: + logger.warning(_('Config error: %s'), e) + return None + return conf + + def _store_user_aliases(self, user_aliases, enabled): + fileobj = open(dnf.cli.aliases.ALIASES_USER_PATH, 'w') + output = "[main]\n" + output += "enabled = {}\n\n".format(enabled) + output += "[aliases]\n" + for key, value in user_aliases.items(): + output += "{} = {}\n".format(key, ' '.join(value)) + fileobj.write(output) + + def add_aliases(self, aliases): + conf = self._load_user_aliases() + user_aliases = conf.aliases + if user_aliases is None: + return + + user_aliases.update(aliases) + + self._store_user_aliases(user_aliases, conf.enabled) + logger.info(_("Aliases added: %s"), ', '.join(aliases.keys())) + + def remove_aliases(self, cmds): + conf = self._load_user_aliases() + user_aliases = conf.aliases + if user_aliases is None: + return + + valid_cmds = [] + for cmd in cmds: + try: + del user_aliases[cmd] + valid_cmds.append(cmd) + except KeyError: + logger.info(_("Alias not found: %s"), cmd) + + self._store_user_aliases(user_aliases, conf.enabled) + 
logger.info(_("Aliases deleted: %s"), ', '.join(valid_cmds)) + + def list_alias(self, cmd): + args = [cmd] + try: + args = self.aliases_base._resolve(args) + except dnf.exceptions.Error as e: + logger.error(_('%s, alias %s'), e, cmd) + else: + print(_("Alias %s='%s'") % (cmd, " ".join(args))) + + def run(self): + if not self.aliases_base.enabled: + logger.warning(_("Aliases resolving is disabled.")) + + if self.opts.subcommand == 'add': # Add new alias + aliases = self._parse_option_alias() + if not aliases: + raise dnf.exceptions.Error(_("No aliases specified.")) + self.add_aliases(aliases) + return + + if self.opts.subcommand == 'delete': # Remove alias + cmds = self.opts.alias + if cmds == []: + raise dnf.exceptions.Error(_("No alias specified.")) + self.remove_aliases(cmds) + return + + if not self.opts.alias: # List all aliases + if not self.aliases_base.aliases: + logger.info(_("No aliases defined.")) + return + for cmd in self.aliases_base.aliases: + self.list_alias(cmd) + else: # List alias by key + for cmd in self.opts.alias: + if cmd not in self.aliases_base.aliases: + logger.info(_("No match for alias: %s") % cmd) + continue + self.list_alias(cmd) diff --git a/dnf/cli/commands/autoremove.py b/dnf/cli/commands/autoremove.py new file mode 100644 index 0000000..23603f5 --- /dev/null +++ b/dnf/cli/commands/autoremove.py @@ -0,0 +1,76 @@ +# autoremove.py +# Autoremove CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. 
You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser +from dnf.i18n import _ + +import dnf.exceptions +import hawkey +import logging + +logger = logging.getLogger("dnf") + + +class AutoremoveCommand(commands.Command): + + nevra_forms = {'autoremove-n': hawkey.FORM_NAME, + 'autoremove-na': hawkey.FORM_NA, + 'autoremove-nevra': hawkey.FORM_NEVRA} + + aliases = ('autoremove',) + tuple(nevra_forms.keys()) + summary = _('remove all unneeded packages that were originally installed ' + 'as dependencies') + + @staticmethod + def set_argparser(parser): + parser.add_argument('packages', nargs='*', help=_('Package to remove'), + action=OptionParser.ParseSpecGroupFileCallback, + metavar=_('PACKAGE')) + + def configure(self): + demands = self.cli.demands + demands.resolving = True + demands.root_user = True + demands.sack_activation = True + + if any([self.opts.grp_specs, self.opts.pkg_specs, self.opts.filenames]): + self.base.conf.clean_requirements_on_remove = True + demands.allow_erasing = True + # disable all available repos to delete whole dependency tree + # instead of replacing removable package with available packages + demands.available_repos = False + else: + demands.available_repos = True + demands.fresh_metadata = False + + def run(self): + if any([self.opts.grp_specs, self.opts.pkg_specs, self.opts.filenames]): + forms = [self.nevra_forms[command] for command in self.opts.command + if command in list(self.nevra_forms.keys())] + + 
self.base.autoremove(forms, + self.opts.pkg_specs, + self.opts.grp_specs, + self.opts.filenames) + else: + self.base.autoremove() diff --git a/dnf/cli/commands/check.py b/dnf/cli/commands/check.py new file mode 100644 index 0000000..f49a339 --- /dev/null +++ b/dnf/cli/commands/check.py @@ -0,0 +1,147 @@ +# +# Copyright (C) 2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ +from dnf.cli import commands + +import argparse +import dnf.exceptions + + +class CheckCommand(commands.Command): + """A class containing methods needed by the cli to execute the check + command. 
+ """ + + aliases = ('check',) + summary = _('check for problems in the packagedb') + + @staticmethod + def set_argparser(parser): + parser.add_argument('--all', dest='check_types', + action='append_const', const='all', + help=_('show all problems; default')) + parser.add_argument('--dependencies', dest='check_types', + action='append_const', const='dependencies', + help=_('show dependency problems')) + parser.add_argument('--duplicates', dest='check_types', + action='append_const', const='duplicates', + help=_('show duplicate problems')) + parser.add_argument('--obsoleted', dest='check_types', + action='append_const', const='obsoleted', + help=_('show obsoleted packages')) + parser.add_argument('--provides', dest='check_types', + action='append_const', const='provides', + help=_('show problems with provides')) + # Add compatibility with yum but invisible in help + # In choices [] allows to return empty list if no argument otherwise it fails + parser.add_argument('check_yum_types', nargs='*', choices=[ + 'all', 'dependencies', 'duplicates', 'obsoleted', 'provides', []], + help=argparse.SUPPRESS) + + def configure(self): + self.cli.demands.sack_activation = True + if self.opts.check_yum_types: + if self.opts.check_types: + self.opts.check_types = self.opts.check_types + \ + self.opts.check_yum_types + else: + self.opts.check_types = self.opts.check_yum_types + if not self.opts.check_types: + self.opts.check_types = {'all'} + else: + self.opts.check_types = set(self.opts.check_types) + self.base.conf.disable_excludes += ["all"] + + def run(self): + output_set = set() + q = self.base.sack.query().installed() + + if self.opts.check_types.intersection({'all', 'dependencies'}): + sack = None + for pkg in q: + for require in pkg.requires: + if str(require).startswith('rpmlib'): + continue + if not len(q.filter(provides=[require])): + if str(require).startswith('('): + # rich deps can be only tested by solver + if sack is None: + sack = dnf.sack.rpmdb_sack(self.base) + 
selector = dnf.selector.Selector(sack) + selector.set(provides=str(require)) + goal = dnf.goal.Goal(sack) + goal.install(select=selector, optional=False) + solved = goal.run() + # there ase only @system repo in sack, therefore solved is only in case + # when rich deps doesn't require any additional package + if solved: + continue + msg = _("{} has missing requires of {}") + output_set.add(msg.format( + self.base.output.term.bold(pkg), + self.base.output.term.bold(require))) + for conflict in pkg.conflicts: + conflicted = q.filter(provides=[conflict], + name=str(conflict).split()[0]) + for conflict_pkg in conflicted: + msg = '{} has installed conflict "{}": {}' + output_set.add(msg.format( + self.base.output.term.bold(pkg), + self.base.output.term.bold(conflict), + self.base.output.term.bold(conflict_pkg))) + + if self.opts.check_types.intersection({'all', 'duplicates'}): + installonly = self.base._get_installonly_query(q) + dups = q.duplicated().difference(installonly)._name_dict() + for name, pkgs in dups.items(): + pkgs.sort() + for dup in pkgs[1:]: + msg = _("{} is a duplicate with {}").format( + self.base.output.term.bold(pkgs[0]), + self.base.output.term.bold(dup)) + output_set.add(msg) + + if self.opts.check_types.intersection({'all', 'obsoleted'}): + for pkg in q: + for obsolete in pkg.obsoletes: + obsoleted = q.filter(provides=[obsolete], + name=str(obsolete).split()[0]) + if len(obsoleted): + msg = _("{} is obsoleted by {}").format( + self.base.output.term.bold(obsoleted[0]), + self.base.output.term.bold(pkg)) + output_set.add(msg) + + if self.opts.check_types.intersection({'all', 'provides'}): + for pkg in q: + for provide in pkg.provides: + if pkg not in q.filter(provides=[provide]): + msg = _("{} provides {} but it cannot be found") + output_set.add(msg.format( + self.base.output.term.bold(pkg), + self.base.output.term.bold(provide))) + + for msg in sorted(output_set): + print(msg) + + if output_set: + raise dnf.exceptions.Error( + 'Check discovered {} 
problem(s)'.format(len(output_set))) diff --git a/dnf/cli/commands/clean.py b/dnf/cli/commands/clean.py new file mode 100644 index 0000000..6ad4850 --- /dev/null +++ b/dnf/cli/commands/clean.py @@ -0,0 +1,123 @@ +# clean.py +# Clean CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli import commands +from dnf.i18n import _, P_ +from dnf.yum import misc + +import dnf.cli +import dnf.exceptions +import dnf.lock +import dnf.logging +import dnf.repo +import logging +import os +import re +import time + +logger = logging.getLogger("dnf") + +# Dict mapping cmdline arguments to actual data types to be cleaned up +_CACHE_TYPES = { + 'metadata': ['metadata', 'dbcache', 'expire-cache'], + 'packages': ['packages'], + 'dbcache': ['dbcache'], + 'expire-cache': ['expire-cache'], + 'all': ['metadata', 'packages', 'dbcache'], +} + + +def _tree(dirpath): + """Traverse dirpath recursively and yield relative filenames.""" + for root, dirs, files in os.walk(dirpath): + base = os.path.relpath(root, dirpath) + for f in files: + path = os.path.join(base, f) + yield os.path.normpath(path) + + +def _filter(files, patterns): + """Yield those filenames that match any of the patterns.""" + return (f for f in files for p in patterns if re.match(p, f)) + + +def _clean(dirpath, files): + """Remove the given filenames from dirpath.""" + count = 0 + for f in files: + path = os.path.join(dirpath, f) + logger.log(dnf.logging.DDEBUG, _('Removing file %s'), path) + misc.unlink_f(path) + count += 1 + return count + + +def _cached_repos(files): + """Return the repo IDs that have some cached metadata around.""" + metapat = dnf.repo.CACHE_FILES['metadata'] + matches = (re.match(metapat, f) for f in files) + return set(m.group('repoid') for m in matches if m) + + +class CleanCommand(commands.Command): + """A class containing methods needed by the cli to execute the + clean command. 
+ """ + + aliases = ('clean',) + summary = _('remove cached data') + + @staticmethod + def set_argparser(parser): + parser.add_argument('type', nargs='+', + choices=_CACHE_TYPES.keys(), + help=_('Metadata type to clean')) + + def run(self): + cachedir = self.base.conf.cachedir + md_lock = dnf.lock.build_metadata_lock(cachedir, True) + download_lock = dnf.lock.build_download_lock(cachedir, True) + rpmdb_lock = dnf.lock.build_rpmdb_lock(self.base.conf.persistdir, True) + while True: + try: + with md_lock and download_lock and rpmdb_lock: + types = set(t for c in self.opts.type for t in _CACHE_TYPES[c]) + files = list(_tree(cachedir)) + logger.debug(_('Cleaning data: ' + ' '.join(types))) + + if 'expire-cache' in types: + expired = _cached_repos(files) + self.base._repo_persistor.expired_to_add.update(expired) + types.remove('expire-cache') + logger.info(_('Cache was expired')) + + patterns = [dnf.repo.CACHE_FILES[t] for t in types] + count = _clean(cachedir, _filter(files, patterns)) + logger.info(P_('%d file removed', '%d files removed', count) % count) + return + except dnf.exceptions.LockError as e: + if not self.base.conf.exit_on_lock: + msg = _('Waiting for process with pid %d to finish.') % (e.pid) + logger.info(msg) + time.sleep(3) + else: + raise e diff --git a/dnf/cli/commands/deplist.py b/dnf/cli/commands/deplist.py new file mode 100644 index 0000000..a18c3bd --- /dev/null +++ b/dnf/cli/commands/deplist.py @@ -0,0 +1,36 @@ +# +# Copyright (C) 2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. 
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _
from dnf.cli.commands.repoquery import RepoQueryCommand


class DeplistCommand(RepoQueryCommand):
    """
    The command is an alias for 'dnf repoquery --deplist': it inherits the
    whole repoquery implementation and only forces the --deplist flag on.
    """

    aliases = ('deplist',)
    summary = _("List package's dependencies and what packages provide them")

    def configure(self):
        # Let repoquery set up its demands/options first, then flip the
        # deplist switch so run() behaves as `repoquery --deplist`.
        RepoQueryCommand.configure(self)
        self.opts.deplist = True
from __future__ import absolute_import
from dnf.cli import commands
from dnf.i18n import _


class DistroSyncCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    distro-sync command.
    """

    aliases = ('distro-sync', 'distrosync', 'distribution-synchronization', 'dsync')
    summary = _('synchronize installed packages to the latest available versions')

    @staticmethod
    def set_argparser(parser):
        # Zero or more package specs; empty list means "sync everything".
        parser.add_argument('package', nargs='*', help=_('Package to synchronize'))

    def configure(self):
        # distro-sync resolves and changes the installed set, so it needs an
        # active sack, available repos, depsolving and root privileges.
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        commands._checkGPGKey(self.base, self.cli)
        commands._checkEnabledRepo(self.base, self.opts.package)

    def run(self):
        # Delegates entirely to Base; an empty user list syncs all packages.
        return self.base.distro_sync_userlist(self.opts.package)
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _


class DowngradeCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    downgrade command.
    """

    aliases = ('downgrade', 'dg')
    summary = _("Downgrade a package")

    @staticmethod
    def set_argparser(parser):
        # The callback splits arguments into pkg_specs, grp_specs ("@group")
        # and filenames (local rpm paths) on self.opts.
        parser.add_argument('package', nargs='*', help=_('Package to downgrade'),
                            action=OptionParser.ParseSpecGroupFileCallback)

    def configure(self):
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True

        commands._checkGPGKey(self.base, self.cli)
        # Local rpm files don't need any enabled repository.
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)

    def run(self):
        # strict=False: a bad local rpm path is reported but doesn't abort the
        # whole transaction; group specs are re-prefixed with '@' for Base.
        file_pkgs = self.base.add_remote_rpms(self.opts.filenames, strict=False,
                                              progress=self.base.output.progress)
        return self.base.downgradePkgs(
            specs=self.opts.pkg_specs + ['@' + x for x in self.opts.grp_specs],
            file_pkgs=file_pkgs,
            strict=self.base.conf.strict)
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.comps import CompsQuery +from dnf.cli import commands +from dnf.i18n import _, ucd + +import dnf.cli +import dnf.exceptions +import dnf.util +import logging + +logger = logging.getLogger("dnf") + +class GroupCommand(commands.Command): + """ Single sub-command interface for most groups interaction. """ + + direct_commands = {'grouplist' : 'list', + 'groupinstall' : 'install', + 'groupupdate' : 'install', + 'groupremove' : 'remove', + 'grouperase' : 'remove', + 'groupinfo' : 'info'} + aliases = ('group', 'groups', 'grp') + tuple(direct_commands.keys()) + summary = _('display, or use, the groups information') + + _CMD_ALIASES = {'update' : 'upgrade', + 'erase' : 'remove'} + _MARK_CMDS = ('install', 'remove') + _GROUP_SUBCOMMANDS = ('summary', 'list', 'info', 'remove', 'install', 'upgrade', 'mark') + + + def _canonical(self): + # were we called with direct command? 
+ direct = self.direct_commands.get(self.opts.command) + if direct: + # canonize subcmd and args + if self.opts.subcmd is not None: + self.opts.args.insert(0, self.opts.subcmd) + self.opts.subcmd = direct + if self.opts.subcmd is None: + self.opts.subcmd = 'summary' + self.opts.subcmd = self._CMD_ALIASES.get(self.opts.subcmd, + self.opts.subcmd) + + def __init__(self, cli): + super(GroupCommand, self).__init__(cli) + self._remark = False + + def _assert_comps(self): + msg = _('No group data available for configured repositories.') + if not len(self.base.comps): + raise dnf.exceptions.CompsError(msg) + + def _environment_lists(self, patterns): + def available_pred(env): + env_found = self.base.history.env.get(env.id) + return not(env_found) + + self._assert_comps() + if patterns is None: + envs = self.base.comps.environments + else: + envs = self.base.comps.environments_by_pattern(",".join(patterns)) + + return dnf.util.mapall(list, dnf.util.partition(available_pred, envs)) + + def _group_lists(self, uservisible, patterns): + def installed_pred(group): + group_found = self.base.history.group.get(group.id) + if group_found: + return True + return False + installed = [] + available = [] + + self._assert_comps() + + if patterns is None: + grps = self.base.comps.groups + else: + grps = self.base.comps.groups_by_pattern(",".join(patterns)) + for grp in grps: + tgt_list = available + if installed_pred(grp): + tgt_list = installed + if not uservisible or grp.uservisible: + tgt_list.append(grp) + + return installed, available + + def _grp_setup(self): + self.base.read_comps(arch_filter=True) + + def _info(self, userlist): + for strng in userlist: + group_matched = False + + for env in self.base.comps.environments_by_pattern(strng): + self.output.display_groups_in_environment(env) + group_matched = True + + for group in self.base.comps.groups_by_pattern(strng): + self.output.display_pkgs_in_groups(group) + group_matched = True + + if not group_matched: + 
logger.error(_('Warning: Group %s does not exist.'), strng) + + return 0, [] + + def _list(self, userlist): + uservisible = 1 + showinstalled = 0 + showavailable = 0 + print_ids = self.base.conf.verbose or self.opts.ids + + while userlist: + if userlist[0] == 'hidden': + uservisible = 0 + userlist.pop(0) + elif userlist[0] == 'installed': + showinstalled = 1 + userlist.pop(0) + elif userlist[0] == 'available': + showavailable = 1 + userlist.pop(0) + elif userlist[0] == 'ids': + print_ids = True + userlist.pop(0) + else: + break + if self.opts.hidden: + uservisible = 0 + if self.opts.installed: + showinstalled = 1 + if self.opts.available: + showavailable = 1 + if not userlist: + userlist = None # Match everything... + + errs = False + if userlist is not None: + for group in userlist: + comps = self.base.comps + in_group = len(comps.groups_by_pattern(group)) > 0 + in_environment = len(comps.environments_by_pattern(group)) > 0 + if not in_group and not in_environment: + logger.error(_('Warning: No groups match:') + '\n %s', + group) + errs = True + if errs: + return 0, [] + + env_inst, env_avail = self._environment_lists(userlist) + installed, available = self._group_lists(uservisible, userlist) + + def _out_grp(sect, group): + if not done: + print(sect) + msg = ' %s' % group.ui_name + if print_ids: + msg += ' (%s)' % group.id + if group.lang_only: + msg += ' [%s]' % group.lang_only + print('{}'.format(msg)) + + def _out_env(sect, envs): + if envs: + print(sect) + for e in envs: + msg = ' %s' % e.ui_name + if print_ids: + msg += ' (%s)' % e.id + print(msg) + + if not showinstalled: + _out_env(_('Available Environment Groups:'), env_avail) + if not showavailable: + _out_env(_('Installed Environment Groups:'), env_inst) + + if not showavailable: + done = False + for group in installed: + if group.lang_only: + continue + _out_grp(_('Installed Groups:'), group) + done = True + + done = False + for group in installed: + if not group.lang_only: + continue + 
_out_grp(_('Installed Language Groups:'), group) + done = True + + if showinstalled: + return 0, [] + + done = False + for group in available: + if group.lang_only: + continue + _out_grp(_('Available Groups:'), group) + done = True + + done = False + for group in available: + if not group.lang_only: + continue + _out_grp(_('Available Language Groups:'), group) + done = True + + return 0, [] + + def _mark_install(self, patterns): + q = CompsQuery(self.base.comps, self.base.history, + CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS, + CompsQuery.AVAILABLE | CompsQuery.INSTALLED) + solver = self.base._build_comps_solver() + res = q.get(*patterns) + + if self.opts.with_optional: + types = tuple(self.base.conf.group_package_types + ['optional']) + else: + types = tuple(self.base.conf.group_package_types) + pkg_types = self.base._translate_comps_pkg_types(types) + for env_id in res.environments: + dnf.comps.install_or_skip(solver._environment_install, env_id, pkg_types) + for group_id in res.groups: + dnf.comps.install_or_skip(solver._group_install, group_id, pkg_types) + + def _mark_remove(self, patterns): + q = CompsQuery(self.base.comps, self.base.history, + CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS, + CompsQuery.INSTALLED) + solver = self.base._build_comps_solver() + res = q.get(*patterns) + for env_id in res.environments: + assert dnf.util.is_string_type(env_id) + solver._environment_remove(env_id) + for grp_id in res.groups: + assert dnf.util.is_string_type(grp_id) + solver._group_remove(grp_id) + + def _mark_subcmd(self, extcmds): + if extcmds[0] in self._MARK_CMDS: + return extcmds[0], extcmds[1:] + return 'install', extcmds + + def _summary(self, userlist): + uservisible = 1 + if len(userlist) > 0: + if userlist[0] == 'hidden': + uservisible = 0 + userlist.pop(0) + if self.opts.hidden: + uservisible = 0 + if not userlist: + userlist = None # Match everything... 
+ + installed, available = self._group_lists(uservisible, userlist) + + def _out_grp(sect, num): + if not num: + return + logger.info('%s %u', sect, num) + done = 0 + for group in installed: + if group.lang_only: + continue + done += 1 + _out_grp(_('Installed Groups:'), done) + + done = 0 + for group in installed: + if not group.lang_only: + continue + done += 1 + _out_grp(_('Installed Language Groups:'), done) + + done = False + for group in available: + if group.lang_only: + continue + done += 1 + _out_grp(_('Available Groups:'), done) + + done = False + for group in available: + if not group.lang_only: + continue + done += 1 + _out_grp(_('Available Language Groups:'), done) + + return 0, [] + + @staticmethod + def set_argparser(parser): + parser.add_argument('--with-optional', action='store_true', + help=_("include optional packages from group")) + grpparser = parser.add_mutually_exclusive_group() + grpparser.add_argument('--hidden', action='store_true', + help=_("show also hidden groups")) + grpparser.add_argument('--installed', action='store_true', + help=_("show only installed groups")) + grpparser.add_argument('--available', action='store_true', + help=_("show only available groups")) + grpparser.add_argument('--ids', action='store_true', + help=_("show also ID of groups")) + parser.add_argument('subcmd', nargs='?', metavar='COMMAND', + help=_('available subcommands: {} (default), {}').format( + GroupCommand._GROUP_SUBCOMMANDS[0], + ', '.join(GroupCommand._GROUP_SUBCOMMANDS[1:]))) + parser.add_argument('args', nargs='*', metavar='COMMAND_ARG', + help=_('argument for group subcommand')) + + def configure(self): + self._canonical() + + cmd = self.opts.subcmd + args = self.opts.args + + if cmd not in self._GROUP_SUBCOMMANDS: + logger.critical(_('Invalid groups sub-command, use: %s.'), + ", ".join(self._GROUP_SUBCOMMANDS)) + raise dnf.cli.CliError + if cmd in ('install', 'remove', 'mark', 'info') and not args: + self.cli.optparser.print_help(self) + raise 
dnf.cli.CliError + + demands = self.cli.demands + demands.sack_activation = True + if cmd in ('install', 'mark', 'remove', 'upgrade'): + demands.root_user = True + demands.resolving = True + if cmd == 'remove': + demands.allow_erasing = True + demands.available_repos = False + else: + demands.available_repos = True + + commands._checkEnabledRepo(self.base) + + if cmd in ('install', 'upgrade'): + commands._checkGPGKey(self.base, self.cli) + + def run(self): + cmd = self.opts.subcmd + extcmds = self.opts.args + + self._grp_setup() + + if cmd == 'summary': + return self._summary(extcmds) + if cmd == 'list': + return self._list(extcmds) + if cmd == 'info': + return self._info(extcmds) + if cmd == 'mark': + (subcmd, extcmds) = self._mark_subcmd(extcmds) + if subcmd == 'remove': + return self._mark_remove(extcmds) + else: + assert subcmd == 'install' + return self._mark_install(extcmds) + + if cmd == 'install': + if self.opts.with_optional: + types = tuple(self.base.conf.group_package_types + ['optional']) + else: + types = tuple(self.base.conf.group_package_types) + + self._remark = True + try: + return self.base.env_group_install(extcmds, types, + self.base.conf.strict) + except dnf.exceptions.MarkingError as e: + msg = _('No package %s available.') + logger.info(msg, self.base.output.term.bold(e)) + raise dnf.exceptions.PackagesNotAvailableError( + _("Unable to find a mandatory group package.")) + if cmd == 'upgrade': + return self.base.env_group_upgrade(extcmds) + if cmd == 'remove': + for arg in extcmds: + try: + self.base.env_group_remove([arg]) + except dnf.exceptions.Error: + pass + + def run_transaction(self): + if not self._remark: + return + goal = self.base._goal + history = self.base.history + names = goal.group_members + for pkg in self.base.sack.query().installed().filterm(name=names): + reason = history.rpm.get_reason(pkg) + history.set_reason(pkg, goal.group_reason(pkg, reason)) diff --git a/dnf/cli/commands/install.py b/dnf/cli/commands/install.py new 
from __future__ import absolute_import
from __future__ import unicode_literals

import logging
from itertools import chain

import hawkey

import dnf.exceptions
from dnf.cli import commands
from dnf.cli.option_parser import OptionParser
from dnf.i18n import _

logger = logging.getLogger('dnf')


class InstallCommand(commands.Command):
    """A class containing methods needed by the cli to execute the
    install command.
    """
    # Alias commands that force a specific NEVRA parsing form on all specs.
    nevra_forms = {'install-n': hawkey.FORM_NAME,
                   'install-na': hawkey.FORM_NA,
                   'install-nevra': hawkey.FORM_NEVRA}
    # Provide name used to look up alternative packages for a failed spec.
    alternatives_provide = 'alternative-for({})'

    aliases = ('install', 'localinstall', 'in') + tuple(nevra_forms.keys())
    summary = _('install a package or packages on your system')

    @staticmethod
    def set_argparser(parser):
        # The callback splits arguments into pkg_specs, grp_specs ('@...')
        # and filenames (local rpm paths) on self.opts.
        parser.add_argument('package', nargs='+', metavar=_('PACKAGE'),
                            action=OptionParser.ParseSpecGroupFileCallback,
                            help=_('Package to install'))

    def configure(self):
        """Verify that conditions are met so that this command can run.
        That there are enabled repositories with gpg keys, and that
        this command is called with appropriate arguments.
        """
        demands = self.cli.demands
        demands.sack_activation = True
        demands.available_repos = True
        demands.resolving = True
        demands.root_user = True
        commands._checkGPGKey(self.base, self.cli)
        # Local rpm files don't require an enabled repository.
        if not self.opts.filenames:
            commands._checkEnabledRepo(self.base)

    def run(self):
        # The order below is significant: module specs first, then local
        # files, then groups, then plain package specs; strict mode aborts
        # at the first category that cannot proceed.
        err_pkgs = []
        errs = []
        error_module_specs = []

        nevra_forms = self._get_nevra_forms_from_command()

        self.cli._populate_update_security_filter(self.opts, self.base.sack.query())
        # `localinstall` accepts only rpm file paths; anything else is noise.
        if self.opts.command == ['localinstall'] and (self.opts.grp_specs or self.opts.pkg_specs):
            self._log_not_valid_rpm_file_paths(self.opts.grp_specs)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        skipped_grp_specs = []
        if self.opts.grp_specs and self.opts.command != ['localinstall']:
            if dnf.base.WITH_MODULES:
                # '@spec' arguments are tried as module specs first; specs
                # that match no module fall through to comps group install.
                try:
                    module_base = dnf.module.module_base.ModuleBase(self.base)
                    module_base.install(self.opts.grp_specs, strict=self.base.conf.strict)
                except dnf.exceptions.MarkingErrors as e:
                    if e.no_match_group_specs:
                        for e_spec in e.no_match_group_specs:
                            skipped_grp_specs.append(e_spec)
                    if e.error_group_specs:
                        for e_spec in e.error_group_specs:
                            error_module_specs.append("@" + e_spec)
                    module_depsolv_errors = e.module_depsolv_errors
                    if module_depsolv_errors:
                        logger.error(dnf.module.module_base.format_modular_solver_errors(
                            module_depsolv_errors[0]))
            else:
                skipped_grp_specs = self.opts.grp_specs
        # File paths cannot be combined with forced NEVRA forms.
        if self.opts.filenames and nevra_forms:
            self._inform_not_a_valid_combination(self.opts.filenames)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        else:
            err_pkgs = self._install_files()

        if skipped_grp_specs and nevra_forms:
            self._inform_not_a_valid_combination(skipped_grp_specs)
            if self.base.conf.strict:
                raise dnf.exceptions.Error(_('Nothing to do.'))
        elif skipped_grp_specs and self.opts.command != ['localinstall']:
            self._install_groups(skipped_grp_specs)

        if self.opts.command != ['localinstall']:
            errs = self._install_packages(nevra_forms)

        # In strict mode, any accumulated failure aborts the transaction.
        if (len(errs) != 0 or len(err_pkgs) != 0 or error_module_specs) and self.base.conf.strict:
            raise dnf.exceptions.PackagesNotAvailableError(_("Unable to find a match"),
                                                           pkg_spec=' '.join(errs),
                                                           packages=err_pkgs)

    def _get_nevra_forms_from_command(self):
        # Returns the forced hawkey forms for alias commands (install-n/...).
        return [self.nevra_forms[command]
                for command in self.opts.command
                if command in list(self.nevra_forms.keys())
                ]

    def _log_not_valid_rpm_file_paths(self, grp_specs):
        # Report every non-file argument passed to `localinstall`.
        group_names = map(lambda g: '@' + g, grp_specs)
        for pkg in chain(self.opts.pkg_specs, group_names):
            msg = _('Not a valid rpm file path: %s')
            logger.info(msg, self.base.output.term.bold(pkg))

    def _inform_not_a_valid_combination(self, forms):
        for form in forms:
            msg = _('Not a valid form: %s')
            logger.warning(msg, self.base.output.term.bold(form))

    def _install_files(self):
        """Install local rpm files; return the packages that failed."""
        err_pkgs = []
        strict = self.base.conf.strict
        for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=strict,
                                             progress=self.base.output.progress):
            try:
                self.base.package_install(pkg, strict=strict)
            except dnf.exceptions.MarkingError:
                msg = _('No match for argument: %s')
                logger.info(msg, self.base.output.term.bold(pkg.location))
                err_pkgs.append(pkg)

        return err_pkgs

    def _install_groups(self, grp_specs):
        """Install comps groups; in strict mode failures propagate."""
        self.base.read_comps(arch_filter=True)
        try:
            self.base.env_group_install(grp_specs,
                                        tuple(self.base.conf.group_package_types),
                                        strict=self.base.conf.strict)
        except dnf.exceptions.Error:
            if self.base.conf.strict:
                raise

    def _report_alternatives(self, pkg_spec):
        # Suggest packages providing "alternative-for(<spec>)" when the
        # requested spec could not be matched.
        query = self.base.sack.query().filterm(
            provides=self.alternatives_provide.format(pkg_spec))
        if query:
            msg = _('There are following alternatives for "{0}": {1}')
            logger.info(msg.format(
                pkg_spec,
                ', '.join(sorted(set([alt.name for alt in query])))))

    def _install_packages(self, nevra_forms):
        """Install plain package specs; return the specs that failed."""
        errs = []
        strict = self.base.conf.strict
        for pkg_spec in self.opts.pkg_specs:
            try:
                self.base.install(pkg_spec, strict=strict, forms=nevra_forms)
            except dnf.exceptions.MarkingError as e:
                msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec))
                logger.info(msg)
                self.base._report_icase_hint(pkg_spec)
                self._report_alternatives(pkg_spec)
                errs.append(pkg_spec)

        return errs
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli import commands
from dnf.i18n import _

import argparse
import dnf.cli
import dnf.exceptions
import dnf.util
import logging

logger = logging.getLogger("dnf")


class MakeCacheCommand(commands.Command):
    # Downloads/refreshes repository metadata; `--timer` (or the legacy
    # positional `timer`) enables the background-friendly timer mode.
    aliases = ('makecache', 'mc')
    summary = _('generate the metadata cache')

    @staticmethod
    def set_argparser(parser):
        parser.add_argument('--timer', action='store_true', dest="timer_opt")
        # compatibility with dnf < 2.0
        parser.add_argument('timer', nargs='?', choices=['timer'],
                            metavar='timer', help=argparse.SUPPRESS)

    def run(self):
        # Timer mode is on if either the legacy positional argument or the
        # --timer flag was given.
        timer = self.opts.timer is not None or self.opts.timer_opt
        msg = _("Making cache files for all metadata files.")
        logger.debug(msg)
        return self.base.update_cache(timer)
See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import print_function +from __future__ import unicode_literals + +import libdnf.transaction + +from dnf.i18n import _ +from dnf.cli import commands + +import dnf +import functools +import logging + +logger = logging.getLogger("dnf") + + +class MarkCommand(commands.Command): + + aliases = ('mark',) + summary = _('mark or unmark installed packages as installed by user.') + + @staticmethod + def set_argparser(parser): + parser.add_argument('mark', nargs=1, choices=['install', 'remove', 'group'], + help=_("install: mark as installed by user\n" + "remove: unmark as installed by user\n" + "group: mark as installed by group")) + parser.add_argument('package', nargs='+', metavar="PACKAGE", + help=_("Package specification")) + + def _mark_install(self, pkg): + self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_USER) + logger.info(_('%s marked as user installed.'), str(pkg)) + + def _mark_remove(self, pkg): + self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_DEPENDENCY) + logger.info(_('%s unmarked as user installed.'), str(pkg)) + + def _mark_group(self, pkg): + self.base.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_GROUP) + logger.info(_('%s marked as group installed.'), str(pkg)) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.root_user = True + demands.available_repos = False + demands.resolving = False + + def run(self): + cmd = self.opts.mark[0] + 
pkgs = self.opts.package + + mark_func = functools.partial(getattr(self, '_mark_' + cmd)) + + notfound = [] + for pkg in pkgs: + subj = dnf.subject.Subject(pkg) + q = subj.get_best_query(self.base.sack) + for pkg in q: + mark_func(pkg) + if len(q) == 0: + notfound.append(pkg) + + if notfound: + logger.error(_('Error:')) + for pkg in notfound: + logger.error(_('Package %s is not installed.'), pkg) + raise dnf.cli.CliError + + old = self.base.history.last() + if old is None: + rpmdb_version = self.sack._rpmdb_version() + else: + rpmdb_version = old.end_rpmdb_version + + self.base.history.beg(rpmdb_version, [], []) + self.base.history.end(rpmdb_version) diff --git a/dnf/cli/commands/module.py b/dnf/cli/commands/module.py new file mode 100644 index 0000000..07883af --- /dev/null +++ b/dnf/cli/commands/module.py @@ -0,0 +1,374 @@ +# supplies the 'module' command. +# +# Copyright (C) 2014-2017 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import print_function + +from dnf.cli import commands, CliError +from dnf.i18n import _ +from dnf.module.exceptions import NoModuleException +from dnf.util import logger +import dnf.util + +import sys +import os + +import hawkey +import libdnf +import dnf.module.module_base +import dnf.exceptions + + +class ModuleCommand(commands.Command): + class SubCommand(commands.Command): + + def __init__(self, cli): + super(ModuleCommand.SubCommand, self).__init__(cli) + self.module_base = dnf.module.module_base.ModuleBase(self.base) + + def _get_modules_from_name_stream_specs(self): + modules_from_specs = set() + for module_spec in self.opts.module_spec: + __, nsvcap = self.module_base._get_modules(module_spec) + name = nsvcap.name if nsvcap.name else "" + stream = nsvcap.stream if nsvcap.stream else "" + if (nsvcap.version and nsvcap.version != -1) or nsvcap.context: + logger.info(_("Only module name, stream, architecture or profile is used. " + "Ignoring unneeded information in argument: '{}'").format( + module_spec)) + arch = nsvcap.arch if nsvcap.arch else "" + modules = self.base._moduleContainer.query(name, stream, "", "", arch) + modules_from_specs.update(modules) + return modules_from_specs + + def _get_module_artifact_names(self, use_modules, skip_modules): + artifacts = set() + pkg_names = set() + for module in use_modules: + if module not in skip_modules: + if self.base._moduleContainer.isModuleActive(module): + artifacts.update(module.getArtifacts()) + for artifact in artifacts: + subj = hawkey.Subject(artifact) + for nevra_obj in subj.get_nevra_possibilities( + forms=[hawkey.FORM_NEVRA]): + if nevra_obj.name: + pkg_names.add(nevra_obj.name) + return pkg_names, artifacts + + class ListSubCommand(SubCommand): + + aliases = ('list',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + + def run_on_module(self): + mods = self.module_base + + if self.opts.enabled: + output = 
mods._get_brief_description( + self.opts.module_spec, libdnf.module.ModulePackageContainer.ModuleState_ENABLED) + elif self.opts.disabled: + output = mods._get_brief_description( + self.opts.module_spec, + libdnf.module.ModulePackageContainer.ModuleState_DISABLED) + elif self.opts.installed: + output = mods._get_brief_description( + self.opts.module_spec, + libdnf.module.ModulePackageContainer.ModuleState_INSTALLED) + else: + output = mods._get_brief_description( + self.opts.module_spec, libdnf.module.ModulePackageContainer.ModuleState_UNKNOWN) + if output: + print(output) + return + if self.opts.module_spec: + msg = _('No matching Modules to list') + raise dnf.exceptions.Error(msg) + + class InfoSubCommand(SubCommand): + + aliases = ('info',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + + def run_on_module(self): + if self.opts.verbose: + output = self.module_base._get_full_info(self.opts.module_spec) + elif self.opts.profile: + output = self.module_base._get_info_profiles(self.opts.module_spec) + else: + output = self.module_base._get_info(self.opts.module_spec) + if output: + print(output) + else: + raise dnf.exceptions.Error(_('No matching Modules to list')) + + class EnableSubCommand(SubCommand): + + aliases = ('enable',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_module(self): + try: + self.module_base.enable(self.opts.module_spec) + except dnf.exceptions.MarkingErrors as e: + if self.base.conf.strict: + if e.no_match_group_specs or e.error_group_specs: + raise e + if e.module_depsolv_errors and e.module_depsolv_errors[1] != \ + libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS: + raise e + logger.error(str(e)) + + class DisableSubCommand(SubCommand): + + aliases = ('disable',) + + def configure(self): + demands = 
self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_module(self): + try: + self.module_base.disable(self.opts.module_spec) + except dnf.exceptions.MarkingErrors as e: + if self.base.conf.strict: + if e.no_match_group_specs or e.error_group_specs: + raise e + if e.module_depsolv_errors and e.module_depsolv_errors[1] != \ + libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS: + raise e + logger.error(str(e)) + + class ResetSubCommand(SubCommand): + + aliases = ('reset',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_module(self): + try: + self.module_base.reset(self.opts.module_spec) + except dnf.exceptions.MarkingErrors as e: + if self.base.conf.strict: + if e.no_match_group_specs: + raise e + logger.error(str(e)) + + class InstallSubCommand(SubCommand): + + aliases = ('install',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_module(self): + try: + self.module_base.install(self.opts.module_spec, self.base.conf.strict) + except dnf.exceptions.MarkingErrors as e: + if self.base.conf.strict: + if e.no_match_group_specs or e.error_group_specs: + raise e + logger.error(str(e)) + + class UpdateSubCommand(SubCommand): + + aliases = ('update',) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + demands.resolving = True + demands.root_user = True + + def run_on_module(self): + module_specs = self.module_base.upgrade(self.opts.module_spec) + if module_specs: + raise NoModuleException(", ".join(module_specs)) + + class RemoveSubCommand(SubCommand): + + aliases = ('remove', 'erase',) + + def configure(self): + 
demands = self.cli.demands + demands.allow_erasing = True + demands.available_repos = True + demands.fresh_metadata = False + demands.resolving = True + demands.root_user = True + demands.sack_activation = True + + def run_on_module(self): + skipped_groups = self.module_base.remove(self.opts.module_spec) + if self.opts.all: + modules_from_specs = self._get_modules_from_name_stream_specs() + remove_names_from_spec, __ = self._get_module_artifact_names( + modules_from_specs, set()) + keep_names, __ = self._get_module_artifact_names( + self.base._moduleContainer.getModulePackages(), modules_from_specs) + remove_query = self.base.sack.query().installed().filterm( + name=remove_names_from_spec) + keep_query = self.base.sack.query().installed().filterm(name=keep_names) + for pkg in remove_query: + if pkg in keep_query: + msg = _("Package {} belongs to multiple modules, skipping").format(pkg) + logger.info(msg) + else: + self.base.goal.erase( + pkg, clean_deps=self.base.conf.clean_requirements_on_remove) + if not skipped_groups: + return + + logger.error(dnf.exceptions.MarkingErrors(no_match_group_specs=skipped_groups)) + + class ProvidesSubCommand(SubCommand): + + aliases = ("provides", ) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + + def run_on_module(self): + output = self.module_base._what_provides(self.opts.module_spec) + if output: + print(output) + + class RepoquerySubCommand(SubCommand): + + aliases = ("repoquery", ) + + def configure(self): + demands = self.cli.demands + demands.available_repos = True + demands.sack_activation = True + + def run_on_module(self): + modules_from_specs = set() + for module_spec in self.opts.module_spec: + modules, __ = self.module_base._get_modules(module_spec) + modules_from_specs.update(modules) + names_from_spec, spec_artifacts = self._get_module_artifact_names( + modules_from_specs, set()) + package_strings = set() + if self.opts.available or not 
self.opts.installed: + query = self.base.sack.query().available().filterm(nevra_strict=spec_artifacts) + for pkg in query: + package_strings.add(str(pkg)) + if self.opts.installed: + query = self.base.sack.query().installed().filterm(name=names_from_spec) + for pkg in query: + package_strings.add(str(pkg)) + + output = "\n".join(sorted(package_strings)) + print(output) + + + SUBCMDS = {ListSubCommand, InfoSubCommand, EnableSubCommand, + DisableSubCommand, ResetSubCommand, InstallSubCommand, UpdateSubCommand, + RemoveSubCommand, ProvidesSubCommand, RepoquerySubCommand} + + SUBCMDS_NOT_REQUIRED_ARG = {ListSubCommand} + + aliases = ("module",) + summary = _("Interact with Modules.") + + def __init__(self, cli): + super(ModuleCommand, self).__init__(cli) + subcmd_objs = (subcmd(cli) for subcmd in self.SUBCMDS) + self.subcmd = None + self._subcmd_name2obj = { + alias: subcmd for subcmd in subcmd_objs for alias in subcmd.aliases} + + def set_argparser(self, parser): + narrows = parser.add_mutually_exclusive_group() + narrows.add_argument('--enabled', dest='enabled', + action='store_true', + help=_("show only enabled modules")) + narrows.add_argument('--disabled', dest='disabled', + action='store_true', + help=_("show only disabled modules")) + narrows.add_argument('--installed', dest='installed', + action='store_true', + help=_("show only installed modules or packages")) + narrows.add_argument('--profile', dest='profile', + action='store_true', + help=_("show profile content")) + parser.add_argument('--available', dest='available', action='store_true', + help=_("show only available packages")) + narrows.add_argument('--all', dest='all', + action='store_true', + help=_("remove all modular packages")) + + subcommand_help = [subcmd.aliases[0] for subcmd in self.SUBCMDS] + parser.add_argument('subcmd', nargs=1, choices=subcommand_help, + help=_("Modular command")) + parser.add_argument('module_spec', metavar='module-spec', nargs='*', + help=_("Module specification")) + + def 
configure(self): + try: + self.subcmd = self._subcmd_name2obj[self.opts.subcmd[0]] + except (CliError, KeyError): + self.cli.optparser.print_usage() + raise CliError + self.subcmd.opts = self.opts + self.subcmd.configure() + + def run(self): + self.check_required_argument() + self.subcmd.run_on_module() + + def check_required_argument(self): + not_required_argument = [alias + for subcmd in self.SUBCMDS_NOT_REQUIRED_ARG + for alias in subcmd.aliases] + if self.opts.subcmd[0] not in not_required_argument: + if not self.opts.module_spec: + raise CliError( + _("{} {} {}: too few arguments").format(dnf.util.MAIN_PROG, + self.opts.command, + self.opts.subcmd[0])) diff --git a/dnf/cli/commands/reinstall.py b/dnf/cli/commands/reinstall.py new file mode 100644 index 0000000..2b3ceac --- /dev/null +++ b/dnf/cli/commands/reinstall.py @@ -0,0 +1,101 @@ +# reinstall.py +# Reinstall CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser +from dnf.i18n import _ + +import dnf.exceptions +import logging + +logger = logging.getLogger('dnf') + + +class ReinstallCommand(commands.Command): + """A class containing methods needed by the cli to execute the reinstall command. + """ + + aliases = ('reinstall', 'rei') + summary = _('reinstall a package') + + @staticmethod + def set_argparser(parser): + parser.add_argument('packages', nargs='+', help=_('Package to reinstall'), + action=OptionParser.ParseSpecGroupFileCallback, + metavar=_('PACKAGE')) + + def configure(self): + """Verify that conditions are met so that this command can + run. These include that the program is being run by the root + user, that there are enabled repositories with gpg keys, and + that this command is called with appropriate arguments. + """ + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + commands._checkGPGKey(self.base, self.cli) + if not self.opts.filenames: + commands._checkEnabledRepo(self.base) + + def run(self): + + # Reinstall files. + done = False + for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False, + progress=self.base.output.progress): + try: + self.base.package_reinstall(pkg) + except dnf.exceptions.MarkingError: + logger.info(_('No match for argument: %s'), + self.base.output.term.bold(pkg.location)) + else: + done = True + + # Reinstall packages. 
+ for pkg_spec in self.opts.pkg_specs + ['@' + x for x in self.opts.grp_specs]: + try: + self.base.reinstall(pkg_spec) + except dnf.exceptions.PackagesNotInstalledError as err: + for pkg in err.packages: + logger.info(_('Package %s available, but not installed.'), + self.output.term.bold(pkg.name)) + break + logger.info(_('No match for argument: %s'), + self.base.output.term.bold(pkg_spec)) + except dnf.exceptions.PackagesNotAvailableError as err: + for pkg in err.packages: + xmsg = '' + pkgrepo = self.base.history.repo(pkg) + if pkgrepo: + xmsg = _(' (from %s)') % pkgrepo + msg = _('Installed package %s%s not available.') + logger.info(msg, self.base.output.term.bold(pkg), + xmsg) + except dnf.exceptions.MarkingError: + assert False, 'Only the above marking errors are expected.' + else: + done = True + + if not done: + raise dnf.exceptions.Error(_('No packages marked for reinstall.')) diff --git a/dnf/cli/commands/remove.py b/dnf/cli/commands/remove.py new file mode 100644 index 0000000..f8059e4 --- /dev/null +++ b/dnf/cli/commands/remove.py @@ -0,0 +1,156 @@ +# remove_command.py +# Remove CLI command. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli import commands +from dnf.i18n import _ +from dnf.cli.option_parser import OptionParser +import dnf.base +import argparse +import hawkey +import dnf.exceptions +import logging + +logger = logging.getLogger("dnf") + + +class RemoveCommand(commands.Command): + """Remove command.""" + + nevra_forms = {'remove-n': hawkey.FORM_NAME, + 'remove-na': hawkey.FORM_NA, + 'remove-nevra': hawkey.FORM_NEVRA, + 'erase-n': hawkey.FORM_NAME, + 'erase-na': hawkey.FORM_NA, + 'erase-nevra': hawkey.FORM_NEVRA} + + aliases = ('remove', 'erase', 'rm') + tuple(nevra_forms.keys()) + summary = _('remove a package or packages from your system') + + @staticmethod + def set_argparser(parser): + mgroup = parser.add_mutually_exclusive_group() + mgroup.add_argument('--duplicates', action='store_true', + dest='duplicated', + help=_('remove duplicated packages')) + mgroup.add_argument('--duplicated', action='store_true', + help=argparse.SUPPRESS) + mgroup.add_argument('--oldinstallonly', action='store_true', + help=_( + 'remove installonly packages over the limit')) + parser.add_argument('packages', nargs='*', help=_('Package to remove'), + action=OptionParser.ParseSpecGroupFileCallback, + metavar=_('PACKAGE')) + + def configure(self): + demands = self.cli.demands + # disable all available repos to delete whole dependency tree + # instead of replacing removable package with available packages + demands.resolving = True + demands.root_user = True + demands.sack_activation = True + if self.opts.duplicated: + demands.available_repos = True + elif dnf.base.WITH_MODULES and self.opts.grp_specs: + demands.available_repos = True + demands.fresh_metadata = False + demands.allow_erasing = True 
+ else: + demands.allow_erasing = True + demands.available_repos = False + + def run(self): + + forms = [self.nevra_forms[command] for command in self.opts.command + if command in list(self.nevra_forms.keys())] + + # local pkgs not supported in erase command + self.opts.pkg_specs += self.opts.filenames + done = False + + if self.opts.duplicated: + q = self.base.sack.query() + instonly = self.base._get_installonly_query(q.installed()) + dups = q.duplicated().difference(instonly) + if not dups: + raise dnf.exceptions.Error(_('No duplicated packages found for removal.')) + + for (name, arch), pkgs_list in dups._na_dict().items(): + if len(pkgs_list) < 2: + continue + pkgs_list.sort(reverse=True) + try: + self.base.reinstall(str(pkgs_list[0])) + except dnf.exceptions.PackagesNotAvailableError: + xmsg = '' + msg = _('Installed package %s%s not available.') + logger.warning(msg, self.base.output.term.bold(str(pkgs_list[0])), xmsg) + + for pkg in pkgs_list[1:]: + self.base.package_remove(pkg) + return + + if self.opts.oldinstallonly: + q = self.base.sack.query() + instonly = self.base._get_installonly_query(q.installed()).latest( + - self.base.conf.installonly_limit) + if instonly: + for pkg in instonly: + self.base.package_remove(pkg) + else: + raise dnf.exceptions.Error( + _('No old installonly packages found for removal.')) + return + + # Remove groups. 
+ if self.opts.grp_specs and forms: + for grp_spec in self.opts.grp_specs: + msg = _('Not a valid form: %s') + logger.warning(msg, self.base.output.term.bold(grp_spec)) + elif self.opts.grp_specs: + if dnf.base.WITH_MODULES: + module_base = dnf.module.module_base.ModuleBase(self.base) + skipped_grps = module_base.remove(self.opts.grp_specs) + if len(self.opts.grp_specs) != len(skipped_grps): + done = True + else: + skipped_grps = self.opts.grp_specs + + if skipped_grps: + self.base.read_comps(arch_filter=True) + for group in skipped_grps: + try: + if self.base.env_group_remove([group]): + done = True + except dnf.exceptions.Error: + pass + + for pkg_spec in self.opts.pkg_specs: + try: + self.base.remove(pkg_spec, forms=forms) + except dnf.exceptions.MarkingError as e: + msg = '{}: {}'.format(e.value, self.base.output.term.bold(pkg_spec)) + logger.info(msg) + else: + done = True + + if not done: + logger.warning(_('No packages marked for removal.')) diff --git a/dnf/cli/commands/repolist.py b/dnf/cli/commands/repolist.py new file mode 100644 index 0000000..413bda0 --- /dev/null +++ b/dnf/cli/commands/repolist.py @@ -0,0 +1,291 @@ +# repolist.py +# repolist CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli import commands +from dnf.i18n import _, ucd, fill_exact_width, exact_width +from dnf.cli.option_parser import OptionParser +import dnf.cli.format +import dnf.pycomp +import dnf.util +import fnmatch +import hawkey +import logging +import operator + +logger = logging.getLogger('dnf') + + +def _expire_str(repo, md): + last = dnf.util.normalize_time(repo._repo.getTimestamp()) if md else _("unknown") + if repo.metadata_expire <= -1: + return _("Never (last: %s)") % last + elif not repo.metadata_expire: + return _("Instant (last: %s)") % last + else: + num = _num2ui_num(repo.metadata_expire) + return _("%s second(s) (last: %s)") % (num, last) + + +def _num2ui_num(num): + return ucd(dnf.pycomp.format("%d", num, True)) + + +def _repo_match(repo, patterns): + rid = repo.id.lower() + rnm = repo.name.lower() + for pat in patterns: + if fnmatch.fnmatch(rid, pat): + return True + if fnmatch.fnmatch(rnm, pat): + return True + return False + + +def _repo_size(sack, repo): + ret = 0 + for pkg in sack.query(flags=hawkey.IGNORE_EXCLUDES).filterm(reponame__eq=repo.id): + ret += pkg._size + return dnf.cli.format.format_number(ret) + + +class RepoListCommand(commands.Command): + """A class containing methods needed by the cli to execute the + repolist command. 
+ """ + + aliases = ('repolist', 'repoinfo') + summary = _('display the configured software repositories') + + @staticmethod + def set_argparser(parser): + repolimit = parser.add_mutually_exclusive_group() + repolimit.add_argument('--all', dest='_repos_action', + action='store_const', const='all', default=None, + help=_("show all repos")) + repolimit.add_argument('--enabled', dest='_repos_action', + action='store_const', const='enabled', + help=_("show enabled repos (default)")) + repolimit.add_argument('--disabled', dest='_repos_action', + action='store_const', const='disabled', + help=_("show disabled repos")) + parser.add_argument('repos', nargs='*', default='enabled-default', metavar="REPOSITORY", + choices=['all', 'enabled', 'disabled'], + action=OptionParser.PkgNarrowCallback, + help=_("Repository specification")) + + def pre_configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO) + + def configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_repo_progress() + demands = self.cli.demands + if any((self.opts.verbose, ('repoinfo' in self.opts.command))): + demands.available_repos = True + demands.sack_activation = True + + if self.opts._repos_action: + self.opts.repos_action = self.opts._repos_action + + def run(self): + arg = self.opts.repos_action + extcmds = [x.lower() for x in self.opts.repos] + + verbose = self.base.conf.verbose + + repos = list(self.base.repos.values()) + repos.sort(key=operator.attrgetter('id')) + term = self.output.term + on_ehibeg = term.FG_COLOR['green'] + term.MODE['bold'] + on_dhibeg = term.FG_COLOR['red'] + on_hiend = term.MODE['normal'] + tot_num = 0 + cols = [] + if not repos: + logger.warning(_('No repositories available')) + return + include_status = arg == 'all' or (arg == 'enabled-default' and extcmds) + + for repo in repos: + if len(extcmds) and not _repo_match(repo, extcmds): + continue + (ehibeg, dhibeg, 
hiend) = '', '', '' + ui_enabled = '' + ui_endis_wid = 0 + ui_excludes_num = '' + if include_status: + (ehibeg, dhibeg, hiend) = (on_ehibeg, on_dhibeg, on_hiend) + if repo.enabled: + enabled = True + if arg == 'disabled': + continue + if any((include_status, verbose, 'repoinfo' in self.opts.command)): + ui_enabled = ehibeg + _('enabled') + hiend + ui_endis_wid = exact_width(_('enabled')) + if verbose or ('repoinfo' in self.opts.command): + ui_size = _repo_size(self.base.sack, repo) + else: + enabled = False + if arg == 'enabled' or (arg == 'enabled-default' and not extcmds): + continue + ui_enabled = dhibeg + _('disabled') + hiend + ui_endis_wid = exact_width(_('disabled')) + + if not any((verbose, ('repoinfo' in self.opts.command))): + rid = ucd(repo.id) + cols.append((rid, repo.name, (ui_enabled, ui_endis_wid))) + else: + if enabled: + md = repo.metadata + else: + md = None + out = [self.output.fmtKeyValFill(_("Repo-id : "), repo.id), + self.output.fmtKeyValFill(_("Repo-name : "), repo.name)] + + if include_status: + out += [self.output.fmtKeyValFill(_("Repo-status : "), + ui_enabled)] + if md and repo._repo.getRevision(): + out += [self.output.fmtKeyValFill(_("Repo-revision : "), + repo._repo.getRevision())] + if md and repo._repo.getContentTags(): + tags = repo._repo.getContentTags() + out += [self.output.fmtKeyValFill(_("Repo-tags : "), + ", ".join(sorted(tags)))] + + if md and repo._repo.getDistroTags(): + distroTagsDict = {k: v for (k, v) in repo._repo.getDistroTags()} + for (distro, tags) in distroTagsDict.items(): + out += [self.output.fmtKeyValFill( + _("Repo-distro-tags : "), + "[%s]: %s" % (distro, ", ".join(sorted(tags))))] + + if md: + num = len(self.base.sack.query(flags=hawkey.IGNORE_EXCLUDES).filterm( + reponame__eq=repo.id)) + num_available = len(self.base.sack.query().filterm(reponame__eq=repo.id)) + ui_num = _num2ui_num(num) + ui_num_available = _num2ui_num(num_available) + tot_num += num + out += [ + self.output.fmtKeyValFill( + _("Repo-updated 
: "), + dnf.util.normalize_time(repo._repo.getMaxTimestamp())), + self.output.fmtKeyValFill(_("Repo-pkgs : "), ui_num), + self.output.fmtKeyValFill(_("Repo-available-pkgs: "), ui_num_available), + self.output.fmtKeyValFill(_("Repo-size : "), ui_size)] + + if repo.metalink: + out += [self.output.fmtKeyValFill(_("Repo-metalink : "), + repo.metalink)] + if enabled: + ts = repo._repo.getTimestamp() + out += [self.output.fmtKeyValFill( + _(" Updated : "), dnf.util.normalize_time(ts))] + elif repo.mirrorlist: + out += [self.output.fmtKeyValFill(_("Repo-mirrors : "), + repo.mirrorlist)] + baseurls = repo.baseurl + if baseurls: + out += [self.output.fmtKeyValFill(_("Repo-baseurl : "), + ", ".join(baseurls))] + elif enabled: + mirrors = repo._repo.getMirrors() + if mirrors: + url = "%s (%d more)" % (mirrors[0], len(mirrors) - 1) + out += [self.output.fmtKeyValFill(_("Repo-baseurl : "), url)] + + expire = _expire_str(repo, md) + out += [self.output.fmtKeyValFill(_("Repo-expire : "), expire)] + + if repo.excludepkgs: + # TRANSLATORS: Packages that are excluded - their names like (dnf systemd) + out += [self.output.fmtKeyValFill(_("Repo-exclude : "), + ", ".join(repo.excludepkgs))] + + if repo.includepkgs: + out += [self.output.fmtKeyValFill(_("Repo-include : "), + ", ".join(repo.includepkgs))] + + if ui_excludes_num: + # TRANSLATORS: Number of packages that where excluded (5) + out += [self.output.fmtKeyValFill(_("Repo-excluded : "), + ui_excludes_num)] + + if repo.repofile: + out += [self.output.fmtKeyValFill(_("Repo-filename : "), + repo.repofile)] + + print("\n" + "\n".join(map(ucd, out))) + + if not verbose and cols: + # Work out the first (id) and last (enabled/disabled/count), + # then chop the middle (name)... 
+ + id_len = exact_width(_('repo id')) + nm_len = 0 + st_len = 0 + + for (rid, rname, (ui_enabled, ui_endis_wid)) in cols: + if id_len < exact_width(rid): + id_len = exact_width(rid) + if nm_len < exact_width(rname): + nm_len = exact_width(rname) + if st_len < ui_endis_wid: + st_len = ui_endis_wid + # Need this as well as above for: fill_exact_width() + if include_status: + if exact_width(_('status')) > st_len: + left = term.columns - (id_len + len(_('status')) + 2) + else: + left = term.columns - (id_len + st_len + 2) + else: # Don't output a status column. + left = term.columns - (id_len + 1) + + if left < nm_len: # Name gets chopped + nm_len = left + else: # Share the extra... + left -= nm_len + id_len += left // 2 + nm_len += left - (left // 2) + + txt_rid = fill_exact_width(_('repo id'), id_len) + if include_status: + txt_rnam = fill_exact_width(_('repo name'), nm_len, nm_len) + else: + txt_rnam = _('repo name') + if not include_status: # Don't output a status column. + print("%s %s" % (txt_rid, txt_rnam)) + else: + print("%s %s %s" % (txt_rid, txt_rnam, _('status'))) + for (rid, rname, (ui_enabled, ui_endis_wid)) in cols: + if not include_status: # Don't output a status column. + print("%s %s" % (fill_exact_width(rid, id_len), rname)) + continue + + print("%s %s %s" % (fill_exact_width(rid, id_len), + fill_exact_width(rname, nm_len, nm_len), + ui_enabled)) + if any((verbose, ('repoinfo' in self.opts.command))): + msg = _('Total packages: {}') + print(msg.format(_num2ui_num(tot_num))) diff --git a/dnf/cli/commands/repoquery.py b/dnf/cli/commands/repoquery.py new file mode 100644 index 0000000..f5cb36f --- /dev/null +++ b/dnf/cli/commands/repoquery.py @@ -0,0 +1,688 @@ +# +# Copyright (C) 2014 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +from dnf.i18n import _ +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser + +import argparse +import datetime +import logging +import re +import sys + +import dnf +import dnf.cli +import dnf.exceptions +import dnf.subject +import dnf.util +import hawkey + +logger = logging.getLogger('dnf') + + +QFORMAT_DEFAULT = '%{name}-%{epoch}:%{version}-%{release}.%{arch}' +# matches %[-][dd]{attr} +QFORMAT_MATCH = re.compile(r'%(-?\d*?){([:.\w]+?)}') + +QUERY_TAGS = """ +name, arch, epoch, version, release, reponame (repoid), evr, +debug_name, source_name, source_debug_name, +installtime, buildtime, size, downloadsize, installsize, +provides, requires, obsoletes, conflicts, sourcerpm, +description, summary, license, url, reason +""" + +OPTS_MAPPING = { + 'conflicts': 'conflicts', + 'enhances': 'enhances', + 'obsoletes': 'obsoletes', + 'provides': 'provides', + 'recommends': 'recommends', + 'requires': 'requires', + 'requires-pre': 'requires_pre', + 'suggests': 'suggests', + 'supplements': 'supplements' +} + + +def rpm2py_format(queryformat): + """Convert a rpm like QUERYFMT to an python .format() string.""" + def fmt_repl(matchobj): + fill = 
matchobj.groups()[0] + key = matchobj.groups()[1] + if fill: + if fill[0] == '-': + fill = '>' + fill[1:] + else: + fill = '<' + fill + fill = ':' + fill + return '{0.' + key.lower() + fill + "}" + + def brackets(txt): + return txt.replace('{', '{{').replace('}', '}}') + + queryformat = queryformat.replace("\\n", "\n").replace("\\t", "\t") + for key, value in OPTS_MAPPING.items(): + queryformat = queryformat.replace(key, value) + fmt = "" + spos = 0 + for item in QFORMAT_MATCH.finditer(queryformat): + fmt += brackets(queryformat[spos:item.start()]) + fmt += fmt_repl(item) + spos = item.end() + fmt += brackets(queryformat[spos:]) + return fmt + + +class _CommaSplitCallback(OptionParser._SplitCallback): + SPLITTER = r'\s*,\s*' + + +class RepoQueryCommand(commands.Command): + """A class containing methods needed by the cli to execute the repoquery command. + """ + nevra_forms = {'repoquery-n': hawkey.FORM_NAME, + 'repoquery-na': hawkey.FORM_NA, + 'repoquery-nevra': hawkey.FORM_NEVRA} + + aliases = ('repoquery', 'rq') + tuple(nevra_forms.keys()) + summary = _('search for packages matching keyword') + + @staticmethod + def filter_repo_arch(opts, query): + """Filter query by repoid and arch options""" + if opts.repo: + query.filterm(reponame=opts.repo) + if opts.arches: + query.filterm(arch=opts.arches) + return query + + @staticmethod + def set_argparser(parser): + parser.add_argument('-a', '--all', dest='queryall', action='store_true', + help=_("Query all packages (shorthand for repoquery '*' " + "or repoquery without argument)")) + parser.add_argument('--show-duplicates', action='store_true', + help=_("Query all versions of packages (default)")) + parser.add_argument('--arch', '--archlist', dest='arches', default=[], + action=_CommaSplitCallback, metavar='[arch]', + help=_('show only results from this ARCH')) + parser.add_argument('-f', '--file', metavar='FILE', nargs='+', + help=_('show only results that owns FILE')) + parser.add_argument('--whatconflicts', 
default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that conflict REQ')) + parser.add_argument('--whatdepends', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('shows results that requires, suggests, supplements, enhances,' + 'or recommends package provides and files REQ')) + parser.add_argument('--whatobsoletes', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that obsolete REQ')) + parser.add_argument('--whatprovides', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that provide REQ')) + parser.add_argument('--whatrequires', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('shows results that requires package provides and files REQ')) + parser.add_argument('--whatrecommends', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that recommend REQ')) + parser.add_argument('--whatenhances', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that enhance REQ')) + parser.add_argument('--whatsuggests', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that suggest REQ')) + parser.add_argument('--whatsupplements', default=[], action=_CommaSplitCallback, + metavar='REQ', + help=_('show only results that supplement REQ')) + whatrequiresform = parser.add_mutually_exclusive_group() + whatrequiresform.add_argument("--alldeps", action="store_true", + help=_("check non-explicit dependencies (files and Provides); default")) + whatrequiresform.add_argument("--exactdeps", action="store_true", + help=_('check dependencies exactly as given, opposite of --alldeps')) + parser.add_argument("--recursive", action="store_true", help=_( + 'used with --whatrequires, and --requires --resolve, query packages recursively.')) + parser.add_argument('--deplist', action='store_true', help=_( + "show a list of all dependencies and what packages provide 
them")) + parser.add_argument('--querytags', action='store_true', + help=_('show available tags to use with ' + '--queryformat')) + parser.add_argument('--resolve', action='store_true', + help=_('resolve capabilities to originating package(s)')) + parser.add_argument("--tree", action="store_true", + help=_('show recursive tree for package(s)')) + parser.add_argument('--srpm', action='store_true', + help=_('operate on corresponding source RPM')) + parser.add_argument("--latest-limit", dest='latest_limit', type=int, + help=_('show N latest packages for a given name.arch' + ' (or latest but N if N is negative)')) + parser.add_argument("--disable-modular-filtering", action="store_true", + help=_("list also packages of inactive module streams")) + + outform = parser.add_mutually_exclusive_group() + outform.add_argument('-i', "--info", dest='queryinfo', + default=False, action='store_true', + help=_('show detailed information about the package')) + outform.add_argument('-l', "--list", dest='queryfilelist', + default=False, action='store_true', + help=_('show list of files in the package')) + outform.add_argument('-s', "--source", dest='querysourcerpm', + default=False, action='store_true', + help=_('show package source RPM name')) + outform.add_argument('--changelogs', dest='querychangelogs', + default=False, action='store_true', + help=_('show changelogs of the package')) + outform.add_argument('--qf', "--queryformat", dest='queryformat', + default=QFORMAT_DEFAULT, + help=_('format for displaying found packages')) + outform.add_argument("--nevra", dest='queryformat', const=QFORMAT_DEFAULT, + action='store_const', + help=_('use name-epoch:version-release.architecture format for ' + 'displaying found packages (default)')) + outform.add_argument("--nvr", dest='queryformat', const='%{name}-%{version}-%{release}', + action='store_const', help=_('use name-version-release format for ' + 'displaying found packages ' + '(rpm query default)')) + outform.add_argument("--envra", 
dest='queryformat', + const='%{epoch}:%{name}-%{version}-%{release}.%{arch}', + action='store_const', + help=_('use epoch:name-version-release.architecture format for ' + 'displaying found packages')) + outform.add_argument('--groupmember', action="store_true", help=_( + 'Display in which comps groups are presented selected packages')) + pkgfilter = parser.add_mutually_exclusive_group() + pkgfilter.add_argument("--duplicates", dest='pkgfilter', + const='duplicated', action='store_const', + help=_('limit the query to installed duplicate ' + 'packages')) + pkgfilter.add_argument("--duplicated", dest='pkgfilter', + const='duplicated', action='store_const', + help=argparse.SUPPRESS) + pkgfilter.add_argument("--installonly", dest='pkgfilter', + const='installonly', action='store_const', + help=_('limit the query to installed installonly packages')) + pkgfilter.add_argument("--unsatisfied", dest='pkgfilter', + const='unsatisfied', action='store_const', + help=_('limit the query to installed packages with unsatisfied dependencies')) + parser.add_argument('--location', action='store_true', + help=_('show a location from where packages can be downloaded')) + package_attribute = parser.add_mutually_exclusive_group() + help_msgs = { + 'conflicts': _('Display capabilities that the package conflicts with.'), + 'depends': _('Display capabilities that the package can depend on, enhance, recommend,' + ' suggest, and supplement.'), + 'enhances': _('Display capabilities that the package can enhance.'), + 'provides': _('Display capabilities provided by the package.'), + 'recommends': _('Display capabilities that the package recommends.'), + 'requires': _('Display capabilities that the package depends on.'), + 'requires-pre': _('Display capabilities that the package depends on for running a %%pre script.'), + 'suggests': _('Display capabilities that the package suggests.'), + 'supplements': _('Display capabilities that the package can supplement.') + } + for arg, help_msg in 
help_msgs.items(): + name = '--%s' % arg + package_attribute.add_argument(name, dest='packageatr', action='store_const', + const=arg, help=help_msg) + parser.add_argument('--available', action="store_true", help=_('Display only available packages.')) + + help_list = { + 'installed': _('Display only installed packages.'), + 'extras': _('Display only packages that are not present in any of available repositories.'), + 'upgrades': _('Display only packages that provide an upgrade for some already installed package.'), + 'unneeded': _('Display only packages that can be removed by "{prog} autoremove" ' + 'command.').format(prog=dnf.util.MAIN_PROG), + 'userinstalled': _('Display only packages that were installed by user.') + } + list_group = parser.add_mutually_exclusive_group() + for list_arg, help_arg in help_list.items(): + switch = '--%s' % list_arg + list_group.add_argument(switch, dest='list', action='store_const', + const=list_arg, help=help_arg) + + # make --autoremove hidden compatibility alias for --unneeded + list_group.add_argument( + '--autoremove', dest='list', action='store_const', + const="unneeded", help=argparse.SUPPRESS) + parser.add_argument('--recent', action="store_true", help=_('Display only recently edited packages')) + + parser.add_argument('key', nargs='*', metavar="KEY", + help=_('the key to search for')) + + def pre_configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO) + + def configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_repo_progress() + demands = self.cli.demands + + if self.opts.obsoletes: + if self.opts.packageatr: + self.cli._option_conflict("--obsoletes", "--" + self.opts.packageatr) + else: + self.opts.packageatr = "obsoletes" + + if self.opts.querytags: + return + + if self.opts.resolve and not self.opts.packageatr: + raise dnf.cli.CliError( + _("Option '--resolve' has to be used together with one of the " + 
"'--conflicts', '--depends', '--enhances', '--provides', '--recommends', " + "'--requires', '--requires-pre', '--suggests' or '--supplements' options")) + + if self.opts.recursive: + if self.opts.exactdeps: + self.cli._option_conflict("--recursive", "--exactdeps") + if not any([self.opts.whatrequires, + (self.opts.packageatr == "requires" and self.opts.resolve)]): + raise dnf.cli.CliError( + _("Option '--recursive' has to be used with '--whatrequires ' " + "(optionally with '--alldeps', but not with '--exactdeps'), or with " + "'--requires --resolve'")) + + if self.opts.srpm: + self.base.repos.enable_source_repos() + + if (self.opts.list not in ["installed", "userinstalled"] and + self.opts.pkgfilter != "installonly") or self.opts.available: + demands.available_repos = True + + demands.sack_activation = True + + if self.opts.querychangelogs: + demands.changelogs = True + + def build_format_fn(self, opts, pkg): + if opts.querychangelogs: + out = [] + out.append('Changelog for %s' % str(pkg)) + for chlog in pkg.changelogs: + dt = chlog['timestamp'] + out.append('* %s %s\n%s\n' % (dt.strftime("%a %b %d %Y"), + dnf.i18n.ucd(chlog['author']), + dnf.i18n.ucd(chlog['text']))) + return '\n'.join(out) + try: + po = PackageWrapper(pkg) + if opts.queryinfo: + return self.base.output.infoOutput(pkg) + elif opts.queryfilelist: + filelist = po.files + if not filelist: + print(_('Package {} contains no files').format(pkg), file=sys.stderr) + return filelist + elif opts.querysourcerpm: + return po.sourcerpm + else: + return rpm2py_format(opts.queryformat).format(po) + except AttributeError as e: + # catch that the user has specified attributes + # there don't exist on the dnf Package object. 
+ raise dnf.exceptions.Error(str(e)) + + def _get_recursive_deps_query(self, query_in, query_select, done=None, recursive=False, + all_deps=False): + done = done if done else self.base.sack.query().filterm(empty=True) + t = self.base.sack.query().filterm(empty=True) + set_requires = set() + set_all_deps = set() + + for pkg in query_select.run(): + pkg_provides = pkg.provides + set_requires.update(pkg_provides) + set_requires.update(pkg.files) + if all_deps: + set_all_deps.update(pkg_provides) + + t = t.union(query_in.filter(requires=set_requires)) + if set_all_deps: + t = t.union(query_in.filter(recommends=set_all_deps)) + t = t.union(query_in.filter(enhances=set_all_deps)) + t = t.union(query_in.filter(supplements=set_all_deps)) + t = t.union(query_in.filter(suggests=set_all_deps)) + if recursive: + query_select = t.difference(done) + if query_select: + done = self._get_recursive_deps_query(query_in, query_select, done=t.union(done), + recursive=recursive, all_deps=all_deps) + return t.union(done) + + def by_all_deps(self, requires_name, depends_name, query): + names = requires_name or depends_name + defaultquery = self.base.sack.query().filterm(empty=True) + for name in names: + defaultquery = defaultquery.union(query.intersection( + dnf.subject.Subject(name).get_best_query(self.base.sack, with_provides=False, + with_filenames=False))) + requiresquery = query.filter(requires__glob=names) + if depends_name: + requiresquery = requiresquery.union(query.filter(recommends__glob=depends_name)) + requiresquery = requiresquery.union(query.filter(enhances__glob=depends_name)) + requiresquery = requiresquery.union(query.filter(supplements__glob=depends_name)) + requiresquery = requiresquery.union(query.filter(suggests__glob=depends_name)) + + done = requiresquery.union(self._get_recursive_deps_query(query, defaultquery, + all_deps=depends_name)) + if self.opts.recursive: + done = done.union(self._get_recursive_deps_query(query, done, + recursive=self.opts.recursive, + 
all_deps=depends_name)) + return done + + def _get_recursive_providers_query(self, query_in, providers, done=None): + done = done if done else self.base.sack.query().filterm(empty=True) + t = self.base.sack.query().filterm(empty=True) + for pkg in providers.run(): + t = t.union(query_in.filter(provides=pkg.requires)) + query_select = t.difference(done) + if query_select: + done = self._get_recursive_providers_query(query_in, query_select, done=t.union(done)) + return t.union(done) + + def run(self): + if self.opts.querytags: + print(_('Available query-tags: use --queryformat ".. %{tag} .."')) + print(QUERY_TAGS) + return + + self.cli._populate_update_security_filter(self.opts, self.base.sack.query()) + + q = self.base.sack.query( + flags=hawkey.IGNORE_MODULAR_EXCLUDES + if self.opts.disable_modular_filtering + else hawkey.APPLY_EXCLUDES + ) + if self.opts.key: + kwark = {} + forms = [self.nevra_forms[command] for command in self.opts.command + if command in list(self.nevra_forms.keys())] + if forms: + kwark["forms"] = forms + pkgs = [] + query_results = q.filter(empty=True) + for key in self.opts.key: + query_results = query_results.union( + dnf.subject.Subject(key, ignore_case=True).get_best_query( + self.base.sack, with_provides=False, query=q, **kwark)) + q = query_results + + if self.opts.recent: + q = q._recent(self.base.conf.recent) + if self.opts.available: + if self.opts.list and self.opts.list != "installed": + print(self.cli.optparser.print_usage()) + raise dnf.exceptions.Error(_("argument {}: not allowed with argument {}".format( + "--available", "--" + self.opts.list))) + elif self.opts.list == "unneeded": + q = q._unneeded(self.base.history.swdb) + elif self.opts.list and self.opts.list != 'userinstalled': + q = getattr(q, self.opts.list)() + + if self.opts.pkgfilter == "duplicated": + installonly = self.base._get_installonly_query(q) + q = q.difference(installonly).duplicated() + elif self.opts.pkgfilter == "installonly": + q = 
self.base._get_installonly_query(q) + elif self.opts.pkgfilter == "unsatisfied": + rpmdb = dnf.sack.rpmdb_sack(self.base) + rpmdb._configure(self.base.conf.installonlypkgs, self.base.conf.installonly_limit) + goal = dnf.goal.Goal(rpmdb) + solved = goal.run(verify=True) + if not solved: + print(dnf.util._format_resolve_problems(goal.problem_rules())) + return + elif not self.opts.list: + # do not show packages from @System repo + q = q.available() + + # filter repo and arch + q = self.filter_repo_arch(self.opts, q) + orquery = q + + if self.opts.file: + q.filterm(file__glob=self.opts.file) + if self.opts.whatconflicts: + q.filterm(conflicts=self.opts.whatconflicts) + if self.opts.whatobsoletes: + q.filterm(obsoletes=self.opts.whatobsoletes) + if self.opts.whatprovides: + query_for_provide = q.filter(provides__glob=self.opts.whatprovides) + if query_for_provide: + q = query_for_provide + else: + q.filterm(file__glob=self.opts.whatprovides) + if self.opts.alldeps or self.opts.exactdeps: + if not (self.opts.whatrequires or self.opts.whatdepends): + raise dnf.exceptions.Error( + _("argument {} requires --whatrequires or --whatdepends option".format( + '--alldeps' if self.opts.alldeps else '--exactdeps'))) + if self.opts.alldeps: + q = self.by_all_deps(self.opts.whatrequires, self.opts.whatdepends, q) + else: + if self.opts.whatrequires: + q.filterm(requires__glob=self.opts.whatrequires) + else: + dependsquery = q.filter(requires__glob=self.opts.whatdepends) + dependsquery = dependsquery.union( + q.filter(recommends__glob=self.opts.whatdepends)) + dependsquery = dependsquery.union( + q.filter(enhances__glob=self.opts.whatdepends)) + dependsquery = dependsquery.union( + q.filter(supplements__glob=self.opts.whatdepends)) + q = dependsquery.union(q.filter(suggests__glob=self.opts.whatdepends)) + + elif self.opts.whatrequires or self.opts.whatdepends: + q = self.by_all_deps(self.opts.whatrequires, self.opts.whatdepends, q) + if self.opts.whatrecommends: + 
q.filterm(recommends__glob=self.opts.whatrecommends) + if self.opts.whatenhances: + q.filterm(enhances__glob=self.opts.whatenhances) + if self.opts.whatsupplements: + q.filterm(supplements__glob=self.opts.whatsupplements) + if self.opts.whatsuggests: + q.filterm(suggests__glob=self.opts.whatsuggests) + if self.opts.latest_limit: + q = q.latest(self.opts.latest_limit) + # reduce a query to security upgrades if they are specified + q = self.base._merge_update_filters(q, warning=False) + if self.opts.srpm: + pkg_list = [] + for pkg in q: + srcname = pkg.source_name + if srcname is not None: + tmp_query = self.base.sack.query().filterm(name=srcname, evr=pkg.evr, + arch='src') + pkg_list += tmp_query.run() + q = self.base.sack.query().filterm(pkg=pkg_list) + if self.opts.tree: + if not self.opts.whatrequires and self.opts.packageatr not in ( + 'conflicts', 'enhances', 'obsoletes', 'provides', 'recommends', + 'requires', 'suggests', 'supplements'): + raise dnf.exceptions.Error( + _("No valid switch specified\nusage: {prog} repoquery [--conflicts|" + "--enhances|--obsoletes|--provides|--recommends|--requires|" + "--suggest|--supplements|--whatrequires] [key] [--tree]\n\n" + "description:\n For the given packages print a tree of the" + "packages.").format(prog=dnf.util.MAIN_PROG)) + self.tree_seed(q, orquery, self.opts) + return + + pkgs = set() + if self.opts.packageatr: + rels = set() + for pkg in q.run(): + if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg): + if self.opts.packageatr == 'depends': + rels.update(pkg.requires + pkg.enhances + pkg.suggests + + pkg.supplements + pkg.recommends) + else: + rels.update(getattr(pkg, OPTS_MAPPING[self.opts.packageatr])) + if self.opts.resolve: + # find the providing packages and show them + if self.opts.list == "installed": + query = self.filter_repo_arch(self.opts, self.base.sack.query()) + else: + query = self.filter_repo_arch(self.opts, self.base.sack.query().available()) + providers = 
query.filter(provides=rels) + if self.opts.recursive: + providers = providers.union( + self._get_recursive_providers_query(query, providers)) + pkgs = set() + for pkg in providers.latest().run(): + pkgs.add(self.build_format_fn(self.opts, pkg)) + else: + pkgs.update(str(rel) for rel in rels) + elif self.opts.location: + for pkg in q.run(): + location = pkg.remote_location() + if location is not None: + pkgs.add(location) + elif self.opts.deplist: + pkgs = [] + for pkg in sorted(set(q.run())): + if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg): + deplist_output = [] + deplist_output.append('package: ' + str(pkg)) + for req in sorted([str(req) for req in pkg.requires]): + deplist_output.append(' dependency: ' + req) + subject = dnf.subject.Subject(req) + query = subject.get_best_query(self.base.sack) + query = self.filter_repo_arch( + self.opts, query.available()) + if not self.opts.verbose: + query = query.latest() + for provider in query.run(): + deplist_output.append(' provider: ' + str(provider)) + pkgs.append('\n'.join(deplist_output)) + if pkgs: + print('\n\n'.join(pkgs)) + return + elif self.opts.groupmember: + self._group_member_report(q) + return + + else: + for pkg in q.run(): + if self.opts.list != 'userinstalled' or self.base.history.user_installed(pkg): + pkgs.add(self.build_format_fn(self.opts, pkg)) + + if pkgs: + if self.opts.queryinfo: + print("\n\n".join(sorted(pkgs))) + else: + print("\n".join(sorted(pkgs))) + + def _group_member_report(self, query): + self.base.read_comps(arch_filter=True) + package_conf_dict = {} + for group in self.base.comps.groups: + package_conf_dict[group.id] = set([pkg.name for pkg in group.packages_iter()]) + group_package_dict = {} + pkg_not_in_group = [] + for pkg in query.run(): + group_id_list = [] + for group_id, package_name_set in package_conf_dict.items(): + if pkg.name in package_name_set: + group_id_list.append(group_id) + if group_id_list: + group_package_dict.setdefault( + 
'$'.join(sorted(group_id_list)), []).append(str(pkg)) + else: + pkg_not_in_group.append(str(pkg)) + output = [] + for key, package_list in sorted(group_package_dict.items()): + output.append( + '\n'.join(sorted(package_list) + sorted([' @' + id for id in key.split('$')]))) + output.append('\n'.join(sorted(pkg_not_in_group))) + if output: + print('\n'.join(output)) + + def grow_tree(self, level, pkg, opts): + pkg_string = self.build_format_fn(opts, pkg) + if level == -1: + print(pkg_string) + return + spacing = " " + for x in range(0, level): + spacing += "| " + requires = [] + for requirepkg in pkg.requires: + requires.append(str(requirepkg)) + reqstr = "[" + str(len(requires)) + ": " + ", ".join(requires) + "]" + print(spacing + r"\_ " + pkg_string + " " + reqstr) + + def tree_seed(self, query, aquery, opts, level=-1, usedpkgs=None): + for pkg in sorted(set(query.run()), key=lambda p: p.name): + usedpkgs = set() if usedpkgs is None or level == -1 else usedpkgs + if pkg.name.startswith("rpmlib") or pkg.name.startswith("solvable"): + return + self.grow_tree(level, pkg, opts) + if pkg not in usedpkgs: + usedpkgs.add(pkg) + if opts.packageatr: + strpkg = getattr(pkg, opts.packageatr) + ar = {} + for name in set(strpkg): + pkgquery = self.base.sack.query().filterm(provides=name) + for querypkg in pkgquery: + ar[querypkg.name + "." 
+ querypkg.arch] = querypkg + pkgquery = self.base.sack.query().filterm(pkg=list(ar.values())) + else: + pkgquery = self.by_all_deps(pkg.name, None, aquery) if opts.alldeps \ + else aquery.filter(requires__glob=pkg.name) + self.tree_seed(pkgquery, aquery, opts, level + 1, usedpkgs) + + +class PackageWrapper(object): + + """Wrapper for dnf.package.Package, so we can control formatting.""" + + def __init__(self, pkg): + self._pkg = pkg + + def __getattr__(self, attr): + atr = getattr(self._pkg, attr) + if atr is None: + return "(none)" + if isinstance(atr, list): + return '\n'.join(sorted({dnf.i18n.ucd(reldep) for reldep in atr})) + return dnf.i18n.ucd(atr) + + @staticmethod + def _get_timestamp(timestamp): + if timestamp > 0: + dt = datetime.datetime.utcfromtimestamp(timestamp) + return dt.strftime("%Y-%m-%d %H:%M") + else: + return '' + + @property + def buildtime(self): + return self._get_timestamp(self._pkg.buildtime) + + @property + def installtime(self): + return self._get_timestamp(self._pkg.installtime) diff --git a/dnf/cli/commands/search.py b/dnf/cli/commands/search.py new file mode 100644 index 0000000..cdf4773 --- /dev/null +++ b/dnf/cli/commands/search.py @@ -0,0 +1,160 @@ +# search.py +# Search CLI command. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import collections + +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser +from dnf.i18n import ucd, _, C_ + +import dnf.i18n +import dnf.match_counter +import dnf.util +import hawkey +import logging + +logger = logging.getLogger('dnf') + + +class SearchCommand(commands.Command): + """A class containing methods needed by the cli to execute the + search command. + """ + + aliases = ('search', 'se') + summary = _('search package details for the given string') + + @staticmethod + def set_argparser(parser): + parser.add_argument('--all', action='store_true', + help=_("search also package description and URL")) + parser.add_argument('query_string', nargs='+', metavar=_('KEYWORD'), + choices=['all'], default=None, + action=OptionParser.PkgNarrowCallback, + help=_("Keyword to search for")) + + def _search(self, args): + """Search for simple text tags in a package object.""" + + TRANS_TBL = collections.OrderedDict(( + ('name', C_('long', 'Name')), + ('summary', C_('long', 'Summary')), + ('description', C_('long', 'Description')), + ('url', _('URL')), + )) + + def _translate_attr(attr): + try: + return TRANS_TBL[attr] + except: + return attr + + def _print_section_header(exact_match, attrs, keys): + trans_attrs = map(_translate_attr, attrs) + # TRANSLATORS: separator used between package attributes (eg. 
Name & Summary & URL) + trans_attrs_str = _(' & ').join(trans_attrs) + if exact_match: + # TRANSLATORS: %s - translated package attributes, + # %%s - found keys (in listed attributes) + section_text = _('%s Exactly Matched: %%s') % trans_attrs_str + else: + # TRANSLATORS: %s - translated package attributes, + # %%s - found keys (in listed attributes) + section_text = _('%s Matched: %%s') % trans_attrs_str + formatted = self.base.output.fmtSection(section_text % ", ".join(keys)) + print(ucd(formatted)) + + counter = dnf.match_counter.MatchCounter() + for arg in args: + self._search_counted(counter, 'name', arg) + self._search_counted(counter, 'summary', arg) + + if self.opts.all: + for arg in args: + self._search_counted(counter, 'description', arg) + self._search_counted(counter, 'url', arg) + else: + needles = len(args) + pkgs = list(counter.keys()) + for pkg in pkgs: + if len(counter.matched_needles(pkg)) != needles: + del counter[pkg] + + used_attrs = None + matched_needles = None + exact_match = False + print_section_header = False + limit = None + if not self.base.conf.showdupesfromrepos: + limit = self.base.sack.query().filterm(pkg=counter.keys()).latest() + + seen = set() + for pkg in counter.sorted(reverse=True, limit_to=limit): + if not self.base.conf.showdupesfromrepos: + if pkg.name + pkg.arch in seen: + continue + seen.add(pkg.name + pkg.arch) + + if used_attrs != counter.matched_keys(pkg): + used_attrs = counter.matched_keys(pkg) + print_section_header = True + if matched_needles != counter.matched_needles(pkg): + matched_needles = counter.matched_needles(pkg) + print_section_header = True + if exact_match != (counter.matched_haystacks(pkg) == matched_needles): + exact_match = counter.matched_haystacks(pkg) == matched_needles + print_section_header = True + if print_section_header: + _print_section_header(exact_match, used_attrs, matched_needles) + print_section_header = False + self.base.output.matchcallback(pkg, counter.matched_haystacks(pkg), args) 
+ + if len(counter) == 0: + logger.info(_('No matches found.')) + + def _search_counted(self, counter, attr, needle): + fdict = {'%s__substr' % attr : needle} + if dnf.util.is_glob_pattern(needle): + fdict = {'%s__glob' % attr : needle} + q = self.base.sack.query().filterm(hawkey.ICASE, **fdict) + for pkg in q.run(): + counter.add(pkg, attr, needle) + return counter + + def pre_configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_logger(stdout=logging.WARNING, stderr=logging.INFO) + + def configure(self): + if not self.opts.verbose and not self.opts.quiet: + self.cli.redirect_repo_progress() + demands = self.cli.demands + demands.available_repos = True + demands.fresh_metadata = False + demands.sack_activation = True + self.opts.all = self.opts.all or self.opts.query_string_action + + def run(self): + logger.debug(_('Searching Packages: ')) + return self._search(self.opts.query_string) diff --git a/dnf/cli/commands/shell.py b/dnf/cli/commands/shell.py new file mode 100644 index 0000000..431fe50 --- /dev/null +++ b/dnf/cli/commands/shell.py @@ -0,0 +1,292 @@ +# shell.py +# Shell CLI command. +# +# Copyright (C) 2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from dnf.cli import commands +from dnf.i18n import _, ucd + +import dnf.util +import cmd +import copy +import dnf +import logging +import shlex +import sys + + +logger = logging.getLogger('dnf') + + +# only demands we'd like to override +class ShellDemandSheet(object): + available_repos = True + resolving = True + root_user = True + sack_activation = True + + +class ShellCommand(commands.Command, cmd.Cmd): + + aliases = ('shell', 'sh') + summary = _('run an interactive {prog} shell').format(prog=dnf.util.MAIN_PROG_UPPER) + + MAPPING = {'repo': 'repo', + 'repository': 'repo', + 'exit': 'quit', + 'quit': 'quit', + 'run': 'ts_run', + 'ts': 'transaction', + 'transaction': 'transaction', + 'config': 'config', + 'resolvedep': 'resolve', + 'help': 'help' + } + + def __init__(self, cli): + commands.Command.__init__(self, cli) + cmd.Cmd.__init__(self) + self.prompt = '> ' + + @staticmethod + def set_argparser(parser): + parser.add_argument('script', nargs='?', metavar=_('SCRIPT'), + help=_('Script to run in {prog} shell').format( + prog=dnf.util.MAIN_PROG_UPPER)) + + def configure(self): + # append to ShellDemandSheet missing demands from + # dnf.cli.demand.DemandSheet with their default values. 
+ default_demands = self.cli.demands + self.cli.demands = ShellDemandSheet() + for attr in dir(default_demands): + if attr.startswith('__'): + continue + try: + getattr(self.cli.demands, attr) + except AttributeError: + setattr(self.cli.demands, attr, getattr(default_demands, attr)) + + def run(self): + if self.opts.script: + self._run_script(self.opts.script) + else: + self.cmdloop() + + def _clean(self): + self.base._finalize_base() + self.base._transaction = None + self.base.fill_sack() + + def onecmd(self, line): + if not line or line == '\n': + return + if line == 'EOF': + line = 'quit' + try: + s_line = shlex.split(line) + except: + self._help() + return + # reset option parser before each command, keep usage information + self.cli.optparser.__init__(reset_usage=False) + opts = self.cli.optparser.parse_main_args(s_line) + # Disable shell recursion. + if opts.command == 'shell': + return + if opts.command in self.MAPPING: + getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::]) + else: + cmd_cls = self.cli.cli_commands.get(opts.command) + if cmd_cls is not None: + cmd = cmd_cls(self.cli) + try: + opts = self.cli.optparser.parse_command_args(cmd, s_line) + except SystemExit: + # argparse.ArgumentParser prints usage information and executes + # sys.exit() on problems with parsing command line arguments + return + try: + cmd.cli.demands = copy.deepcopy(self.cli.demands) + cmd.configure() + cmd.run() + except dnf.exceptions.Error as e: + logger.error(_("Error:") + " " + ucd(e)) + return + else: + self._help() + + def _config(self, args=None): + def print_or_set(key, val, conf): + if val: + setattr(conf, key, val) + else: + try: + print('{}: {}'.format(key, getattr(conf, str(key)))) + except: + logger.warning(_('Unsupported key value.')) + + if not args or len(args) > 2: + self._help('config') + return + + key = args[0] + val = args[1] if len(args) == 2 else None + period = key.find('.') + if period != -1: + repo_name = key[:period] + key = key[period+1:] + 
repos = self.base.repos.get_matching(repo_name) + for repo in repos: + print_or_set(key, val, repo) + if not repos: + logger.warning(_('Could not find repository: %s'), + repo_name) + else: + print_or_set(key, val, self.base.conf) + + def _help(self, args=None): + """Output help information. + + :param args: the command to output help information about. If + *args* is an empty, general help will be output. + """ + arg = args[0] if isinstance(args, list) and len(args) > 0 else args + msg = None + + if arg: + if arg == 'config': + msg = _("""{} arg [value] + arg: debuglevel, errorlevel, obsoletes, gpgcheck, assumeyes, exclude, + repo_id.gpgcheck, repo_id.exclude + If no value is given it prints the current value. + If value is given it sets that value.""").format(arg) + + elif arg == 'help': + msg = _("""{} [command] + print help""").format(arg) + + elif arg in ['repo', 'repository']: + msg = _("""{} arg [option] + list: lists repositories and their status. option = [all | id | glob] + enable: enable repositories. option = repository id + disable: disable repositories. 
option = repository id""").format(arg) + + elif arg == 'resolvedep': + msg = _("""{} + resolve the transaction set""").format(arg) + + elif arg in ['transaction', 'ts']: + msg = _("""{} arg + list: lists the contents of the transaction + reset: reset (zero-out) the transaction + run: run the transaction""").format(arg) + + elif arg == 'run': + msg = _("""{} + run the transaction""").format(arg) + + elif arg in ['exit', 'quit']: + msg = _("""{} + exit the shell""").format(arg) + + if not msg: + self.cli.optparser.print_help() + msg = _("""Shell specific arguments: + +config set config options +help print help +repository (or repo) enable, disable or list repositories +resolvedep resolve the transaction set +transaction (or ts) list, reset or run the transaction set +run resolve and run the transaction set +exit (or quit) exit the shell""") + + print('\n' + msg) + + def _repo(self, args=None): + cmd = args[0] if args else None + + if cmd in ['list', None]: + self.onecmd('repolist ' + ' '.join(args[1:])) + + elif cmd in ['enable', 'disable']: + repos = self.cli.base.repos + fill_sack = False + for repo in args[1::]: + r = repos.get_matching(repo) + if r: + getattr(r, cmd)() + fill_sack = True + else: + logger.critical(_("Error:") + " " + _("Unknown repo: '%s'"), + self.base.output.term.bold(repo)) + if fill_sack: + self.base.fill_sack() + + else: + self._help('repo') + + def _resolve(self, args=None): + try: + self.cli.base.resolve(self.cli.demands.allow_erasing) + except dnf.exceptions.DepsolveError as e: + print(e) + + def _run_script(self, file): + try: + with open(file, 'r') as fd: + lines = fd.readlines() + for line in lines: + if not line.startswith('#'): + self.onecmd(line) + except IOError: + logger.info(_('Error: Cannot open %s for reading'), self.base.output.term.bold(file)) + sys.exit(1) + + def _transaction(self, args=None): + cmd = args[0] if args else None + + if cmd == 'reset': + self._clean() + return + + self._resolve() + if cmd in ['list', None]: + 
if self.base._transaction: + out = self.base.output.list_transaction(self.base._transaction) + logger.info(out) + + elif cmd == 'run': + try: + self.base.do_transaction() + except dnf.exceptions.Error as e: + logger.error(_("Error:") + " " + ucd(e)) + else: + logger.info(_("Complete!")) + self._clean() + + else: + self._help('transaction') + + def _ts_run(self, args=None): + self._transaction(['run']) + + def _quit(self, args=None): + logger.info(_('Leaving Shell')) + sys.exit(0) diff --git a/dnf/cli/commands/swap.py b/dnf/cli/commands/swap.py new file mode 100644 index 0000000..5f23880 --- /dev/null +++ b/dnf/cli/commands/swap.py @@ -0,0 +1,62 @@ +# +# Copyright (C) 2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ +from dnf.cli import commands + +import dnf.util +import logging + +logger = logging.getLogger("dnf") + + +class SwapCommand(commands.Command): + """A class containing methods needed by the cli to execute the swap command. 
+ """ + + aliases = ('swap',) + summary = _('run an interactive {prog} mod for remove and install one spec').format( + prog=dnf.util.MAIN_PROG_UPPER) + + @staticmethod + def set_argparser(parser): + parser.add_argument('remove_spec', action="store", help=_('The specs that will be removed')) + parser.add_argument('install_spec', action="store", help=_( + 'The specs that will be installed')) + + def configure(self): + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + commands._checkGPGKey(self.base, self.cli) + commands._checkEnabledRepo(self.base, [self.opts.install_spec]) + + def _perform(self, cmd_str, spec): + cmd_cls = self.cli.cli_commands.get(cmd_str) + if cmd_cls is not None: + cmd = cmd_cls(self.cli) + self.cli.optparser.parse_command_args(cmd, [cmd_str, spec]) + cmd.run() + + def run(self): + self._perform('remove', self.opts.remove_spec) + self._perform('install', self.opts.install_spec) diff --git a/dnf/cli/commands/updateinfo.py b/dnf/cli/commands/updateinfo.py new file mode 100644 index 0000000..77923bd --- /dev/null +++ b/dnf/cli/commands/updateinfo.py @@ -0,0 +1,400 @@ +# updateinfo.py +# UpdateInfo CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +"""UpdateInfo CLI command.""" +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import collections +import fnmatch + +import hawkey +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser +from dnf.i18n import _, exact_width +from dnf.pycomp import unicode + + +def _maxlen(iterable): + """Return maximum length of items in a non-empty iterable.""" + return max(exact_width(item) for item in iterable) + + +class UpdateInfoCommand(commands.Command): + """Implementation of the UpdateInfo command.""" + + TYPE2LABEL = {hawkey.ADVISORY_BUGFIX: _('bugfix'), + hawkey.ADVISORY_ENHANCEMENT: _('enhancement'), + hawkey.ADVISORY_SECURITY: _('security'), + hawkey.ADVISORY_UNKNOWN: _('unknown'), + hawkey.ADVISORY_NEWPACKAGE: _('newpackage')} + + SECURITY2LABEL = {'Critical': _('Critical/Sec.'), + 'Important': _('Important/Sec.'), + 'Moderate': _('Moderate/Sec.'), + 'Low': _('Low/Sec.')} + + direct_commands = {'list-updateinfo' : 'list', + 'list-security' : 'list', + 'list-sec' : 'list', + 'info-updateinfo' : 'info', + 'info-security' : 'info', + 'info-sec' : 'info', + 'summary-updateinfo' : 'summary'} + aliases = ['updateinfo'] + list(direct_commands.keys()) + summary = _('display advisories about packages') + availability_default = 'available' + availabilities = ['installed', 'updates', 'all', availability_default] + + def __init__(self, cli): + """Initialize the command.""" + super(UpdateInfoCommand, self).__init__(cli) + self._installed_query = None + + @staticmethod + def set_argparser(parser): + availability = parser.add_mutually_exclusive_group() + availability.add_argument( + "--available", dest='_availability', const='available', action='store_const', + 
help=_("advisories about newer versions of installed packages (default)")) + availability.add_argument( + "--installed", dest='_availability', const='installed', action='store_const', + help=_("advisories about equal and older versions of installed packages")) + availability.add_argument( + "--updates", dest='_availability', const='updates', action='store_const', + help=_("advisories about newer versions of those installed packages " + "for which a newer version is available")) + availability.add_argument( + "--all", dest='_availability', const='all', action='store_const', + help=_("advisories about any versions of installed packages")) + cmds = ['summary', 'list', 'info'] + output_format = parser.add_mutually_exclusive_group() + output_format.add_argument("--summary", dest='_spec_action', const='summary', + action='store_const', + help=_('show summary of advisories (default)')) + output_format.add_argument("--list", dest='_spec_action', const='list', + action='store_const', + help=_('show list of advisories')) + output_format.add_argument("--info", dest='_spec_action', const='info', + action='store_const', + help=_('show info of advisories')) + parser.add_argument("--with-cve", dest='with_cve', default=False, + action='store_true', + help=_('show only advisories with CVE reference')) + parser.add_argument("--with-bz", dest='with_bz', default=False, + action='store_true', + help=_('show only advisories with bugzilla reference')) + parser.add_argument('spec', nargs='*', metavar='SPEC', + choices=cmds, default=cmds[0], + action=OptionParser.PkgNarrowCallback, + help=_("Package specification")) + + def configure(self): + """Do any command-specific configuration based on command arguments.""" + self.cli.demands.available_repos = True + self.cli.demands.sack_activation = True + + if self.opts.command[0] in self.direct_commands: + # we were called with direct command + self.opts.spec_action = self.direct_commands[self.opts.command[0]] + else: + if self.opts._spec_action: 
+ self.opts.spec_action = self.opts._spec_action + + if self.opts._availability: + self.opts.availability = self.opts._availability + else: + # yum compatibility - search for all|available|installed|updates in spec[0] + if not self.opts.spec or self.opts.spec[0] not in self.availabilities: + self.opts.availability = self.availability_default + else: + self.opts.availability = self.opts.spec.pop(0) + + # filtering by advisory types (security/bugfix/enhancement/newpackage) + self.opts._advisory_types = set() + if self.opts.bugfix: + self.opts._advisory_types.add(hawkey.ADVISORY_BUGFIX) + if self.opts.enhancement: + self.opts._advisory_types.add(hawkey.ADVISORY_ENHANCEMENT) + if self.opts.newpackage: + self.opts._advisory_types.add(hawkey.ADVISORY_NEWPACKAGE) + if self.opts.security: + self.opts._advisory_types.add(hawkey.ADVISORY_SECURITY) + + # yum compatibility - yum accepts types also as positional arguments + if self.opts.spec: + spec = self.opts.spec.pop(0) + if spec == 'bugfix': + self.opts._advisory_types.add(hawkey.ADVISORY_BUGFIX) + elif spec == 'enhancement': + self.opts._advisory_types.add(hawkey.ADVISORY_ENHANCEMENT) + elif spec in ('security', 'sec'): + self.opts._advisory_types.add(hawkey.ADVISORY_SECURITY) + elif spec == 'newpackage': + self.opts._advisory_types.add(hawkey.ADVISORY_NEWPACKAGE) + elif spec in ('bugzillas', 'bzs'): + self.opts.with_bz = True + elif spec == 'cves': + self.opts.with_cve = True + else: + self.opts.spec.insert(0, spec) + + if self.opts.advisory: + self.opts.spec.extend(self.opts.advisory) + + + def run(self): + """Execute the command with arguments.""" + if self.opts.availability == 'installed': + apkg_adv_insts = self.installed_apkg_adv_insts(self.opts.spec) + description = _('installed') + elif self.opts.availability == 'updates': + apkg_adv_insts = self.updating_apkg_adv_insts(self.opts.spec) + description = _('updates') + elif self.opts.availability == 'all': + apkg_adv_insts = self.all_apkg_adv_insts(self.opts.spec) + 
description = _('all') + else: + apkg_adv_insts = self.available_apkg_adv_insts(self.opts.spec) + description = _('available') + + if self.opts.spec_action == 'list': + self.display_list(apkg_adv_insts) + elif self.opts.spec_action == 'info': + self.display_info(apkg_adv_insts) + else: + self.display_summary(apkg_adv_insts, description) + + def _newer_equal_installed(self, apackage): + if self._installed_query is None: + self._installed_query = self.base.sack.query().installed().apply() + q = self._installed_query.filter(name=apackage.name, evr__gte=apackage.evr) + return len(q) > 0 + + def _advisory_matcher(self, advisory): + if not self.opts._advisory_types \ + and not self.opts.spec \ + and not self.opts.severity \ + and not self.opts.bugzilla \ + and not self.opts.cves \ + and not self.opts.with_cve \ + and not self.opts.with_bz: + return True + if advisory.type in self.opts._advisory_types: + return True + if any(fnmatch.fnmatchcase(advisory.id, pat) for pat in self.opts.spec): + return True + if self.opts.severity and advisory.severity in self.opts.severity: + return True + if self.opts.bugzilla and any([advisory.match_bug(bug) for bug in self.opts.bugzilla]): + return True + if self.opts.cves and any([advisory.match_cve(cve) for cve in self.opts.cves]): + return True + if self.opts.with_cve: + if any([ref.type == hawkey.REFERENCE_CVE for ref in advisory.references]): + return True + if self.opts.with_bz: + if any([ref.type == hawkey.REFERENCE_BUGZILLA for ref in advisory.references]): + return True + return False + + def _apackage_advisory_installed(self, pkgs_query, cmptype, specs): + """Return (adv. 
package, advisory, installed) triplets.""" + for apackage in pkgs_query.get_advisory_pkgs(cmptype): + advisory = apackage.get_advisory(self.base.sack) + advisory_match = self._advisory_matcher(advisory) + apackage_match = any(fnmatch.fnmatchcase(apackage.name, pat) + for pat in self.opts.spec) + if advisory_match or apackage_match: + installed = self._newer_equal_installed(apackage) + yield apackage, advisory, installed + + def running_kernel_pkgs(self): + """Return query containing packages of currently running kernel""" + sack = self.base.sack + q = sack.query().filterm(empty=True) + kernel = sack.get_running_kernel() + if kernel: + q = q.union(sack.query().filterm(sourcerpm=kernel.sourcerpm)) + return q + + def available_apkg_adv_insts(self, specs): + """Return available (adv. package, adv., inst.) triplets""" + # check advisories for the latest installed packages + q = self.base.sack.query().installed().latest(1) + # plus packages of the running kernel + q = q.union(self.running_kernel_pkgs().installed()) + return self._apackage_advisory_installed(q, hawkey.GT, specs) + + def installed_apkg_adv_insts(self, specs): + """Return installed (adv. package, adv., inst.) triplets""" + return self._apackage_advisory_installed( + self.base.sack.query().installed(), hawkey.LT | hawkey.EQ, specs) + + def updating_apkg_adv_insts(self, specs): + """Return updating (adv. package, adv., inst.) triplets""" + return self._apackage_advisory_installed( + self.base.sack.query().filterm(upgradable=True), hawkey.GT, specs) + + def all_apkg_adv_insts(self, specs): + """Return installed (adv. package, adv., inst.) triplets""" + return self._apackage_advisory_installed( + self.base.sack.query().installed(), hawkey.LT | hawkey.EQ | hawkey.GT, specs) + + def _summary(self, apkg_adv_insts): + """Make the summary of advisories.""" + # Remove duplicate advisory IDs. 
We assume that the ID is unique within + # a repository and two advisories with the same IDs in different + # repositories must have the same type. + id2type = {} + for (apkg, advisory, installed) in apkg_adv_insts: + id2type[advisory.id] = advisory.type + if advisory.type == hawkey.ADVISORY_SECURITY: + id2type[(advisory.id, advisory.severity)] = (advisory.type, advisory.severity) + return collections.Counter(id2type.values()) + + def display_summary(self, apkg_adv_insts, description): + """Display the summary of advisories.""" + typ2cnt = self._summary(apkg_adv_insts) + if typ2cnt: + print(_('Updates Information Summary: ') + description) + # Convert types to strings and order the entries. + label_counts = [ + (0, _('New Package notice(s)'), typ2cnt[hawkey.ADVISORY_NEWPACKAGE]), + (0, _('Security notice(s)'), typ2cnt[hawkey.ADVISORY_SECURITY]), + (1, _('Critical Security notice(s)'), + typ2cnt[(hawkey.ADVISORY_SECURITY, 'Critical')]), + (1, _('Important Security notice(s)'), + typ2cnt[(hawkey.ADVISORY_SECURITY, 'Important')]), + (1, _('Moderate Security notice(s)'), + typ2cnt[(hawkey.ADVISORY_SECURITY, 'Moderate')]), + (1, _('Low Security notice(s)'), + typ2cnt[(hawkey.ADVISORY_SECURITY, 'Low')]), + (1, _('Unknown Security notice(s)'), + typ2cnt[(hawkey.ADVISORY_SECURITY, None)]), + (0, _('Bugfix notice(s)'), typ2cnt[hawkey.ADVISORY_BUGFIX]), + (0, _('Enhancement notice(s)'), typ2cnt[hawkey.ADVISORY_ENHANCEMENT]), + (0, _('other notice(s)'), typ2cnt[hawkey.ADVISORY_UNKNOWN])] + width = _maxlen(unicode(v[2]) for v in label_counts if v[2]) + for indent, label, count in label_counts: + if not count: + continue + print(' %*s %s' % (width + 4 * indent, unicode(count), label)) + if self.base.conf.autocheck_running_kernel: + self.cli._check_running_kernel() + + def display_list(self, apkg_adv_insts): + """Display the list of advisories.""" + def inst2mark(inst): + if not self.opts.availability == 'all': + return '' + elif inst: + return 'i ' + else: + return ' ' + + def 
type2label(typ, sev): + if typ == hawkey.ADVISORY_SECURITY: + return self.SECURITY2LABEL.get(sev, _('Unknown/Sec.')) + else: + return self.TYPE2LABEL.get(typ, _('unknown')) + + nevra_inst_dict = dict() + for apkg, advisory, installed in apkg_adv_insts: + nevra = '%s-%s.%s' % (apkg.name, apkg.evr, apkg.arch) + if self.opts.with_cve or self.opts.with_bz: + for ref in advisory.references: + if ref.type == hawkey.REFERENCE_BUGZILLA and not self.opts.with_bz: + continue + elif ref.type == hawkey.REFERENCE_CVE and not self.opts.with_cve: + continue + nevra_inst_dict.setdefault((nevra, installed), dict())[ref.id] = ( + advisory.type, advisory.severity) + else: + nevra_inst_dict.setdefault((nevra, installed), dict())[advisory.id] = ( + advisory.type, advisory.severity) + + advlist = [] + # convert types to labels, find max len of advisory IDs and types + idw = tlw = 0 + for (nevra, inst), id2type in sorted(nevra_inst_dict.items(), key=lambda x: x[0]): + for aid, atypesev in id2type.items(): + idw = max(idw, len(aid)) + label = type2label(*atypesev) + tlw = max(tlw, len(label)) + advlist.append((inst2mark(inst), aid, label, nevra)) + + for (inst, aid, label, nevra) in advlist: + print('%s%-*s %-*s %s' % (inst, idw, aid, tlw, label, nevra)) + + def display_info(self, apkg_adv_insts): + """Display the details about available advisories.""" + arches = self.base.sack.list_arches() + verbose = self.base.conf.verbose + labels = (_('Update ID'), _('Type'), _('Updated'), _('Bugs'), + _('CVEs'), _('Description'), _('Severity'), _('Rights'), + _('Files'), _('Installed')) + + def advisory2info(advisory, installed): + attributes = [ + [advisory.id], + [self.TYPE2LABEL.get(advisory.type, _('unknown'))], + [unicode(advisory.updated)], + [], + [], + (advisory.description or '').splitlines(), + [advisory.severity], + (advisory.rights or '').splitlines(), + sorted(set(pkg.filename for pkg in advisory.packages + if pkg.arch in arches)), + None] + for ref in advisory.references: + if ref.type 
== hawkey.REFERENCE_BUGZILLA: + attributes[3].append('{} - {}'.format(ref.id, ref.title or '')) + elif ref.type == hawkey.REFERENCE_CVE: + attributes[4].append(ref.id) + attributes[3].sort() + attributes[4].sort() + if not verbose: + attributes[7] = None + attributes[8] = None + if self.opts.availability == 'all': + attributes[9] = [_('true') if installed else _('false')] + + width = _maxlen(labels) + lines = [] + lines.append('=' * 79) + lines.append(' ' + advisory.title) + lines.append('=' * 79) + for label, atr_lines in zip(labels, attributes): + if atr_lines in (None, [None]): + continue + for i, line in enumerate(atr_lines): + key = label if i == 0 else '' + key_padding = width - exact_width(key) + lines.append('%*s%s: %s' % (key_padding, "", key, line)) + return '\n'.join(lines) + + advisories = set() + for apkg, advisory, installed in apkg_adv_insts: + advisories.add(advisory2info(advisory, installed)) + + print("\n\n".join(sorted(advisories, key=lambda x: x.lower()))) diff --git a/dnf/cli/commands/upgrade.py b/dnf/cli/commands/upgrade.py new file mode 100644 index 0000000..44789c9 --- /dev/null +++ b/dnf/cli/commands/upgrade.py @@ -0,0 +1,130 @@ +# upgrade.py +# Upgrade CLI command. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals + +import logging + +import dnf.exceptions +import dnf.base +from dnf.cli import commands +from dnf.cli.option_parser import OptionParser +from dnf.i18n import _ + +logger = logging.getLogger('dnf') + + +class UpgradeCommand(commands.Command): + """A class containing methods needed by the cli to execute the + update command. + """ + aliases = ('upgrade', 'update', 'upgrade-to', 'update-to', 'localupdate', 'up') + summary = _('upgrade a package or packages on your system') + + @staticmethod + def set_argparser(parser): + parser.add_argument('packages', nargs='*', help=_('Package to upgrade'), + action=OptionParser.ParseSpecGroupFileCallback, + metavar=_('PACKAGE')) + + def configure(self): + """Verify that conditions are met so that this command can run. + + These include that there are enabled repositories with gpg + keys, and that this command is being run by the root user. 
+ """ + demands = self.cli.demands + demands.sack_activation = True + demands.available_repos = True + demands.resolving = True + demands.root_user = True + commands._checkGPGKey(self.base, self.cli) + if not self.opts.filenames: + commands._checkEnabledRepo(self.base) + self.upgrade_minimal = None + self.all_security = None + self.skipped_grp_specs = None + + def run(self): + query = self.base.sack.query().upgrades() + if self.base.conf.obsoletes: + obsoleted = query.union(self.base.sack.query().installed()) + obsoletes = self.base.sack.query().filter(obsoletes=obsoleted) + query = query.union(obsoletes) + cmp_type = "eq" if self.upgrade_minimal else "gte" + self.cli._populate_update_security_filter(self.opts, query, cmp_type=cmp_type, + all=self.all_security) + + if self.opts.filenames or self.opts.pkg_specs or self.opts.grp_specs: + result = False + result |= self._update_modules() + result |= self._update_files() + result |= self._update_packages() + result |= self._update_groups() + + if result: + return + else: + self.base.upgrade_all() + return + + raise dnf.exceptions.Error(_('No packages marked for upgrade.')) + + def _update_modules(self): + group_specs_num = len(self.opts.grp_specs) + if dnf.base.WITH_MODULES: + module_base = dnf.module.module_base.ModuleBase(self.base) + self.skipped_grp_specs = module_base.upgrade(self.opts.grp_specs) + else: + self.skipped_grp_specs = self.opts.grp_specs + + return len(self.skipped_grp_specs) != group_specs_num + + def _update_files(self): + success = False + if self.opts.filenames: + for pkg in self.base.add_remote_rpms(self.opts.filenames, strict=False, + progress=self.base.output.progress): + try: + self.base.package_upgrade(pkg) + success = True + except dnf.exceptions.MarkingError as e: + logger.info(_('No match for argument: %s'), + self.base.output.term.bold(pkg.location)) + return success + + def _update_packages(self): + success = False + for pkg_spec in self.opts.pkg_specs: + try: + 
self.base.upgrade(pkg_spec) + success = True + except dnf.exceptions.MarkingError as e: + logger.info(_('No match for argument: %s'), + self.base.output.term.bold(pkg_spec)) + return success + + def _update_groups(self): + if self.skipped_grp_specs: + self.base.read_comps(arch_filter=True) + self.base.env_group_upgrade(self.skipped_grp_specs) + return True + return False diff --git a/dnf/cli/commands/upgrademinimal.py b/dnf/cli/commands/upgrademinimal.py new file mode 100644 index 0000000..a770e2b --- /dev/null +++ b/dnf/cli/commands/upgrademinimal.py @@ -0,0 +1,41 @@ +# +# Copyright (C) 2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ +from dnf.cli.commands.upgrade import UpgradeCommand + + +class UpgradeMinimalCommand(UpgradeCommand): + """A class containing methods needed by the cli to execute the check + command. 
+ """ + + aliases = ('upgrade-minimal', 'update-minimal', 'up-min') + summary = _("upgrade, but only 'newest' package match which fixes a problem" + " that affects your system") + + def configure(self): + UpgradeCommand.configure(self) + + self.upgrade_minimal = True + if not any([self.opts.bugfix, self.opts.enhancement, + self.opts.newpackage, self.opts.security, self.opts.advisory, + self.opts.bugzilla, self.opts.cves, self.opts.severity]): + self.all_security = True diff --git a/dnf/cli/completion_helper.py.in b/dnf/cli/completion_helper.py.in new file mode 100644 index 0000000..3512267 --- /dev/null +++ b/dnf/cli/completion_helper.py.in @@ -0,0 +1,203 @@ +#!@PYTHON_EXECUTABLE@ +# +# This file is part of dnf. +# +# Copyright 2015 (C) Igor Gnatenko +# Copyright 2016 (C) Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA + +import dnf.exceptions +import dnf.cli +import dnf.cli.commands.clean +import sys + + +def filter_list_by_kw(kw, lst): + return filter(lambda k: str(k).startswith(kw), lst) + +def listpkg_to_setstr(pkgs): + return set([str(x) for x in pkgs]) + +class RemoveCompletionCommand(dnf.cli.commands.remove.RemoveCommand): + def __init__(self, args): + super(RemoveCompletionCommand, self).__init__(args) + + def configure(self): + self.cli.demands.root_user = False + self.cli.demands.sack_activation = True + + def run(self): + for pkg in ListCompletionCommand.installed(self.base, self.opts.pkg_specs): + print(str(pkg)) + + +class InstallCompletionCommand(dnf.cli.commands.install.InstallCommand): + def __init__(self, args): + super(InstallCompletionCommand, self).__init__(args) + + def configure(self): + self.cli.demands.root_user = False + self.cli.demands.available_repos = True + self.cli.demands.sack_activation = True + + def run(self): + installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base, + self.opts.pkg_specs)) + available = listpkg_to_setstr(ListCompletionCommand.available(self.base, + self.opts.pkg_specs)) + for pkg in (available - installed): + print(str(pkg)) + + +class ReinstallCompletionCommand(dnf.cli.commands.reinstall.ReinstallCommand): + def __init__(self, args): + super(ReinstallCompletionCommand, self).__init__(args) + + def configure(self): + self.cli.demands.root_user = False + self.cli.demands.available_repos = True + self.cli.demands.sack_activation = True + + def run(self): + installed = listpkg_to_setstr(ListCompletionCommand.installed(self.base, + self.opts.pkg_specs)) + available = listpkg_to_setstr(ListCompletionCommand.available(self.base, + self.opts.pkg_specs)) + for pkg in (installed & available): + 
print(str(pkg)) + +class ListCompletionCommand(dnf.cli.commands.ListCommand): + def __init__(self, args): + super(ListCompletionCommand, self).__init__(args) + + def run(self): + subcmds = self.pkgnarrows + args = self.opts.packages + action = self.opts.packages_action + if len(args) > 1 and args[1] not in subcmds: + print("\n".join(filter_list_by_kw(args[1], subcmds))) + else: + if action == "installed": + pkgs = self.installed(self.base, args) + elif action == "available": + pkgs = self.available(self.base, args) + elif action == "updates": + pkgs = self.updates(self.base, args) + else: + available = listpkg_to_setstr(self.available(self.base, args)) + installed = listpkg_to_setstr(self.installed(self.base, args)) + pkgs = (available | installed) + if not pkgs: + print("\n".join(filter_list_by_kw(args[0], subcmds))) + return + for pkg in pkgs: + print(str(pkg)) + + @staticmethod + def installed(base, arg): + return base.sack.query().installed().filterm(name__glob="{}*".format(arg[0])) + + @staticmethod + def available(base, arg): + return base.sack.query().available().filterm(name__glob="{}*".format(arg[0])) + + @staticmethod + def updates(base, arg): + return base.check_updates(["{}*".format(arg[0])], print_=False) + + +class RepoListCompletionCommand(dnf.cli.commands.repolist.RepoListCommand): + def __init__(self, args): + super(RepoListCompletionCommand, self).__init__(args) + + def run(self): + args = self.opts + if args.repos_action == "enabled": + print("\n".join(filter_list_by_kw(args.repos[0], + [r.id for r in self.base.repos.iter_enabled()]))) + elif args.repos_action == "disabled": + print("\n".join(filter_list_by_kw(args.repos[0], + [r.id for r in self.base.repos.all() if not r.enabled]))) + elif args.repos_action == "all": + print("\n".join(filter_list_by_kw(args.repos[0], + [r.id for r in self.base.repos.all()]))) + + +class UpgradeCompletionCommand(dnf.cli.commands.upgrade.UpgradeCommand): + def __init__(self, args): + 
super(UpgradeCompletionCommand, self).__init__(args) + + def configure(self): + self.cli.demands.root_user = False + self.cli.demands.available_repos = True + self.cli.demands.sack_activation = True + + def run(self): + for pkg in ListCompletionCommand.updates(self.base, self.opts.pkg_specs): + print(str(pkg)) + + +class DowngradeCompletionCommand(dnf.cli.commands.downgrade.DowngradeCommand): + def __init__(self, args): + super(DowngradeCompletionCommand, self).__init__(args) + + def configure(self): + self.cli.demands.root_user = False + self.cli.demands.available_repos = True + self.cli.demands.sack_activation = True + + def run(self): + for pkg in ListCompletionCommand.available(self.base, self.opts.pkg_specs).downgrades(): + print(str(pkg)) + + +class CleanCompletionCommand(dnf.cli.commands.clean.CleanCommand): + def __init__(self, args): + super(CleanCompletionCommand, self).__init__(args) + + def run(self): + subcmds = dnf.cli.commands.clean._CACHE_TYPES.keys() + print("\n".join(filter_list_by_kw(self.opts.type[1], subcmds))) + + +def main(args): + base = dnf.cli.cli.BaseCli() + cli = dnf.cli.Cli(base) + if args[0] == "_cmds": + base.init_plugins([], [], cli) + print("\n".join(filter_list_by_kw(args[1], cli.cli_commands))) + return + cli.cli_commands.clear() + cli.register_command(RemoveCompletionCommand) + cli.register_command(InstallCompletionCommand) + cli.register_command(ReinstallCompletionCommand) + cli.register_command(ListCompletionCommand) + cli.register_command(RepoListCompletionCommand) + cli.register_command(UpgradeCompletionCommand) + cli.register_command(DowngradeCompletionCommand) + cli.register_command(CleanCompletionCommand) + cli.configure(args) + try: + cli.run() + except dnf.exceptions.Error: + sys.exit(0) + +if __name__ == "__main__": + try: + main(sys.argv[1:]) + except KeyboardInterrupt: + sys.exit(1) diff --git a/dnf/cli/demand.py b/dnf/cli/demand.py new file mode 100644 index 0000000..f82a75b --- /dev/null +++ b/dnf/cli/demand.py @@ 
-0,0 +1,65 @@ +# demand.py +# Demand sheet and related classes. +# +# Copyright (C) 2014-2015 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import unicode_literals + + +class _BoolDefault(object): + def __init__(self, default): + self.default = default + self._storing_name = '__%s%x' % (self.__class__.__name__, id(self)) + + def __get__(self, obj, objtype=None): + objdict = obj.__dict__ + if self._storing_name in objdict: + return objdict[self._storing_name] + return self.default + + def __set__(self, obj, val): + objdict = obj.__dict__ + if self._storing_name in objdict: + current_val = objdict[self._storing_name] + if current_val != val: + raise AttributeError('Demand already set.') + objdict[self._storing_name] = val + +class DemandSheet(object): + """Collection of demands that different CLI parts have on other parts. :api""" + + # :api... 
+ allow_erasing = _BoolDefault(False) + available_repos = _BoolDefault(False) + resolving = _BoolDefault(False) + root_user = _BoolDefault(False) + sack_activation = _BoolDefault(False) + load_system_repo = _BoolDefault(True) + success_exit_status = 0 + + cacheonly = _BoolDefault(False) + fresh_metadata = _BoolDefault(True) + freshest_metadata = _BoolDefault(False) + changelogs = _BoolDefault(False) + + transaction_display = None + + # This demand controlls applicability of the plugins that could filter + # repositories packages (e.g. versionlock). + # If it stays None, the demands.resolving is used as a fallback. + plugin_filtering_enabled = _BoolDefault(None) diff --git a/dnf/cli/format.py b/dnf/cli/format.py new file mode 100644 index 0000000..404124f --- /dev/null +++ b/dnf/cli/format.py @@ -0,0 +1,104 @@ +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. + +from __future__ import unicode_literals +from dnf.pycomp import long + +def format_number(number, SI=0, space=' '): + """Return a human-readable metric-like string representation + of a number. 
+ + :param number: the number to be converted to a human-readable form + :param SI: If is 0, this function will use the convention + that 1 kilobyte = 1024 bytes, otherwise, the convention + that 1 kilobyte = 1000 bytes will be used + :param space: string that will be placed between the number + and the SI prefix + :return: a human-readable metric-like string representation of + *number* + """ + + # copied from from urlgrabber.progress + symbols = [ ' ', # (none) + 'k', # kilo + 'M', # mega + 'G', # giga + 'T', # tera + 'P', # peta + 'E', # exa + 'Z', # zetta + 'Y'] # yotta + + if SI: step = 1000.0 + else: step = 1024.0 + + thresh = 999 + depth = 0 + max_depth = len(symbols) - 1 + + if number is None: + number = 0.0 + + # we want numbers between 0 and thresh, but don't exceed the length + # of our list. In that event, the formatting will be screwed up, + # but it'll still show the right number. + while number > thresh and depth < max_depth: + depth = depth + 1 + number = number / step + + if isinstance(number, int) or isinstance(number, long): + format = '%i%s%s' + elif number < 9.95: + # must use 9.95 for proper sizing. For example, 9.99 will be + # rounded to 10.0 with the .1f format string (which is too long) + format = '%.1f%s%s' + else: + format = '%.0f%s%s' + + return(format % (float(number or 0), space, symbols[depth])) + +def format_time(seconds, use_hours=0): + """Return a human-readable string representation of a number + of seconds. The string will show seconds, minutes, and + optionally hours. + + :param seconds: the number of seconds to convert to a + human-readable form + :param use_hours: If use_hours is 0, the representation will + be in minutes and seconds. 
Otherwise, it will be in hours, + minutes, and seconds + :return: a human-readable string representation of *seconds* + """ + + # copied from from urlgrabber.progress + if seconds is None or seconds < 0: + if use_hours: return '--:--:--' + else: return '--:--' + elif seconds == float('inf'): + return 'Infinite' + else: + seconds = int(seconds) + minutes = seconds // 60 + seconds = seconds % 60 + if use_hours: + hours = minutes // 60 + minutes = minutes % 60 + return '%02i:%02i:%02i' % (hours, minutes, seconds) + else: + return '%02i:%02i' % (minutes, seconds) + +def indent_block(s): + return '\n'.join(' ' + s for s in s.splitlines()) diff --git a/dnf/cli/main.py b/dnf/cli/main.py new file mode 100644 index 0000000..af89768 --- /dev/null +++ b/dnf/cli/main.py @@ -0,0 +1,198 @@ +# Copyright 2005 Duke University +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +Entrance point for the yum command line interface. 
+""" + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.conf import Conf +from dnf.cli.cli import Cli +from dnf.cli.option_parser import OptionParser +from dnf.i18n import ucd +from dnf.cli.utils import show_lock_owner +from dnf.i18n import _ + +import dnf.cli +import dnf.cli.cli +import dnf.cli.option_parser +import dnf.exceptions +import dnf.i18n +import dnf.logging +import dnf.util +import errno +import logging +import os +import os.path +import sys + +logger = logging.getLogger("dnf") + + +def ex_IOError(e): + logger.log(dnf.logging.SUBDEBUG, '', exc_info=True) + logger.critical(ucd(e)) + return 1 + + +def ex_Error(e): + logger.log(dnf.logging.SUBDEBUG, '', exc_info=True) + if e.value is not None: + logger.critical(_('Error: %s'), ucd(e)) + return 1 + + +def main(args, conf_class=Conf, cli_class=Cli, option_parser_class=OptionParser): + try: + dnf.i18n.setup_stdout() + with dnf.cli.cli.BaseCli(conf_class()) as base: + return _main(base, args, cli_class, option_parser_class) + except dnf.exceptions.ProcessLockError as e: + logger.critical(e.value) + show_lock_owner(e.pid) + return 200 + except dnf.exceptions.LockError as e: + logger.critical(e.value) + return 200 + except dnf.exceptions.DepsolveError as e: + return 1 + except dnf.exceptions.Error as e: + return ex_Error(e) + except IOError as e: + return ex_IOError(e) + except KeyboardInterrupt as e: + logger.critical('{}: {}'.format(type(e).__name__, _("Terminated."))) + return 1 + + +def _main(base, args, cli_class, option_parser): + """Run the dnf program from a command line interface.""" + + # our core object for the cli + base._logging._presetup() + cli = cli_class(base) + + # do our cli parsing and config file setup + # also sanity check the things being passed on the cli + try: + cli.configure(list(map(ucd, args)), option_parser()) + except (IOError, OSError) as e: + return ex_IOError(e) + + return cli_run(cli, base) + + +def 
cli_run(cli, base): + # Try to open the current directory to see if we have + # read and execute access. If not, chdir to / + try: + f = open(".") + except IOError as e: + if e.errno == errno.EACCES: + logger.critical(_('No read/execute access in current directory, moving to /')) + os.chdir("/") + else: + f.close() + + try: + cli.run() + except dnf.exceptions.LockError: + raise + except (IOError, OSError) as e: + return ex_IOError(e) + + if cli.demands.resolving: + try: + ret = resolving(cli, base) + except dnf.exceptions.DepsolveError as e: + ex_Error(e) + msg = "" + if not cli.demands.allow_erasing and base._goal.problem_conflicts(available=True): + msg += _("try to add '{}' to command line to replace conflicting " + "packages").format("--allowerasing") + if cli.base.conf.strict: + if not msg: + msg += _("try to add '{}' to skip uninstallable packages").format( + "--skip-broken") + else: + msg += _(" or '{}' to skip uninstallable packages").format("--skip-broken") + if cli.base.conf.best: + prio = cli.base.conf._get_priority("best") + if prio <= dnf.conf.PRIO_MAINCONFIG: + if not msg: + msg += _("try to add '{}' to use not only best candidate packages").format( + "--nobest") + else: + msg += _(" or '{}' to use not only best candidate packages").format( + "--nobest") + if msg: + logger.info("({})".format(msg)) + raise + if ret: + return ret + + cli.command.run_transaction() + return cli.demands.success_exit_status + + +def resolving(cli, base): + """Perform the depsolve, download and RPM transaction stage.""" + + if base.transaction is None: + base.resolve(cli.demands.allow_erasing) + logger.info(_('Dependencies resolved.')) + + # Run the transaction + displays = [] + if cli.demands.transaction_display is not None: + displays.append(cli.demands.transaction_display) + try: + base.do_transaction(display=displays) + except dnf.cli.CliError as exc: + logger.error(ucd(exc)) + return 1 + except dnf.exceptions.TransactionCheckError as err: + for msg in 
cli.command.get_error_output(err): + logger.critical(msg) + return 1 + except IOError as e: + return ex_IOError(e) + else: + logger.info(_('Complete!')) + return 0 + + +def user_main(args, exit_code=False): + """Call one of the multiple main() functions based on environment variables. + + :param args: command line arguments passed into yum + :param exit_code: if *exit_code* is True, this function will exit + python with its exit code when it has finished executing. + Otherwise, it will return its exit code. + :return: the exit code from dnf.yum execution + """ + + errcode = main(args) + if exit_code: + sys.exit(errcode) + return errcode + + +if __name__ == "__main__": + user_main(sys.argv[1:], exit_code=True) diff --git a/dnf/cli/option_parser.py b/dnf/cli/option_parser.py new file mode 100644 index 0000000..9543b54 --- /dev/null +++ b/dnf/cli/option_parser.py @@ -0,0 +1,426 @@ +# optparse.py +# CLI options parser. +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import unicode_literals +from dnf.i18n import _ +from dnf.util import _parse_specs + +import argparse +import dnf.exceptions +import dnf.util +import dnf.rpm +import dnf.yum.misc +import logging +import os.path +import re +import sys + +logger = logging.getLogger("dnf") + + +class MultilineHelpFormatter(argparse.HelpFormatter): + def _split_lines(self, text, width): + if '\n' in text: + return text.splitlines() + return super(MultilineHelpFormatter, self)._split_lines(text, width) + +class OptionParser(argparse.ArgumentParser): + """ArgumentParser like class to do things the "yum way".""" + + def __init__(self, reset_usage=True): + super(OptionParser, self).__init__(add_help=False, + formatter_class=MultilineHelpFormatter) + self.command_positional_parser = None + self.command_group = None + self._add_general_options() + if reset_usage: + self._cmd_usage = {} # names, summary for dnf commands, to build usage + self._cmd_groups = set() # cmd groups added (main, plugin) + + def error(self, msg): + """Output an error message, and exit the program. + This method overrides standard argparser's error + so that error output goes to the logger. + + :param msg: the error message to output + """ + self.print_usage() + logger.critical(_("Command line error: %s"), msg) + sys.exit(1) + + class _RepoCallback(argparse.Action): + def __call__(self, parser, namespace, values, opt_str): + operation = 'disable' if opt_str == '--disablerepo' else 'enable' + l = getattr(namespace, self.dest) + l.extend((x, operation) for x in re.split(r'\s*[,\s]\s*', values)) + + class _RepoCallbackEnable(argparse.Action): + def __call__(self, parser, namespace, values, opt_str): + namespace.repos_ed.append((values[0], 'enable')) + setattr(namespace, 'reponame', values) + + class _SplitCallback(argparse._AppendAction): + """ Split all strings in seq, at "," and whitespace. + Returns a new list. 
""" + SPLITTER = r'\s*[,\s]\s*' + + def __call__(self, parser, namespace, values, opt_str): + for val in re.split(self.SPLITTER, values): + super(OptionParser._SplitCallback, + self).__call__(parser, namespace, val, opt_str) + + class _SplitExtendDictCallback(argparse.Action): + """ Split string at "," or whitespace to (key, value). + Extends dict with {key: value}.""" + def __call__(self, parser, namespace, values, opt_str): + try: + key, val = values.split(',') + if not key or not val: + raise ValueError + except ValueError: + msg = _('bad format: %s') % values + raise argparse.ArgumentError(self, msg) + dct = getattr(namespace, self.dest) + dct[key] = val + + class _SetoptsCallback(argparse.Action): + """ Parse setopts arguments and put them into main_ + and repo_.""" + def __call__(self, parser, namespace, values, opt_str): + vals = values.split('=') + if len(vals) > 2: + logger.warning(_("Setopt argument has multiple values: %s"), values) + return + if len(vals) < 2: + logger.warning(_("Setopt argument has no value: %s"), values) + return + k, v = vals + period = k.rfind('.') + if period != -1: + repo = k[:period] + k = k[period+1:] + if hasattr(namespace, 'repo_setopts'): + repoopts = namespace.repo_setopts + else: + repoopts = {} + repoopts.setdefault(repo, {}).setdefault(k, []).append(v) + setattr(namespace, 'repo_' + self.dest, repoopts) + else: + if hasattr(namespace, 'main_setopts'): + mainopts = namespace.main_setopts + else: + mainopts = {} + mainopts.setdefault(k, []).append(v) + setattr(namespace, 'main_' + self.dest, mainopts) + + class ParseSpecGroupFileCallback(argparse.Action): + def __call__(self, parser, namespace, values, opt_str): + _parse_specs(namespace, values) + + class PkgNarrowCallback(argparse.Action): + def __init__(self, *args, **kwargs): + self.pkgnarrow = {} + try: + for k in ['choices', 'default']: + self.pkgnarrow[k] = kwargs[k] + del kwargs[k] + except KeyError as e: + raise TypeError("%s() missing mandatory argument %s" + % 
(self.__class__.__name__, e)) + kwargs['default'] = [] + super(OptionParser.PkgNarrowCallback, self).__init__(*args, **kwargs) + + def __call__(self, parser, namespace, values, opt_str): + dest_action = self.dest + '_action' + if not values or values[0] not in self.pkgnarrow['choices']: + narrow = self.pkgnarrow['default'] + else: + narrow = values.pop(0) + setattr(namespace, dest_action, narrow) + setattr(namespace, self.dest, values) + + class ForceArchAction(argparse.Action): + def __call__(self, parser, namespace, values, opt_str): + namespace.ignorearch = True + namespace.arch = values + + def _add_general_options(self): + """ Standard options known to all dnf subcommands. """ + # All defaults need to be a None, so we can always tell whether the user + # has set something or whether we are getting a default. + general_grp = self.add_argument_group(_('General {prog} options'.format( + prog=dnf.util.MAIN_PROG_UPPER))) + general_grp.add_argument("-c", "--config", dest="config_file_path", + default=None, metavar='[config file]', + help=_("config file location")) + general_grp.add_argument("-q", "--quiet", dest="quiet", + action="store_true", default=None, + help=_("quiet operation")) + general_grp.add_argument("-v", "--verbose", action="store_true", + default=None, help=_("verbose operation")) + general_grp.add_argument("--version", action="store_true", default=None, + help=_("show {prog} version and exit").format( + prog=dnf.util.MAIN_PROG_UPPER)) + general_grp.add_argument("--installroot", help=_("set install root"), + metavar='[path]') + general_grp.add_argument("--nodocs", action="store_const", const=['nodocs'], dest='tsflags', + help=_("do not install documentations")) + general_grp.add_argument("--noplugins", action="store_false", + default=None, dest='plugins', + help=_("disable all plugins")) + general_grp.add_argument("--enableplugin", dest="enableplugin", + default=[], action=self._SplitCallback, + help=_("enable plugins by name"), + metavar='[plugin]') 
+ general_grp.add_argument("--disableplugin", dest="disableplugin", + default=[], action=self._SplitCallback, + help=_("disable plugins by name"), + metavar='[plugin]') + general_grp.add_argument("--releasever", default=None, + help=_("override the value of $releasever" + " in config and repo files")) + general_grp.add_argument("--setopt", dest="setopts", default=[], + action=self._SetoptsCallback, + help=_("set arbitrary config and repo options")) + general_grp.add_argument("--skip-broken", dest="skip_broken", action="store_true", + default=None, + help=_("resolve depsolve problems by skipping packages")) + general_grp.add_argument('-h', '--help', '--help-cmd', + action="store_true", dest='help', + help=_("show command help")) + + general_grp.add_argument('--allowerasing', action='store_true', + default=None, + help=_('allow erasing of installed packages to ' + 'resolve dependencies')) + best_group = general_grp.add_mutually_exclusive_group() + best_group.add_argument("-b", "--best", action="store_true", dest='best', default=None, + help=_("try the best available package versions in transactions.")) + best_group.add_argument("--nobest", action="store_false", dest='best', + help=_("do not limit the transaction to the best candidate")) + general_grp.add_argument("-C", "--cacheonly", dest="cacheonly", + action="store_true", default=None, + help=_("run entirely from system cache, " + "don't update cache")) + general_grp.add_argument("-R", "--randomwait", dest="sleeptime", type=int, + default=None, metavar='[minutes]', + help=_("maximum command wait time")) + general_grp.add_argument("-d", "--debuglevel", dest="debuglevel", + metavar='[debug level]', default=None, + help=_("debugging output level"), type=int) + general_grp.add_argument("--debugsolver", + action="store_true", default=None, + help=_("dumps detailed solving results into" + " files")) + general_grp.add_argument("--showduplicates", dest="showdupesfromrepos", + action="store_true", default=None, + 
help=_("show duplicates, in repos, " + "in list/search commands")) + general_grp.add_argument("-e", "--errorlevel", default=None, type=int, + help=_("error output level")) + general_grp.add_argument("--obsoletes", default=None, dest="obsoletes", + action="store_true", + help=_("enables {prog}'s obsoletes processing logic " + "for upgrade or display capabilities that " + "the package obsoletes for info, list and " + "repoquery").format(prog=dnf.util.MAIN_PROG)) + general_grp.add_argument("--rpmverbosity", default=None, + help=_("debugging output level for rpm"), + metavar='[debug level name]') + general_grp.add_argument("-y", "--assumeyes", action="store_true", + default=None, help=_("automatically answer yes" + " for all questions")) + general_grp.add_argument("--assumeno", action="store_true", + default=None, help=_("automatically answer no" + " for all questions")) + general_grp.add_argument("--enablerepo", action=self._RepoCallback, + dest='repos_ed', default=[], metavar='[repo]', + help=_("Enable additional repositories. List option. " + "Supports globs, can be specified multiple times.")) + repo_group = general_grp.add_mutually_exclusive_group() + repo_group.add_argument("--disablerepo", action=self._RepoCallback, + dest='repos_ed', default=[], metavar='[repo]', + help=_("Disable repositories. List option. 
" + "Supports globs, can be specified multiple times.")) + repo_group.add_argument('--repo', '--repoid', metavar='[repo]', dest='repo', + action=self._SplitCallback, default=[], + help=_('enable just specific repositories by an id or a glob, ' + 'can be specified multiple times')) + enable_group = general_grp.add_mutually_exclusive_group() + enable_group.add_argument("--enable", "--set-enabled", default=False, + dest="set_enabled", action="store_true", + help=_("enable repos with config-manager " + "command (automatically saves)")) + enable_group.add_argument("--disable", "--set-disabled", default=False, + dest="set_disabled", action="store_true", + help=_("disable repos with config-manager " + "command (automatically saves)")) + general_grp.add_argument("-x", "--exclude", "--excludepkgs", default=[], + dest='excludepkgs', action=self._SplitCallback, + help=_("exclude packages by name or glob"), + metavar='[package]') + general_grp.add_argument("--disableexcludes", "--disableexcludepkgs", + default=[], dest="disable_excludes", + action=self._SplitCallback, + help=_("disable excludepkgs"), + metavar='[repo]') + general_grp.add_argument("--repofrompath", default={}, + action=self._SplitExtendDictCallback, + metavar='[repo,path]', + help=_("label and path to an additional repository to use (same " + "path as in a baseurl), can be specified multiple times.")) + general_grp.add_argument("--noautoremove", action="store_false", + default=None, dest='clean_requirements_on_remove', + help=_("disable removal of dependencies that are no longer used")) + general_grp.add_argument("--nogpgcheck", action="store_false", + default=None, dest='gpgcheck', + help=_("disable gpg signature checking (if RPM policy allows)")) + general_grp.add_argument("--color", dest="color", default=None, + help=_("control whether color is used")) + general_grp.add_argument("--refresh", dest="freshest_metadata", + action="store_true", + help=_("set metadata as expired before running" + " the command")) 
+ general_grp.add_argument("-4", dest="ip_resolve", default=None, + help=_("resolve to IPv4 addresses only"), + action="store_const", const='ipv4') + general_grp.add_argument("-6", dest="ip_resolve", default=None, + help=_("resolve to IPv6 addresses only"), + action="store_const", const='ipv6') + general_grp.add_argument("--destdir", "--downloaddir", dest="destdir", default=None, + help=_("set directory to copy packages to")) + general_grp.add_argument("--downloadonly", dest="downloadonly", + action="store_true", default=False, + help=_("only download packages")) + general_grp.add_argument("--comment", dest="comment", default=None, + help=_("add a comment to transaction")) + # Updateinfo options... + general_grp.add_argument("--bugfix", action="store_true", + help=_("Include bugfix relevant packages, " + "in updates")) + general_grp.add_argument("--enhancement", action="store_true", + help=_("Include enhancement relevant packages," + " in updates")) + general_grp.add_argument("--newpackage", action="store_true", + help=_("Include newpackage relevant packages," + " in updates")) + general_grp.add_argument("--security", action="store_true", + help=_("Include security relevant packages, " + "in updates")) + general_grp.add_argument("--advisory", "--advisories", dest="advisory", + default=[], action=self._SplitCallback, + help=_("Include packages needed to fix the " + "given advisory, in updates")) + general_grp.add_argument("--bz", "--bzs", default=[], dest="bugzilla", + action=self._SplitCallback, help=_( + "Include packages needed to fix the given BZ, in updates")) + general_grp.add_argument("--cve", "--cves", default=[], dest="cves", + action=self._SplitCallback, + help=_("Include packages needed to fix the given CVE, in updates")) + general_grp.add_argument( + "--sec-severity", "--secseverity", + choices=['Critical', 'Important', 'Moderate', 'Low'], default=[], + dest="severity", action=self._SplitCallback, help=_( + "Include security relevant packages matching 
the severity, " + "in updates")) + general_grp.add_argument("--forcearch", metavar="ARCH", + dest=argparse.SUPPRESS, + action=self.ForceArchAction, + choices=sorted(dnf.rpm._BASEARCH_MAP.keys()), + help=_("Force the use of an architecture")) + general_grp.add_argument('command', nargs='?', help=argparse.SUPPRESS) + + def _add_cmd_usage(self, cmd, group): + """ store usage info about a single dnf command.""" + summary = dnf.i18n.ucd(cmd.summary) + name = dnf.i18n.ucd(cmd.aliases[0]) + if not name in self._cmd_usage: + self._cmd_usage[name] = (group, summary) + self._cmd_groups.add(group) + + def add_commands(self, cli_cmds, group): + """ store name & summary for dnf commands + + The stored information is used build usage information + grouped by build-in & plugin commands. + """ + for cmd in set(cli_cmds.values()): + self._add_cmd_usage(cmd, group) + + def get_usage(self): + """ get the usage information to show the user. """ + desc = {'main': _('List of Main Commands:'), + 'plugin': _('List of Plugin Commands:')} + usage = '%s [options] COMMAND\n' % dnf.util.MAIN_PROG + for grp in ['main', 'plugin']: + if not grp in self._cmd_groups: + # dont add plugin usage, if we dont have plugins + continue + usage += "\n%s\n\n" % desc[grp] + for name in sorted(self._cmd_usage.keys()): + group, summary = self._cmd_usage[name] + if group == grp: + usage += "%-25s %s\n" % (name, summary) + return usage + + def _add_command_options(self, command): + self.prog = "%s %s" % (dnf.util.MAIN_PROG, command._basecmd) + self.description = command.summary + self.command_positional_parser = argparse.ArgumentParser(self.prog, add_help=False) + self.command_positional_parser.print_usage = self.print_usage + self.command_positional_parser._positionals.title = None + self.command_group = self.add_argument_group( + '{} command-specific options'.format(command._basecmd.capitalize())) + self.command_group.add_argument = self.cmd_add_argument + self.command_group._command = command._basecmd + 
command.set_argparser(self.command_group) + + def cmd_add_argument(self, *args, **kwargs): + if all([(arg[0] in self.prefix_chars) for arg in args]): + return type(self.command_group).add_argument(self.command_group, *args, **kwargs) + else: + return self.command_positional_parser.add_argument(*args, **kwargs) + + def parse_main_args(self, args): + namespace, _unused_args = self.parse_known_args(args) + return namespace + + def parse_command_args(self, command, args): + self._add_command_options(command) + namespace, unused_args = self.parse_known_args(args) + namespace = self.command_positional_parser.parse_args(unused_args, namespace) + command.opts = namespace + return command.opts + + def print_usage(self, file_=None): + if self.command_positional_parser: + self._actions += self.command_positional_parser._actions + super(OptionParser, self).print_usage(file_) + + def print_help(self, command=None): + # pylint: disable=W0212 + if command: + if not self.command_group or self.command_group._command != command._basecmd: + self._add_command_options(command) + self._actions += self.command_positional_parser._actions + self._action_groups.append(self.command_positional_parser._positionals) + else: + self.usage = self.get_usage() + super(OptionParser, self).print_help() diff --git a/dnf/cli/output.py b/dnf/cli/output.py new file mode 100644 index 0000000..a03df61 --- /dev/null +++ b/dnf/cli/output.py @@ -0,0 +1,2312 @@ +# Copyright 2005 Duke University +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +"""Handle actual output from the cli.""" + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +from copy import deepcopy +import fnmatch +import hawkey +import itertools +import libdnf.transaction +import logging +import operator +import pwd +import re +import sys +import time + +from dnf.cli.format import format_number, format_time +from dnf.i18n import _, C_, P_, ucd, fill_exact_width, textwrap_fill, exact_width, select_short_long +from dnf.pycomp import xrange, basestring, long, unicode, sys_maxsize +from dnf.yum.rpmtrans import LoggingTransactionDisplay +from dnf.db.history import MergedTransactionWrapper +import dnf.base +import dnf.callback +import dnf.cli.progress +import dnf.cli.term +import dnf.conf +import dnf.crypto +import dnf.i18n +import dnf.transaction +import dnf.util +import dnf.yum.misc + +logger = logging.getLogger('dnf') + +def _make_lists(transaction, goal): + b = dnf.util.Bunch({ + 'downgraded': [], + 'erased': [], + 'erased_clean': [], + 'erased_dep': [], + 'installed': [], + 'installed_group': [], + 'installed_dep': [], + 'installed_weak': [], + 'reinstalled': [], + 'upgraded': [], + 'failed': [], + }) + + for tsi in transaction: + if tsi.state == libdnf.transaction.TransactionItemState_ERROR: + b.failed.append(tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE: + b.downgraded.append(tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL: + if tsi.reason == libdnf.transaction.TransactionItemReason_GROUP: + b.installed_group.append(tsi) + elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY: + b.installed_dep.append(tsi) + elif 
tsi.reason == libdnf.transaction.TransactionItemReason_WEAK_DEPENDENCY: + b.installed_weak.append(tsi) + else: + # TransactionItemReason_USER + b.installed.append(tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL: + b.reinstalled.append(tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE: + if tsi.reason == libdnf.transaction.TransactionItemReason_CLEAN: + b.erased_clean.append(tsi) + elif tsi.reason == libdnf.transaction.TransactionItemReason_DEPENDENCY: + b.erased_dep.append(tsi) + else: + b.erased.append(tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE: + b.upgraded.append(tsi) + + return b + + +def _spread_in_columns(cols_count, label, lst): + left = itertools.chain((label,), itertools.repeat('')) + lst_length = len(lst) + right_count = cols_count - 1 + missing_items = -lst_length % right_count + if not lst_length: + lst = itertools.repeat('', right_count) + elif missing_items: + lst.extend(('',) * missing_items) + lst_iter = iter(lst) + return list(zip(left, *[lst_iter] * right_count)) + + +class Output(object): + """Main output class for the yum command line.""" + + GRP_PACKAGE_INDENT = ' ' * 3 + FILE_PROVIDE_RE = re.compile(r'^\*{0,2}/') + + def __init__(self, base, conf): + self.conf = conf + self.base = base + self.term = dnf.cli.term.Term() + self.progress = None + + def _banner(self, col_data, row): + term_width = self.term.columns + rule = '%s' % '=' * term_width + header = self.fmtColumns(zip(row, col_data), ' ') + return rule, header, rule + + def _col_widths(self, rows): + col_data = [dict() for _ in rows[0]] + for row in rows: + for (i, val) in enumerate(row): + col_dct = col_data[i] + length = len(val) + col_dct[length] = col_dct.get(length, 0) + 1 + cols = self.calcColumns(col_data, None, indent=' ') + # align to the left + return list(map(operator.neg, cols)) + + def _highlight(self, highlight): + hibeg = '' + hiend = '' + if not highlight: + pass + elif not 
isinstance(highlight, basestring) or highlight == 'bold': + hibeg = self.term.MODE['bold'] + elif highlight == 'normal': + pass # Minor opt. + else: + # Turn a string into a specific output: colour, bold, etc. + for high in highlight.replace(',', ' ').split(): + if high == 'normal': + hibeg = '' + elif high in self.term.MODE: + hibeg += self.term.MODE[high] + elif high in self.term.FG_COLOR: + hibeg += self.term.FG_COLOR[high] + elif (high.startswith('fg:') and + high[3:] in self.term.FG_COLOR): + hibeg += self.term.FG_COLOR[high[3:]] + elif (high.startswith('bg:') and + high[3:] in self.term.BG_COLOR): + hibeg += self.term.BG_COLOR[high[3:]] + + if hibeg: + hiend = self.term.MODE['normal'] + return (hibeg, hiend) + + def _sub_highlight(self, haystack, highlight, needles, **kwds): + hibeg, hiend = self._highlight(highlight) + return self.term.sub(haystack, hibeg, hiend, needles, **kwds) + + @staticmethod + def _calc_columns_spaces_helps(current, data_tups, left): + """ Spaces left on the current field will help how many pkgs? """ + ret = 0 + for tup in data_tups: + if left < (tup[0] - current): + break + ret += tup[1] + return ret + + @property + def history(self): + return self.base.history + + @property + def sack(self): + return self.base.sack + + def calcColumns(self, data, columns=None, remainder_column=0, + total_width=None, indent=''): + """Dynamically calculate the widths of the columns that the + fields in data should be placed into for output. + + :param data: a list of dictionaries that represent the data to + be output. Each dictionary in the list corresponds to a + column of output. The keys of the dictionary are the + lengths of the items to be output, and the value associated + with a key is the number of items of that length. + :param columns: a list containing the minimum amount of space + that must be allocated for each row. 
This can be used to + ensure that there is space available in a column if, for + example, the actual lengths of the items being output + cannot be given in *data* + :param remainder_column: number of the column to receive a few + extra spaces that may remain after other allocation has + taken place + :param total_width: the total width of the output. + self.term.real_columns is used by default + :param indent: string that will be prefixed to a line of + output to create e.g. an indent + :return: a list of the widths of the columns that the fields + in data should be placed into for output + """ + cols = len(data) + # Convert the data to ascending list of tuples, (field_length, pkgs) + pdata = data + data = [None] * cols # Don't modify the passed in data + for d in range(0, cols): + data[d] = sorted(pdata[d].items()) + + if total_width is None: + total_width = self.term.real_columns + + # i'm not able to get real terminal width so i'm probably + # running in non interactive terminal (pipe to grep, redirect to file...) + # avoid splitting lines to enable filtering output + if not total_width: + full_columns = [] + for col in data: + if col: + full_columns.append(col[-1][0]) + else: + full_columns.append(0) + full_columns[0] += len(indent) + # if possible, try to keep default width (usually 80 columns) + default_width = self.term.columns + if sum(full_columns) > default_width: + return full_columns + total_width = default_width + + # We start allocating 1 char to everything but the last column, and a + # space between each (again, except for the last column). Because + # at worst we are better with: + # |one two three| + # | four | + # ...than: + # |one two three| + # | f| + # |our | + # ...the later being what we get if we pre-allocate the last column, and + # thus. the space, due to "three" overflowing it's column by 2 chars. 
+ if columns is None: + columns = [1] * (cols - 1) + columns.append(0) + + total_width -= (sum(columns) + (cols - 1) + exact_width(indent)) + if not columns[-1]: + total_width += 1 + while total_width > 0: + # Find which field all the spaces left will help best + helps = 0 + val = 0 + for d in xrange(0, cols): + thelps = self._calc_columns_spaces_helps(columns[d], data[d], + total_width) + if not thelps: + continue + # We prefer to overflow: the last column, and then earlier + # columns. This is so that in the best case (just overflow the + # last) ... grep still "works", and then we make it prettier. + if helps and (d == (cols - 1)) and (thelps / 2) < helps: + continue + if thelps < helps: + continue + helps = thelps + val = d + + # If we found a column to expand, move up to the next level with + # that column and start again with any remaining space. + if helps: + diff = data[val].pop(0)[0] - columns[val] + if not columns[val] and (val == (cols - 1)): + # If we are going from 0 => N on the last column, take 1 + # for the space before the column. + total_width -= 1 + columns[val] += diff + total_width -= diff + continue + + overflowed_columns = 0 + for d in xrange(0, cols): + if not data[d]: + continue + overflowed_columns += 1 + if overflowed_columns: + # Split the remaining spaces among each overflowed column + # equally + norm = total_width // overflowed_columns + for d in xrange(0, cols): + if not data[d]: + continue + columns[d] += norm + total_width -= norm + + # Split the remaining spaces among each column equally, except the + # last one. 
And put the rest into the remainder column + cols -= 1 + norm = total_width // cols + for d in xrange(0, cols): + columns[d] += norm + columns[remainder_column] += total_width - (cols * norm) + total_width = 0 + + return columns + + @staticmethod + def _fmt_column_align_width(width): + """Returns tuple of (align_left, width)""" + if width < 0: + return (True, -width) + return (False, width) + + def _col_data(self, col_data): + assert len(col_data) == 2 or len(col_data) == 3 + if len(col_data) == 2: + (val, width) = col_data + hibeg = hiend = '' + if len(col_data) == 3: + (val, width, highlight) = col_data + (hibeg, hiend) = self._highlight(highlight) + return (ucd(val), width, hibeg, hiend) + + def fmtColumns(self, columns, msg=u'', end=u''): + """Return a row of data formatted into a string for output. + Items can overflow their columns. + + :param columns: a list of tuples containing the data to + output. Each tuple contains first the item to be output, + then the amount of space allocated for the column, and then + optionally a type of highlighting for the item + :param msg: a string to begin the line of output with + :param end: a string to end the line of output with + :return: a row of data formatted into a string for output + """ + columns = list(columns) + total_width = len(msg) + data = [] + for col_data in columns[:-1]: + (val, width, hibeg, hiend) = self._col_data(col_data) + + if not width: # Don't count this column, invisible text + msg += u"%s" + data.append(val) + continue + + (align_left, width) = self._fmt_column_align_width(width) + val_width = exact_width(val) + if val_width <= width: + # Don't use fill_exact_width() because it sucks performance + # wise for 1,000s of rows. Also allows us to use len(), when + # we can. 
+ msg += u"%s%s%s%s " + if align_left: + data.extend([hibeg, val, " " * (width - val_width), hiend]) + else: + data.extend([hibeg, " " * (width - val_width), val, hiend]) + else: + msg += u"%s%s%s\n" + " " * (total_width + width + 1) + data.extend([hibeg, val, hiend]) + total_width += width + total_width += 1 + (val, width, hibeg, hiend) = self._col_data(columns[-1]) + (align_left, width) = self._fmt_column_align_width(width) + val = fill_exact_width(val, width, left=align_left, + prefix=hibeg, suffix=hiend) + msg += u"%%s%s" % end + data.append(val) + return msg % tuple(data) + + def simpleList(self, pkg, ui_overflow=False, indent='', highlight=False, + columns=None): + """Print a package as a line. + + :param pkg: the package to be printed + :param ui_overflow: unused + :param indent: string to be prefixed onto the line to provide + e.g. an indent + :param highlight: highlighting options for the name of the + package + :param columns: tuple containing the space allocated for each + column of output. The columns are the package name, version, + and repository + """ + if columns is None: + columns = (-40, -22, -16) # Old default + na = '%s%s.%s' % (indent, pkg.name, pkg.arch) + hi_cols = [highlight, 'normal', 'normal'] + + columns = zip((na, pkg.evr, pkg._from_repo), columns, hi_cols) + print(self.fmtColumns(columns)) + + def simpleEnvraList(self, pkg, ui_overflow=False, + indent='', highlight=False, columns=None): + """Print a package as a line, with the package itself in envra + format so it can be passed to list/install/etc. + + :param pkg: the package to be printed + :param ui_overflow: unused + :param indent: string to be prefixed onto the line to provide + e.g. an indent + :param highlight: highlighting options for the name of the + package + :param columns: tuple containing the space allocated for each + column of output. 
The columns the are the package envra and + repository + """ + if columns is None: + columns = (-63, -16) # Old default + envra = '%s%s' % (indent, ucd(pkg)) + hi_cols = [highlight, 'normal', 'normal'] + rid = pkg.ui_from_repo + columns = zip((envra, rid), columns, hi_cols) + print(self.fmtColumns(columns)) + + def simple_name_list(self, pkg): + """Print a package as a line containing its name.""" + print(ucd(pkg.name)) + + def simple_nevra_list(self, pkg): + """Print a package as a line containing its NEVRA.""" + print(ucd(pkg)) + + def fmtKeyValFill(self, key, val): + """Return a key value pair in the common two column output + format. + + :param key: the key to be formatted + :param val: the value associated with *key* + :return: the key value pair formatted in two columns for output + """ + keylen = exact_width(key) + cols = self.term.real_columns + if not cols: + cols = sys_maxsize + elif cols < 20: + cols = 20 + nxt = ' ' * (keylen - 2) + ': ' + if not val: + # textwrap.fill in case of empty val returns empty string + return key + val = ucd(val) + ret = textwrap_fill(val, width=cols, initial_indent=key, + subsequent_indent=nxt) + if ret.count("\n") > 1 and keylen > (cols // 3): + # If it's big, redo it again with a smaller subsequent off + ret = textwrap_fill(val, width=cols, initial_indent=key, + subsequent_indent=' ...: ') + return ret + + def fmtSection(self, name, fill='='): + """Format and return a section header. The format of the + header is a line with *name* centered, and *fill* repeated on + either side to fill an entire line on the terminal. + + :param name: the name of the section + :param fill: the character to repeat on either side of *name* + to fill an entire line. *fill* must be a single character. 
+ :return: a string formatted to be a section header + """ + name = ucd(name) + cols = self.term.columns - 2 + name_len = exact_width(name) + if name_len >= (cols - 4): + beg = end = fill * 2 + else: + beg = fill * ((cols - name_len) // 2) + end = fill * (cols - name_len - len(beg)) + + return "%s %s %s" % (beg, name, end) + + def infoOutput(self, pkg, highlight=False): + """Print information about the given package. + + :param pkg: the package to print information about + :param highlight: highlighting options for the name of the + package + """ + def format_key_val(key, val): + return " ".join([fill_exact_width(key, 12, 12), ":", str(val)]) + + def format_key_val_fill(key, val): + return self.fmtKeyValFill(fill_exact_width(key, 12, 12) + " : ", val or "") + + output_list = [] + (hibeg, hiend) = self._highlight(highlight) + # Translators: This is abbreviated 'Name'. Should be no longer + # than 12 characters. You can use the full version if it is short + # enough in your language. + key = select_short_long(12, C_("short", "Name"), + C_("long", "Name")) + output_list.append(format_key_val(key, + "%s%s%s" % (hibeg, pkg.name, hiend))) + if pkg.epoch: + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val(_("Epoch"), pkg.epoch)) + key = select_short_long(12, C_("short", "Version"), + C_("long", "Version")) + output_list.append(format_key_val(key, pkg.version)) + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val(_("Release"), pkg.release)) + key = select_short_long(12, C_("short", "Arch"), + C_("long", "Architecture")) + output_list.append(format_key_val(key, pkg.arch)) + key = select_short_long(12, C_("short", "Size"), C_("long", "Size")) + output_list.append(format_key_val(key, + format_number(float(pkg._size)))) + # Translators: This message should be no longer than 12 characters. 
+ output_list.append(format_key_val(_("Source"), pkg.sourcerpm)) + key = select_short_long(12, C_("short", "Repo"), + C_("long", "Repository")) + output_list.append(format_key_val(key, pkg.repoid)) + + if pkg._from_system: + history_repo = self.history.repo(pkg) + if history_repo: + # Translators: This message should be no longer than 12 chars. + output_list.append(format_key_val(_("From repo"), history_repo)) + if self.conf.verbose: + # :hawkey does not support changelog information + # print(_("Committer : %s") % ucd(pkg.committer)) + # print(_("Committime : %s") % time.ctime(pkg.committime)) + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val(_("Packager"), pkg.packager)) + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val(_("Buildtime"), + dnf.util.normalize_time(pkg.buildtime))) + if pkg.installtime: + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val(_("Install time"), + dnf.util.normalize_time(pkg.installtime))) + history_pkg = self.history.package_data(pkg) + if history_pkg: + try: + uid = int(history_pkg._item.getInstalledBy()) + except ValueError: # In case int() fails + uid = None + # Translators: This message should be no longer than 12 chars. + output_list.append(format_key_val(_("Installed by"), self._pwd_ui_username(uid))) + # Translators: This is abbreviated 'Summary'. Should be no longer + # than 12 characters. You can use the full version if it is short + # enough in your language. + key = select_short_long(12, C_("short", "Summary"), + C_("long", "Summary")) + output_list.append(format_key_val_fill(key, pkg.summary)) + if pkg.url: + output_list.append(format_key_val(_("URL"), ucd(pkg.url))) + # Translators: This message should be no longer than 12 characters. + output_list.append(format_key_val_fill(_("License"), pkg.license)) + # Translators: This is abbreviated 'Description'. 
Should be no longer + # than 12 characters. You can use the full version if it is short + # enough in your language. + key = select_short_long(12, C_("short", "Description"), + C_("long", "Description")) + output_list.append(format_key_val_fill(key, pkg.description)) + return "\n".join(output_list) + + def updatesObsoletesList(self, uotup, changetype, columns=None): + """Print a simple string that explains the relationship + between the members of an update or obsoletes tuple. + + :param uotup: an update or obsoletes tuple. The first member + is the new package, and the second member is the old + package + :param changetype: a string indicating what the change between + the packages is, e.g. 'updates' or 'obsoletes' + :param columns: a tuple containing information about how to + format the columns of output. The absolute value of each + number in the tuple indicates how much space has been + allocated for the corresponding column. If the number is + negative, the text in the column will be left justified, + and if it is positive, the text will be right justified. + The columns of output are the package name, version, and repository + """ + (changePkg, instPkg) = uotup + + if columns is not None: + # New style, output all info. for both old/new with old indented + chi = self.conf.color_update_remote + if changePkg.reponame != hawkey.SYSTEM_REPO_NAME: + chi = self.conf.color_update_local + self.simpleList(changePkg, columns=columns, highlight=chi) + self.simpleList(instPkg, columns=columns, indent=' ' * 4, + highlight=self.conf.color_update_installed) + return + + # Old style + c_compact = changePkg.compactPrint() + i_compact = '%s.%s' % (instPkg.name, instPkg.arch) + c_repo = changePkg.repoid + print('%-35.35s [%.12s] %.10s %-20.20s' % + (c_compact, c_repo, changetype, i_compact)) + + def listPkgs(self, lst, description, outputType, highlight_na={}, + columns=None, highlight_modes={}): + """Prints information about the given list of packages. 
+ + :param lst: a list of packages to print information about + :param description: string describing what the list of + packages contains, e.g. 'Available Packages' + :param outputType: The type of information to be printed. + Current options:: + + 'list' - simple pkg list + 'info' - similar to rpm -qi output + 'name' - simple name list + 'nevra' - simple nevra list + :param highlight_na: a dictionary containing information about + packages that should be highlighted in the output. The + dictionary keys are (name, arch) tuples for the package, + and the associated values are the package objects + themselves. + :param columns: a tuple containing information about how to + format the columns of output. The absolute value of each + number in the tuple indicates how much space has been + allocated for the corresponding column. If the number is + negative, the text in the column will be left justified, + and if it is positive, the text will be right justified. + The columns of output are the package name, version, and + repository + :param highlight_modes: dictionary containing information + about to highlight the packages in *highlight_na*. 
+ *highlight_modes* should contain the following keys:: + + 'not_in' - highlighting used for packages not in *highlight_na* + '=' - highlighting used when the package versions are equal + '<' - highlighting used when the package has a lower version + number + '>' - highlighting used when the package has a higher version + number + :return: (exit_code, [errors]) + + exit_code is:: + + 0 = we're done, exit + 1 = we've errored, exit with error string + + """ + if outputType in ['list', 'info', 'name', 'nevra']: + thingslisted = 0 + if len(lst) > 0: + thingslisted = 1 + print('%s' % description) + info_set = set() + if outputType == 'list': + unique_item_dict = {} + for pkg in lst: + unique_item_dict[str(pkg) + str(pkg._from_repo)] = pkg + + lst = unique_item_dict.values() + + for pkg in sorted(lst): + key = (pkg.name, pkg.arch) + highlight = False + if key not in highlight_na: + highlight = highlight_modes.get('not in', 'normal') + elif pkg.evr_eq(highlight_na[key]): + highlight = highlight_modes.get('=', 'normal') + elif pkg.evr_lt(highlight_na[key]): + highlight = highlight_modes.get('>', 'bold') + else: + highlight = highlight_modes.get('<', 'normal') + + if outputType == 'list': + self.simpleList(pkg, ui_overflow=True, + highlight=highlight, columns=columns) + elif outputType == 'info': + info_set.add(self.infoOutput(pkg, highlight=highlight) + "\n") + elif outputType == 'name': + self.simple_name_list(pkg) + elif outputType == 'nevra': + self.simple_nevra_list(pkg) + else: + pass + + if info_set: + print("\n".join(sorted(info_set))) + + if thingslisted == 0: + return 1, [_('No packages to list')] + return 0, [] + + def userconfirm(self, msg=None, defaultyes_msg=None): + """Get a yes or no from the user, and default to No + + :msg: String for case with [y/N] + :defaultyes_msg: String for case with [Y/n] + :return: True if the user selects yes, and False if the user + selects no + """ + yui = (ucd(_('y')), ucd(_('yes'))) + nui = (ucd(_('n')), ucd(_('no'))) + aui = 
yui + nui + while True: + if msg is None: + msg = _('Is this ok [y/N]: ') + choice = '' + if self.conf.defaultyes: + if defaultyes_msg is None: + msg = _('Is this ok [Y/n]: ') + else: + msg = defaultyes_msg + try: + choice = dnf.i18n.ucd_input(msg) + except EOFError: + pass + except KeyboardInterrupt: + choice = nui[0] + choice = ucd(choice).lower() + if len(choice) == 0: + choice = yui[0] if self.conf.defaultyes else nui[0] + if choice in aui: + break + + # If the English one letter names don't mix with the translated + # letters, allow them too: + if u'y' == choice and u'y' not in aui: + choice = yui[0] + break + if u'n' == choice and u'n' not in aui: + choice = nui[0] + break + + if choice in yui: + return True + return False + + def _pkgs2name_dict(self, sections): + installed = self.sack.query().installed()._name_dict() + available = self.sack.query().available()._name_dict() + + d = {} + for pkg_name in itertools.chain(*list(zip(*sections))[1]): + if pkg_name in installed: + d[pkg_name] = installed[pkg_name][0] + elif pkg_name in available: + d[pkg_name] = available[pkg_name][0] + return d + + def _pkgs2col_lengths(self, sections, name_dict): + nevra_lengths = {} + repo_lengths = {} + for pkg_name in itertools.chain(*list(zip(*sections))[1]): + pkg = name_dict.get(pkg_name) + if pkg is None: + continue + nevra_l = exact_width(ucd(pkg)) + exact_width(self.GRP_PACKAGE_INDENT) + repo_l = exact_width(ucd(pkg.reponame)) + nevra_lengths[nevra_l] = nevra_lengths.get(nevra_l, 0) + 1 + repo_lengths[repo_l] = repo_lengths.get(repo_l, 0) + 1 + return (nevra_lengths, repo_lengths) + + def _display_packages(self, pkg_names): + for name in pkg_names: + print('%s%s' % (self.GRP_PACKAGE_INDENT, name)) + + def _display_packages_verbose(self, pkg_names, name_dict, columns): + for name in pkg_names: + try: + pkg = name_dict[name] + except KeyError: + # package not in any repo -> print only package name + print('%s%s' % (self.GRP_PACKAGE_INDENT, name)) + continue + highlight = 
False + if not pkg._from_system: + highlight = self.conf.color_list_available_install + self.simpleEnvraList(pkg, ui_overflow=True, + indent=self.GRP_PACKAGE_INDENT, + highlight=highlight, + columns=columns) + + def display_pkgs_in_groups(self, group): + """Output information about the packages in a given group + + :param group: a Group object to output information about + """ + def names(packages): + return sorted(pkg.name for pkg in packages) + print('\n' + _('Group: %s') % group.ui_name) + + verbose = self.conf.verbose + if verbose: + print(_(' Group-Id: %s') % ucd(group.id)) + if group.ui_description: + print(_(' Description: %s') % ucd(group.ui_description) or "") + if group.lang_only: + print(_(' Language: %s') % group.lang_only) + + sections = ( + (_(' Mandatory Packages:'), names(group.mandatory_packages)), + (_(' Default Packages:'), names(group.default_packages)), + (_(' Optional Packages:'), names(group.optional_packages)), + (_(' Conditional Packages:'), names(group.conditional_packages))) + if verbose: + name_dict = self._pkgs2name_dict(sections) + col_lengths = self._pkgs2col_lengths(sections, name_dict) + columns = self.calcColumns(col_lengths) + columns = (-columns[0], -columns[1]) + for (section_name, packages) in sections: + if len(packages) < 1: + continue + print(section_name) + self._display_packages_verbose(packages, name_dict, columns) + else: + for (section_name, packages) in sections: + if len(packages) < 1: + continue + print(section_name) + self._display_packages(packages) + + def display_groups_in_environment(self, environment): + """Output information about the packages in a given environment + + :param environment: an Environment object to output information about + """ + def names(groups): + return sorted(group.name for group in groups) + print(_('Environment Group: %s') % environment.ui_name) + + if self.conf.verbose: + print(_(' Environment-Id: %s') % ucd(environment.id)) + if environment.ui_description: + description = 
ucd(environment.ui_description) or "" + print(_(' Description: %s') % description) + + sections = ( + (_(' Mandatory Groups:'), names(environment.mandatory_groups)), + (_(' Optional Groups:'), names(environment.optional_groups))) + for (section_name, packages) in sections: + if len(packages) < 1: + continue + print(section_name) + self._display_packages(packages) + + def matchcallback(self, po, values, matchfor=None, verbose=None, + highlight=None): + """Output search/provides type callback matches. + + :param po: the package object that matched the search + :param values: the information associated with *po* that + matched the search + :param matchfor: a list of strings to be highlighted in the + output + :param verbose: whether to output extra verbose information + :param highlight: highlighting options for the highlighted matches + """ + def print_highlighted_key_item(key, item, printed_headline, can_overflow=False): + if not printed_headline: + print(_('Matched from:')) + item = ucd(item) or "" + if item == "": + return + if matchfor: + item = self._sub_highlight(item, highlight, matchfor, ignore_case=True) + if can_overflow: + print(self.fmtKeyValFill(key, item)) + else: + print(key % item) + + def print_file_provides(item, printed_match): + if not self.FILE_PROVIDE_RE.match(item): + return False + key = _("Filename : %s") + file_match = False + for filename in po.files: + if fnmatch.fnmatch(filename, item): + print_highlighted_key_item( + key, filename, file_match or printed_match, can_overflow=False) + file_match = True + return file_match + + if self.conf.showdupesfromrepos: + msg = '%s : ' % po + else: + msg = '%s.%s : ' % (po.name, po.arch) + msg = self.fmtKeyValFill(msg, po.summary or "") + if matchfor: + if highlight is None: + highlight = self.conf.color_search_match + msg = self._sub_highlight(msg, highlight, matchfor, ignore_case=True) + print(msg) + + if verbose is None: + verbose = self.conf.verbose + if not verbose: + return + + print(_("Repo : 
%s") % po.ui_from_repo) + printed_match = False + name_match = False + for item in set(values): + if po.summary == item: + name_match = True + continue # Skip double name/summary printing + + if po.description == item: + key = _("Description : ") + print_highlighted_key_item(key, item, printed_match, can_overflow=True) + printed_match = True + elif po.url == item: + key = _("URL : %s") + print_highlighted_key_item(key, item, printed_match, can_overflow=False) + printed_match = True + elif po.license == item: + key = _("License : %s") + print_highlighted_key_item(key, item, printed_match, can_overflow=False) + printed_match = True + elif print_file_provides(item, printed_match): + printed_match = True + else: + key = _("Provide : %s") + for provide in po.provides: + provide = str(provide) + if fnmatch.fnmatch(provide, item): + print_highlighted_key_item(key, provide, printed_match, can_overflow=False) + printed_match = True + else: + first_provide = provide.split()[0] + possible = set('=<>') + if any((char in possible) for char in item): + item_new = item.split()[0] + else: + item_new = item + if fnmatch.fnmatch(first_provide, item_new): + print_highlighted_key_item( + key, provide, printed_match, can_overflow=False) + printed_match = True + + if not any([printed_match, name_match]): + for item in set(values): + key = _("Other : %s") + print_highlighted_key_item(key, item, printed_match, can_overflow=False) + print() + + def matchcallback_verbose(self, po, values, matchfor=None): + """Output search/provides type callback matches. This will + output more information than :func:`matchcallback`. 
+ + :param po: the package object that matched the search + :param values: the information associated with *po* that + matched the search + :param matchfor: a list of strings to be highlighted in the + output + """ + return self.matchcallback(po, values, matchfor, verbose=True) + + def reportDownloadSize(self, packages, installonly=False): + """Report the total download size for a set of packages + + :param packages: a list of package objects + :param installonly: whether the transaction consists only of installations + """ + totsize = 0 + locsize = 0 + insize = 0 + error = False + for pkg in packages: + # Just to be on the safe side, if for some reason getting + # the package size fails, log the error and don't report download + # size + try: + size = int(pkg._size) + totsize += size + try: + if pkg.verifyLocalPkg(): + locsize += size + except Exception: + pass + + if not installonly: + continue + + try: + size = int(pkg.installsize) + except Exception: + pass + insize += size + except Exception: + error = True + msg = _('There was an error calculating total download size') + logger.error(msg) + break + + if not error: + if locsize: + logger.info(_("Total size: %s"), + format_number(totsize)) + if locsize != totsize: + logger.info(_("Total download size: %s"), + format_number(totsize - locsize)) + if installonly: + logger.info(_("Installed size: %s"), format_number(insize)) + + def reportRemoveSize(self, packages): + """Report the total size of packages being removed. 
+
+        :param packages: a list of package objects
+        """
+        totsize = 0
+        error = False
+        for pkg in packages:
+            # Just to be on the safe side, if for some reason getting
+            # the package size fails, log the error and don't report the freed
+            # size
+            try:
+                size = pkg._size
+                totsize += size
+            except Exception:
+                error = True
+                msg = _('There was an error calculating installed size')
+                logger.error(msg)
+                break
+        if not error:
+            logger.info(_("Freed space: %s"), format_number(totsize))
+
+    def list_group_transaction(self, comps, history, diff):
+        """Return a printable table describing the group-membership changes
+        in *diff* (packages newly marked as installed/removed by a group),
+        or None when there is no diff."""
+        if not diff:
+            return None
+
+        out = []
+        rows = []
+        if diff.new_groups:
+            out.append(_('Marking packages as installed by the group:'))
+            for grp_id in diff.new_groups:
+                pkgs = list(diff.added_packages(grp_id))
+                group_object = comps._group_by_id(grp_id)
+                # Fall back to the raw id when comps no longer knows the group.
+                grp_name = group_object.ui_name if group_object else grp_id
+                rows.extend(_spread_in_columns(4, "@" + grp_name, pkgs))
+        if diff.removed_groups:
+            out.append(_('Marking packages as removed by the group:'))
+            for grp_id in diff.removed_groups:
+                pkgs = list(diff.removed_packages(grp_id))
+                grp_name = history.group.get(grp_id).ui_name
+                rows.extend(_spread_in_columns(4, "@" + grp_name, pkgs))
+
+        if rows:
+            col_data = self._col_widths(rows)
+            for row in rows:
+                out.append(self.fmtColumns(zip(row, col_data), ' '))
+            out[0:0] = self._banner(col_data, (_('Group'), _('Packages'), '', ''))
+        return '\n'.join(out)
+
+    def _skipped_packages(self, report_problems):
+        """returns set of conflicting packages and set of packages with broken dependency that would
+        be additionally installed when --best and --allowerasing"""
+        if self.base._goal.actions & (hawkey.INSTALL | hawkey.UPGRADE | hawkey.UPGRADE_ALL):
+            best = True
+        else:
+            best = False
+        # Re-run the goal on a copy so the real transaction goal is untouched.
+        ng = deepcopy(self.base._goal)
+        params = {"allow_uninstall": self.base._allow_erasing,
+                  "force_best": best,
+                  "ignore_weak": True}
+        ret = ng.run(**params)
+        if not ret and report_problems:
+            msg = 
dnf.util._format_resolve_problems(ng.problem_rules())
+            logger.warning(msg)
+        problem_conflicts = set(ng.problem_conflicts(available=True))
+        problem_dependency = set(ng.problem_broken_dependency(available=True)) - problem_conflicts
+        return problem_conflicts, problem_dependency
+
+    def list_transaction(self, transaction, total_width=None):
+        """Return a string representation of the transaction in an
+        easy-to-read format.
+
+        Returns an empty string when nothing visible (packages, modules,
+        groups, environments) would change.
+        """
+        forward_actions = hawkey.UPGRADE | hawkey.UPGRADE_ALL | hawkey.DISTUPGRADE | \
+            hawkey.DISTUPGRADE_ALL | hawkey.DOWNGRADE | hawkey.INSTALL
+        skipped_conflicts = set()
+        skipped_broken = set()
+
+        if transaction is None:
+            # set empty transaction list instead of returning None
+            # in order to display module changes when RPM transaction is empty
+            transaction = []
+
+        list_bunch = _make_lists(transaction, self.base._goal)
+        pkglist_lines = []
+        data = {'n' : {}, 'v' : {}, 'r' : {}}
+        a_wid = 0 # Arch can't get "that big" ... so always use the max.
+
+        # Helper: append one display row (name, arch, evr, repo, size,
+        # obsoletes, highlight color) and update the per-column width
+        # histograms used later by calcColumns.
+        def _add_line(lines, data, a_wid, po, obsoletes=[]):
+            (n, a, e, v, r) = po.pkgtup
+            evr = po.evr
+            repoid = po._from_repo
+            size = format_number(po._size)
+
+            if a is None: # gpgkeys are weird
+                a = 'noarch'
+
+            # none, partial, full?
+            if po._from_system:
+                hi = self.conf.color_update_installed
+            elif po._from_cmdline:
+                hi = self.conf.color_update_local
+            else:
+                hi = self.conf.color_update_remote
+            lines.append((n, a, evr, repoid, size, obsoletes, hi))
+            # Create a dict of field_length => number of packages, for
+            # each field.
+            for (d, v) in (("n", len(n)), ("v", len(evr)), ("r", len(repoid))):
+                data[d].setdefault(v, 0)
+                data[d][v] += 1
+            a_wid = max(a_wid, len(a))
+            return a_wid
+        ins_group_msg = _('Installing group/module packages') if dnf.base.WITH_MODULES \
+            else _('Installing group packages')
+
+        for (action, pkglist) in [
+                # TRANSLATORS: This is for a list of packages to be installed.
+                (C_('summary', 'Installing'), list_bunch.installed),
+                # TRANSLATORS: This is for a list of packages to be upgraded.
+                (C_('summary', 'Upgrading'), list_bunch.upgraded),
+                # TRANSLATORS: This is for a list of packages to be reinstalled.
+                (C_('summary', 'Reinstalling'), list_bunch.reinstalled),
+                (ins_group_msg, list_bunch.installed_group),
+                (_('Installing dependencies'), list_bunch.installed_dep),
+                (_('Installing weak dependencies'), list_bunch.installed_weak),
+                # TRANSLATORS: This is for a list of packages to be removed.
+                (_('Removing'), list_bunch.erased),
+                (_('Removing dependent packages'), list_bunch.erased_dep),
+                (_('Removing unused dependencies'), list_bunch.erased_clean),
+                # TRANSLATORS: This is for a list of packages to be downgraded.
+                (C_('summary', 'Downgrading'), list_bunch.downgraded)]:
+            lines = []
+
+            # build a reverse mapping to 'replaced_by'
+            # this is required to achieve reasonable speed
+            replaces = {}
+            for tsi in transaction:
+                if tsi.action != libdnf.transaction.TransactionItemAction_OBSOLETED:
+                    continue
+                for i in tsi._item.getReplacedBy():
+                    replaces.setdefault(i, set()).add(tsi)
+
+            for tsi in pkglist:
+                if tsi.action not in dnf.transaction.FORWARD_ACTIONS + [libdnf.transaction.TransactionItemAction_REMOVE]:
+                    continue
+
+                # get TransactionItems obsoleted by tsi
+                obsoleted = sorted(replaces.get(tsi._item, []))
+
+                a_wid = _add_line(lines, data, a_wid, tsi.pkg, obsoleted)
+
+            pkglist_lines.append((action, lines))
+
+        installedProfiles = sorted(dict(self.base._moduleContainer.getInstalledProfiles()).items())
+        if installedProfiles:
+            action = _("Installing module profiles")
+            lines = []
+            for name, profiles in installedProfiles:
+                for profile in list(profiles):
+                    lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
+            pkglist_lines.append((action, lines))
+
+        removedProfiles = sorted(dict(self.base._moduleContainer.getRemovedProfiles()).items())
+        if removedProfiles:
+            action = _("Disabling module profiles")
+            lines = []
+            for name, profiles in removedProfiles:
+                for profile in list(profiles):
+                    lines.append(("%s/%s" % (name, profile), "", "", "", "", "", ""))
+            pkglist_lines.append((action, lines))
+
+        enabledStreams = sorted(dict(self.base._moduleContainer.getEnabledStreams()).items())
+        if enabledStreams:
+            action = _("Enabling module streams")
+            lines = []
+            for name, stream in enabledStreams:
+                lines.append((name, "", stream, "", "", "", ""))
+            pkglist_lines.append((action, lines))
+
+        switchedStreams = sorted(dict(self.base._moduleContainer.getSwitchedStreams()).items())
+        if switchedStreams:
+            action = _("Switching module streams")
+            lines = []
+            for name, stream in switchedStreams:
+                lines.append((name, "", "%s -> %s" % (stream[0], stream[1]), "", "", "", ""))
+            pkglist_lines.append((action, lines))
+
+        disabledModules = sorted(list(self.base._moduleContainer.getDisabledModules()))
+        if disabledModules:
+            action = _("Disabling modules")
+            lines = []
+            for name in disabledModules:
+                lines.append((name, "", "", "", "", "", ""))
+            pkglist_lines.append((action, lines))
+
+        resetModules = sorted(list(self.base._moduleContainer.getResetModules()))
+        if resetModules:
+            action = _("Resetting modules")
+            lines = []
+            for name in resetModules:
+                lines.append((name, "", "", "", "", "", ""))
+            pkglist_lines.append((action, lines))
+        if self.base._history:
+            install_env_group = self.base._history.env._installed
+            if install_env_group:
+                action = _("Installing Environment Groups")
+                lines = []
+                for group in install_env_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+            upgrade_env_group = self.base._history.env._upgraded
+            if upgrade_env_group:
+                action = _("Upgrading Environment Groups")
+                lines = []
+                for group in upgrade_env_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+            remove_env_group = self.base._history.env._removed
+            if remove_env_group:
+                action = _("Removing Environment Groups")
+                lines = []
+                for group in remove_env_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+            install_group = self.base._history.group._installed
+            if install_group:
+                action = _("Installing Groups")
+                lines = []
+                for group in install_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+            upgrade_group = self.base._history.group._upgraded
+            if upgrade_group:
+                action = _("Upgrading Groups")
+                lines = []
+                for group in upgrade_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+            remove_group = self.base._history.group._removed
+            if remove_group:
+                action = _("Removing Groups")
+                lines = []
+                for group in remove_group.values():
+                    lines.append((group.getName(), "", "", "", "", "", ""))
+                pkglist_lines.append((action, lines))
+        # show skipped conflicting packages
+        if not self.conf.best and self.base._goal.actions & forward_actions:
+            lines = []
+            skipped_conflicts, skipped_broken = self._skipped_packages(report_problems=True)
+            skipped_broken = dict((str(pkg), pkg) for pkg in skipped_broken)
+            for pkg in sorted(skipped_conflicts):
+                a_wid = _add_line(lines, data, a_wid, pkg, [])
+            recommendations = ["--best"]
+            if not self.base._allow_erasing:
+                recommendations.append("--allowerasing")
+            skip_str = _("Skipping packages with conflicts:\n"
+                         "(add '%s' to command line "
+                         "to force their upgrade)") % " ".join(recommendations)
+            pkglist_lines.append((skip_str, lines))
+
+            lines = []
+            for nevra, pkg in sorted(skipped_broken.items()):
+                a_wid = _add_line(lines, data, a_wid, pkg, [])
+            skip_str = _("Skipping packages with broken dependencies%s")
+            if self.base.conf.upgrade_group_objects_upgrade:
+                skip_str = skip_str % ""
+            else:
+                skip_str = skip_str % _(" or part of a group")
+
+            pkglist_lines.append((skip_str, lines))
+
+        if not data['n'] and not self.base._moduleContainer.isChanged() and not \
+                (self.base._history and (self.base._history.group or self.base._history.env)):
+            return u''
+        else:
+            data = [data['n'], {}, data['v'], data['r'], {}]
+            columns = [1, a_wid, 1, 1, 5]
+            columns = self.calcColumns(data, indent=" ", columns=columns,
+                                       remainder_column=2, total_width=total_width)
+            (n_wid, a_wid, v_wid, r_wid, s_wid) = columns
+
+            # Do not use 'Package' without context. Using context resolves
+            # RhBug 1302935 as a side effect.
+            msg_package = select_short_long(n_wid,
+                                            # Translators: This is the short version of 'Package'. You can
+                                            # use the full (unabbreviated) term 'Package' if you think that
+                                            # the translation to your language is not too long and will
+                                            # always fit to limited space.
+                                            C_('short', 'Package'),
+                                            # Translators: This is the full (unabbreviated) term 'Package'.
+                                            C_('long', 'Package'))
+            msg_arch = select_short_long(a_wid,
+                                         # Translators: This is abbreviated 'Architecture', used when
+                                         # we have not enough space to display the full word.
+                                         C_('short', 'Arch'),
+                                         # Translators: This is the full word 'Architecture', used when
+                                         # we have enough space.
+                                         C_('long', 'Architecture'))
+            msg_version = select_short_long(v_wid,
+                                            # Translators: This is the short version of 'Version'. You can
+                                            # use the full (unabbreviated) term 'Version' if you think that
+                                            # the translation to your language is not too long and will
+                                            # always fit to limited space.
+                                            C_('short', 'Version'),
+                                            # Translators: This is the full (unabbreviated) term 'Version'.
+                                            C_('long', 'Version'))
+            msg_repository = select_short_long(r_wid,
+                                               # Translators: This is abbreviated 'Repository', used when
+                                               # we have not enough space to display the full word.
+                                               C_('short', 'Repo'),
+                                               # Translators: This is the full word 'Repository', used when
+                                               # we have enough space.
+                                               C_('long', 'Repository'))
+            msg_size = select_short_long(s_wid,
+                                         # Translators: This is the short version of 'Size'. It should
+                                         # not be longer than 5 characters. If the term 'Size' in your
+                                         # language is not longer than 5 characters then you can use it
+                                         # unabbreviated.
+                                         C_('short', 'Size'),
+                                         # Translators: This is the full (unabbreviated) term 'Size'.
+                                         C_('long', 'Size'))
+
+            out = [u"%s\n%s\n%s\n" % ('=' * self.term.columns,
+                                      self.fmtColumns(((msg_package, -n_wid),
+                                                       (msg_arch, -a_wid),
+                                                       (msg_version, -v_wid),
+                                                       (msg_repository, -r_wid),
+                                                       (msg_size, s_wid)), u" "),
+                                      '=' * self.term.columns)]
+
+        for (action, lines) in pkglist_lines:
+            if lines:
+                totalmsg = u"%s:\n" % action
+            for (n, a, evr, repoid, size, obsoletes, hi) in lines:
+                columns = ((n, -n_wid, hi), (a, -a_wid),
+                           (evr, -v_wid), (repoid, -r_wid), (size, s_wid))
+                msg = self.fmtColumns(columns, u" ", u"\n")
+                hibeg, hiend = self._highlight(self.conf.color_update_installed)
+                for obspo in sorted(obsoletes):
+                    appended = ' ' + _('replacing') + ' %s%s%s.%s %s\n'
+                    appended %= (hibeg, obspo.name, hiend, obspo.arch, obspo.evr)
+                    msg += appended
+                totalmsg = totalmsg + msg
+
+            if lines:
+                out.append(totalmsg)
+
+        out.append(_("""
+Transaction Summary
+%s
+""") % ('=' * self.term.columns))
+        summary_data = (
+            (_('Install'), len(list_bunch.installed) +
+             len(list_bunch.installed_group) +
+             len(list_bunch.installed_weak) +
+             len(list_bunch.installed_dep), 0),
+            (_('Upgrade'), len(list_bunch.upgraded), 0),
+            (_('Remove'), len(list_bunch.erased) + len(list_bunch.erased_dep) +
+             len(list_bunch.erased_clean), 0),
+            (_('Downgrade'), len(list_bunch.downgraded), 0),
+            (_('Skip'), len(skipped_conflicts) + len(skipped_broken), 0))
+        max_msg_action = 0
+        max_msg_count = 0
+        max_msg_pkgs = 0
+        max_msg_depcount = 0
+        for action, count, depcount in summary_data:
+            if not count and not depcount:
+                continue
+
+            msg_pkgs = P_('Package', 'Packages', count)
+            len_msg_action = exact_width(action)
+            len_msg_count = exact_width(unicode(count))
+            len_msg_pkgs = exact_width(msg_pkgs)
+
+            if depcount:
+                len_msg_depcount = exact_width(unicode(depcount))
+            else:
+                len_msg_depcount = 0
+
+            max_msg_action = max(len_msg_action, max_msg_action)
+            max_msg_count = max(len_msg_count, max_msg_count)
+            max_msg_pkgs = max(len_msg_pkgs, max_msg_pkgs)
+            max_msg_depcount = max(len_msg_depcount, max_msg_depcount)
+
+        for action, count, depcount in summary_data:
+            msg_pkgs = P_('Package', 'Packages', count)
+            if depcount:
+                msg_deppkgs = P_('Dependent package', 'Dependent packages',
+                                 depcount)
+                action_msg = fill_exact_width(action, max_msg_action)
+                if count:
+                    msg = '%s %*d %s (+%*d %s)\n'
+                    out.append(msg % (action_msg,
+                                      max_msg_count, count,
+                                      "%-*s" % (max_msg_pkgs, msg_pkgs),
+                                      max_msg_depcount, depcount, msg_deppkgs))
+                else:
+                    msg = '%s %s ( %*d %s)\n'
+                    out.append(msg % (action_msg,
+                                      (max_msg_count + max_msg_pkgs) * ' ',
+                                      max_msg_depcount, depcount, msg_deppkgs))
+            elif count:
+                msg = '%s %*d %s\n'
+                out.append(msg % (fill_exact_width(action, max_msg_action),
+                                  max_msg_count, count, msg_pkgs))
+        return ''.join(out)
+
+    def post_transaction_output(self, transaction):
+        """Returns a human-readable summary of the results of the
+        transaction.
+
+        :return: a string containing a human-readable summary of the
+            results of the transaction
+        """
+        # Works a bit like calcColumns, but we never overflow a column we just
+        # have a dynamic number of columns.
+        def _fits_in_cols(msgs, num):
+            """ Work out how many columns we can use to display stuff, in
+                the post trans output. 
"""
+            if len(msgs) < num:
+                return []
+
+            left = self.term.columns - ((num - 1) + 2)
+            if left <= 0:
+                return []
+
+            col_lens = [0] * num
+            col = 0
+            for msg in msgs:
+                if len(msg) > col_lens[col]:
+                    diff = (len(msg) - col_lens[col])
+                    if left <= diff:
+                        return []
+                    left -= diff
+                    col_lens[col] = len(msg)
+                col += 1
+                col %= len(col_lens)
+
+            # Spread the leftover width evenly; negative widths mean
+            # left-aligned columns for fmtColumns.
+            for col in range(len(col_lens)):
+                col_lens[col] += left // num
+                col_lens[col] *= -1
+            return col_lens
+
+        out = ''
+        list_bunch = _make_lists(transaction, self.base._goal)
+        skipped_conflicts, skipped_broken = self._skipped_packages(report_problems=False)
+        skipped = skipped_conflicts.union(skipped_broken)
+        skipped = sorted(set([str(pkg) for pkg in skipped]))
+
+        for (action, tsis) in [(_('Upgraded'), list_bunch.upgraded),
+                               (_('Downgraded'), list_bunch.downgraded),
+                               (_('Installed'), list_bunch.installed +
+                                list_bunch.installed_group +
+                                list_bunch.installed_weak +
+                                list_bunch.installed_dep),
+                               (_('Reinstalled'), list_bunch.reinstalled),
+                               (_('Skipped'), skipped),
+                               (_('Removed'), list_bunch.erased +
+                                list_bunch.erased_dep +
+                                list_bunch.erased_clean),
+                               (_('Failed'), list_bunch.failed)]:
+            if not tsis:
+                continue
+            msgs = []
+            out += '\n%s:\n' % action
+            for tsi in tsis:
+                msgs.append(str(tsi))
+            for num in (8, 7, 6, 5, 4, 3, 2):
+                cols = _fits_in_cols(msgs, num)
+                if cols:
+                    break
+            if not cols:
+                # Nothing fits side-by-side: fall back to one full-width column.
+                cols = [-(self.term.columns - 2)]
+            while msgs:
+                current_msgs = msgs[:len(cols)]
+                out += ' '
+                out += self.fmtColumns(zip(current_msgs, cols), end=u'\n')
+                msgs = msgs[len(cols):]
+
+        return out
+
+    def setup_progress_callbacks(self):
+        """Set up the progress callbacks and various
+        output bars based on debug level.
+
+        """
+        progressbar = None
+        if self.conf.debuglevel >= 2:
+            progressbar = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)
+            self.progress = dnf.cli.progress.MultiFileProgressMeter(fo=sys.stdout)
+
+        # setup our depsolve progress callback
+        return (progressbar, DepSolveProgressCallBack())
+
+    def download_callback_total_cb(self, remote_size, download_start_timestamp):
+        """Outputs summary information about the download process.
+
+        :param remote_size: the total amount of information that was
+            downloaded, in bytes
+        :param download_start_timestamp: the time when the download
+            process started, in seconds since the epoch
+        """
+        if remote_size <= 0:
+            return
+
+        width = self.term.columns
+        logger.info("-" * width)
+        # Avoid division by zero for (near-)instant downloads.
+        dl_time = max(0.01, time.time() - download_start_timestamp)
+        # NOTE(review): padding inside this format string may have been
+        # whitespace-collapsed in this copy -- confirm against pristine source.
+        msg = ' %5sB/s | %5sB %9s ' % (
+            format_number(remote_size // dl_time),
+            format_number(remote_size),
+            format_time(dl_time))
+        msg = fill_exact_width(_("Total"), width - len(msg)) + msg
+        logger.info(msg)
+
+    def _history_uiactions(self, hpkgs):
+        """Return (count, action-summary string) for the history items in
+        *hpkgs*, skipping the UPGRADED/DOWNGRADED halves of action pairs."""
+        actions = set()
+        actions_short = set()
+        count = 0
+        for pkg in hpkgs:
+            if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADED):
+                # skip states we don't want to display in user input
+                continue
+            actions.add(pkg.action_name)
+            actions_short.add(pkg.action_short)
+            count += 1
+
+        if len(actions) > 1:
+            return count, ", ".join(sorted(actions_short))
+
+        # So empty transactions work, although that "shouldn't" really happen
+        return count, "".join(list(actions))
+
+    def _pwd_ui_username(self, uid, limit=None):
+        if isinstance(uid, list):
+            return [self._pwd_ui_username(u, limit) for u in uid]
+
+        # loginuid is set to -1 (0xFFFF_FFFF) on init, in newer kernels.
+        # loginuid is set to INT_MAX (0x7FFF_FFFF) on init, in older kernels.
+        if uid is None or uid in (0xFFFFFFFF, 0x7FFFFFFF):
+            # NOTE(review): upstream dnf has _("<unset>") here; the "<unset>"
+            # literal appears to have been stripped by tag-like text removal
+            # in this copy -- confirm against pristine source.
+            loginid = _("")
+            name = _("System") + " " + loginid
+            if limit is not None and len(name) > limit:
+                name = loginid
+            return ucd(name)
+
+        def _safe_split_0(text, *args):
+            """ Split gives us a [0] for everything _but_ '', this function
+            returns '' in that case. """
+            ret = text.split(*args)
+            if not ret:
+                return ''
+            return ret[0]
+
+        try:
+            user = pwd.getpwuid(int(uid))
+            fullname = _safe_split_0(ucd(user.pw_gecos), ';', 2)
+            user_name = ucd(user.pw_name)
+            name = "%s <%s>" % (fullname, user_name)
+            if limit is not None and len(name) > limit:
+                name = "%s ... <%s>" % (_safe_split_0(fullname), user_name)
+                if len(name) > limit:
+                    name = "<%s>" % user_name
+            return name
+        except KeyError:
+            # No passwd entry for this uid; show the raw number.
+            return ucd(uid)
+
+    def historyListCmd(self, tids):
+        """Output a list of information about the history of yum
+        transactions.
+
+        :param tids: transaction Ids; lists all transactions if empty
+        """
+        transactions = self.history.old(tids)
+        if self.conf.history_list_view == 'users':
+            uids = [1, 2]
+        elif self.conf.history_list_view == 'commands':
+            uids = [1]
+        else:
+            assert self.conf.history_list_view == 'single-user-commands'
+            uids = set()
+            done = 0
+            blanks = 0
+            for transaction in transactions:
+                done += 1
+                if transaction.cmdline is None:
+                    blanks += 1
+                uids.add(transaction.loginuid)
+
+        fmt = "%s | %s | %s | %s | %s"
+        if len(uids) == 1:
+            name = _("Command line")
+        else:
+            # TRANSLATORS: user names who executed transaction in history command output
+            name = _("User name")
+        print(fmt % (fill_exact_width(_("ID"), 6, 6),
+                     fill_exact_width(name, 24, 24),
+                     fill_exact_width(_("Date and time"), 16, 16),
+                     fill_exact_width(_("Action(s)"), 14, 14),
+                     fill_exact_width(_("Altered"), 7, 7)))
+        print("-" * 79)
+        fmt = "%6u | %s | %-16.16s | %s | %4u"
+
+        for transaction in transactions:
+            if len(uids) == 1:
+                name = transaction.cmdline or ''
+            else:
+                name = self._pwd_ui_username(transaction.loginuid, 24)
+            name = ucd(name)
+            tm = time.strftime("%Y-%m-%d %H:%M",
+                               time.localtime(transaction.beg_timestamp))
+            num, uiacts = self._history_uiactions(transaction.data())
+            name = fill_exact_width(name, 24, 24)
+            uiacts = fill_exact_width(uiacts, 14, 14)
+            rmark = lmark = ' '
+            if transaction.return_code is None:
+                rmark = lmark = '*'
+            elif transaction.return_code:
+                rmark = lmark = '#'
+                # We don't check .errors, because return_code will be non-0
+            elif transaction.is_output:
+                rmark = lmark = 'E'
+            if transaction.altered_lt_rpmdb:
+                rmark = '<'
+            if transaction.altered_gt_rpmdb:
+                lmark = '>'
+            print(fmt % (transaction.tid, name, tm, uiacts, num), "%s%s" % (lmark, rmark))
+
+    def historyInfoCmd(self, tids, pats=[], mtids=set()):
+        """Output information about a transaction in history
+
+        :param tids: transaction Ids; prints info for the last transaction if empty
+        :raises dnf.exceptions.Error in case no transactions were found
+        """
+        tids = set(tids)
+        last = self.history.last()
+        if last is None:
+            logger.critical(_('No transactions'))
+            raise dnf.exceptions.Error(_('Failed history info'))
+
+        lasttid = last.tid
+        lastdbv = last.end_rpmdb_version
+
+        transactions = []
+        if not tids:
+            last = self.history.last(complete_transactions_only=False)
+            if last is not None:
+                tids.add(last.tid)
+                transactions.append(last)
+        else:
+            transactions = self.history.old(tids)
+
+        if not tids:
+            logger.critical(_('No transaction ID, or package, given'))
+            raise dnf.exceptions.Error(_('Failed history info'))
+
+        bmtid, emtid = -1, -1
+        mobj = None
+        done = False
+
+        if mtids:
+            mtids = sorted(mtids)
+            bmtid, emtid = mtids.pop()
+
+        for trans in transactions:
+            if lastdbv is not None and trans.tid == lasttid:
+                # If this is the last transaction, is good and it doesn't
+                # match the current rpmdb ... then mark it as bad.
+                rpmdbv = self.sack._rpmdb_version()
+                trans.compare_rpmdbv(str(rpmdbv))
+                lastdbv = None
+
+            merged = False
+
+            if trans.tid >= bmtid and trans.tid <= emtid:
+                if mobj is None:
+                    mobj = MergedTransactionWrapper(trans)
+                else:
+                    mobj.merge(trans)
+                merged = True
+            elif mobj is not None:
+                if done:
+                    print("-" * 79)
+                done = True
+
+                self._historyInfoCmd(mobj)
+                mobj = None
+
+                if mtids:
+                    bmtid, emtid = mtids.pop()
+                    if trans.tid >= bmtid and trans.tid <= emtid:
+                        mobj = trans
+                        merged = True
+
+            if not merged:
+                if done:
+                    print("-" * 79)
+                done = True
+                self._historyInfoCmd(trans, pats)
+
+        if mobj is not None:
+            if done:
+                print("-" * 79)
+            self._historyInfoCmd(mobj)
+
+    def _historyInfoCmd(self, old, pats=[]):
+        loginuid = old.loginuid
+        if isinstance(loginuid, int):
+            loginuid = [loginuid]
+        name = [self._pwd_ui_username(uid) for uid in loginuid]
+
+        _pkg_states_installed = {'i' : _('Installed'), 'e' : _('Erased'),
+                                 'o' : _('Upgraded'), 'n' : _('Downgraded')}
+        _pkg_states_available = {'i' : _('Installed'), 'e' : _('Not installed'),
+                                 'o' : _('Older'), 'n' : _('Newer')}
+        maxlen = max([len(x) for x in (list(_pkg_states_installed.values()) +
+                                       list(_pkg_states_available.values()))])
+        _pkg_states_installed['maxlen'] = maxlen
+        _pkg_states_available['maxlen'] = maxlen
+        def _simple_pkg(pkg, prefix_len, was_installed=False, highlight=False,
+                        pkg_max_len=0, show_repo=True):
+            prefix = " " * prefix_len
+            if was_installed:
+                _pkg_states = _pkg_states_installed
+            else:
+                _pkg_states = _pkg_states_available
+            state = _pkg_states['i']
+
+            # get installed packages with name = pkg.name
+            ipkgs = self.sack.query().installed().filterm(name=pkg.name).run()
+
+            if not ipkgs:
+                state = _pkg_states['e']
+            else:
+                # get latest installed package from software database
+                inst_pkg = self.history.package(ipkgs[0])
+                if inst_pkg:
+                    res = pkg.compare(inst_pkg)
+                    # res is:
+                    # 0 if inst_pkg == pkg
+                    # > 0 when inst_pkg > pkg
+                    # < 0 when inst_pkg < pkg
+                    if res == 0:
+                        pass # installed
+                    elif res > 0:
+                        state = _pkg_states['o'] # updated
+                    else:
+                        state = _pkg_states['n'] # downgraded
+
+            if highlight:
+                (hibeg, hiend) = self._highlight('bold')
+            else:
+                (hibeg, hiend) = self._highlight('normal')
+            state = fill_exact_width(state, _pkg_states['maxlen'])
+            ui_repo = ''
+            if show_repo:
+                ui_repo = pkg.ui_from_repo()
+            print("%s%s%s%s %-*s %s" % (prefix, hibeg, state, hiend,
+                                        pkg_max_len, str(pkg), ui_repo))
+
+        # NOTE(review): the label strings below look whitespace-collapsed in
+        # this copy (upstream pads them so the ':' columns align, e.g.
+        # "Transaction ID :") -- confirm against pristine source.
+        tids = old.tids()
+        if len(tids) > 1:
+            print(_("Transaction ID :"), "%u..%u" % (tids[0], tids[-1]))
+        else:
+            print(_("Transaction ID :"), tids[0])
+        begt = float(old.beg_timestamp)
+        begtm = time.strftime("%c", time.localtime(begt))
+        print(_("Begin time :"), begtm)
+        if old.beg_rpmdb_version is not None:
+            if old.altered_lt_rpmdb:
+                print(_("Begin rpmdb :"), old.beg_rpmdb_version, "**")
+            else:
+                print(_("Begin rpmdb :"), old.beg_rpmdb_version)
+        if old.end_timestamp is not None:
+            endt = old.end_timestamp
+            endtm = time.strftime("%c", time.localtime(endt))
+            diff = endt - begt
+            if diff < 5 * 60:
+                diff = _("(%u seconds)") % diff
+            elif diff < 5 * 60 * 60:
+                diff = _("(%u minutes)") % (diff // 60)
+            elif diff < 5 * 60 * 60 * 24:
+                diff = _("(%u hours)") % (diff // (60 * 60))
+            else:
+                diff = _("(%u days)") % (diff // (60 * 60 * 24))
+            print(_("End time :"), endtm, diff)
+        if old.end_rpmdb_version is not None:
+            if old.altered_gt_rpmdb:
+                print(_("End rpmdb :"), old.end_rpmdb_version, "**")
+            else:
+                print(_("End rpmdb :"), old.end_rpmdb_version)
+        if isinstance(name, (list, tuple)):
+            seen = set()
+            for i in name:
+                if i in seen:
+                    continue
+                seen.add(i)
+                print(_("User :"), i)
+        else:
+            print(_("User :"), name)
+        if isinstance(old.return_code, (list, tuple)):
+            codes = old.return_code
+            if codes[0] is None:
+                print(_("Return-Code :"), "**", _("Aborted"), "**")
+                codes = codes[1:]
+            elif not all(codes):
+                print(_("Return-Code :"), _("Success"))
+            elif codes:
+                print(_("Return-Code :"), _("Failures:"), ", ".join([str(i) for i in codes]))
+        elif old.return_code is None:
+            print(_("Return-Code :"), "**", _("Aborted"), "**")
+        elif old.return_code:
+            print(_("Return-Code :"), _("Failure:"), old.return_code)
+        else:
+            print(_("Return-Code :"), _("Success"))
+
+        if isinstance(old.releasever, (list, tuple)):
+            seen = set()
+            for i in old.releasever:
+                if i in seen:
+                    continue
+                seen.add(i)
+                print(_("Releasever :"), i)
+        else:
+            print(_("Releasever :"), old.releasever)
+
+        if old.cmdline is not None:
+            if isinstance(old.cmdline, (list, tuple)):
+                for cmdline in old.cmdline:
+                    print(_("Command Line :"), cmdline)
+            else:
+                print(_("Command Line :"), old.cmdline)
+
+        # TODO:
+        # comment = self.history.addon_data.read(old.tid, item='transaction-comment')
+        comment = ""
+        if comment:
+            print(_("Comment :"), comment)
+
+        perf_with = old.performed_with()
+        if perf_with:
+            print(_("Transaction performed with:"))
+        max_len = 0
+        for with_pkg in perf_with:
+            str_len = len(str(with_pkg))
+            if str_len > max_len:
+                max_len = str_len
+        for with_pkg in perf_with:
+            _simple_pkg(with_pkg, 4, was_installed=True, pkg_max_len=max_len)
+
+        print(_("Packages Altered:"))
+
+        self.historyInfoCmdPkgsAltered(old, pats)
+
+        t_out = old.output()
+        if t_out:
+            print(_("Scriptlet output:"))
+            num = 0
+            for line in t_out:
+                num += 1
+                print("%4d" % num, line)
+        t_err = old.error()
+        if t_err:
+            print(_("Errors:"))
+            num = 0
+            for line in t_err:
+                num += 1
+                print("%4d" % num, line)
+
+    # TODO: remove
+    _history_state2uistate = {'True-Install' : _('Install'),
+                              'Install' : _('Install'),
+                              'Dep-Install' : _('Dep-Install'),
+                              'Obsoleted' : _('Obsoleted'),
+                              'Obsoleting' : _('Obsoleting'),
+                              'Erase' : _('Erase'),
+                              'Reinstall' : _('Reinstall'),
+                              'Downgrade' : _('Downgrade'),
+                              'Downgraded' : _('Downgraded'),
+                              'Update' : _('Upgrade'),
+                              'Updated' : _('Upgraded'),
+                              }
+    def historyInfoCmdPkgsAltered(self, old, pats=[]):
+        """Print information about how packages are altered in a transaction.
+
+        :param old: the :class:`DnfSwdbTrans` to
+            print information about
+        :param pats: a list of patterns. Packages that match a pattern
+            in *pats* will be highlighted in the output
+        """
+        last = None
+        # Note that these don't use _simple_pkg() because we are showing what
+        # happened to them in the transaction ... not the difference between the
+        # version in the transaction and now.
+        all_uistates = self._history_state2uistate
+        maxlen = 0
+        pkg_max_len = 0
+
+        packages = old.packages()
+
+        # First pass: compute column widths for the state and package columns.
+        for pkg in packages:
+            uistate = all_uistates.get(pkg.action_name, pkg.action_name)
+            if maxlen < len(uistate):
+                maxlen = len(uistate)
+            pkg_len = len(str(pkg))
+            if pkg_max_len < pkg_len:
+                pkg_max_len = pkg_len
+
+        for pkg in packages:
+            prefix = " " * 4
+            if pkg.state != libdnf.transaction.TransactionItemState_DONE:
+                prefix = " ** "
+
+            highlight = 'normal'
+            if pats:
+                if any([pkg.match(pat) for pat in pats]):
+                    highlight = 'bold'
+            (hibeg, hiend) = self._highlight(highlight)
+
+            cn = str(pkg)
+
+            uistate = all_uistates.get(pkg.action_name, pkg.action_name)
+            uistate = fill_exact_width(ucd(uistate), maxlen)
+
+            if (last is not None and last.action == libdnf.transaction.TransactionItemAction_UPGRADED and
+                    last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_UPGRADE):
+
+                ln = len(pkg.name) + 1
+                cn = (" " * ln) + cn[ln:]
+            elif (last is not None and last.action == libdnf.transaction.TransactionItemAction_DOWNGRADE and
+                    last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_DOWNGRADED):
+
+                ln = len(pkg.name) + 1
+                cn = (" " * ln) + cn[ln:]
+            else:
+                last = None
+                if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADE):
+                    last = pkg
+            print("%s%s%s%s %-*s %s" % (prefix, hibeg, uistate, hiend,
+                                        pkg_max_len, str(pkg),
+                                        pkg.ui_from_repo()))
+
+    def historyPackageListCmd(self, extcmds):
+        """Print a list of information about transactions from history
+        that involve the given package or packages.
+
+        :param extcmds: list of extra command line arguments
+        """
+        tids = self.history.search(extcmds)
+        limit = None
+        if extcmds and not tids:
+            logger.critical(_('Bad transaction IDs, or package(s), given'))
+            return 1, ['Failed history packages-list']
+        if not tids:
+            limit = 20
+
+        all_uistates = self._history_state2uistate
+
+        fmt = "%s | %s | %s"
+        # REALLY Needs to use columns!
+        print(fmt % (fill_exact_width(_("ID"), 6, 6),
+                     fill_exact_width(_("Action(s)"), 14, 14),
+                     # This is also a hack to resolve RhBug 1302935 correctly.
+                     fill_exact_width(C_("long", "Package"), 53, 53)))
+        print("-" * 79)
+        fmt = "%6u | %s | %-50s"
+        num = 0
+        for old in self.history.old(tids, limit=limit):
+            packages = old.packages()
+            if limit and num and (num + len(packages)) > limit:
+                break
+            last = None
+
+            # Copy and paste from list ... uh.
+            rmark = lmark = ' '
+            if old.return_code is None:
+                rmark = lmark = '*'
+            elif old.return_code:
+                rmark = lmark = '#'
+                # We don't check .errors, because return_code will be non-0
+            elif old.output:
+                rmark = lmark = 'E'
+            elif old.rpmdb_problems:
+                rmark = lmark = 'P'
+            elif old.trans_skip:
+                rmark = lmark = 's'
+            if old.altered_lt_rpmdb:
+                rmark = '<'
+            if old.altered_gt_rpmdb:
+                lmark = '>'
+
+            # Find a pkg to go with each cmd...
+            for pkg in packages:
+                if limit is None:
+                    if not any([pkg.match(pat) for pat in extcmds]):
+                        continue
+
+                uistate = all_uistates.get(pkg.action_name, pkg.action_name)
+                uistate = fill_exact_width(uistate, 14)
+
+                # To chop the name off we need nevra strings, str(pkg) gives
+                # envra so we have to do it by hand ... *sigh*.
+ cn = pkg.ui_nevra + + if (last is not None and last.action == libdnf.transaction.TransactionItemAction_UPGRADED and + last.name == pkg.name and pkg.action == libdnf.transaction.TransactionItemAction_UPGRADE): + ln = len(pkg.name) + 1 + cn = (" " * ln) + cn[ln:] + elif (last is not None and + last.action == libdnf.transaction.TransactionItemAction_DOWNGRADE and last.name == pkg.name and + pkg.action == libdnf.transaction.TransactionItemAction_DOWNGRADED): + ln = len(pkg.name) + 1 + cn = (" " * ln) + cn[ln:] + else: + last = None + if pkg.action in (libdnf.transaction.TransactionItemAction_UPGRADED, libdnf.transaction.TransactionItemAction_DOWNGRADE): + last = pkg + + num += 1 + print(fmt % (old.tid, uistate, cn), "%s%s" % (lmark, rmark)) + +class DepSolveProgressCallBack(dnf.callback.Depsolve): + """Provides text output callback functions for Dependency Solver callback.""" + + def __init__(self): + """requires yum-cli log and errorlog functions as arguments""" + self.loops = 0 + + def pkg_added(self, pkg, mode): + """Print information about a package being added to the + transaction set. + + :param pkgtup: tuple containing the package name, arch, + version, and repository + :param mode: a short string indicating why the package is + being added to the transaction set. 
+ + Valid current values for *mode* are:: + + i = the package will be installed + u = the package will be an update + e = the package will be erased + r = the package will be reinstalled + d = the package will be a downgrade + o = the package will be obsoleting another package + ud = the package will be updated + od = the package will be obsoleted + """ + output = None + if mode == 'i': + output = _('---> Package %s.%s %s will be installed') + elif mode == 'u': + output = _('---> Package %s.%s %s will be an upgrade') + elif mode == 'e': + output = _('---> Package %s.%s %s will be erased') + elif mode == 'r': + output = _('---> Package %s.%s %s will be reinstalled') + elif mode == 'd': + output = _('---> Package %s.%s %s will be a downgrade') + elif mode == 'o': + output = _('---> Package %s.%s %s will be obsoleting') + elif mode == 'ud': + output = _('---> Package %s.%s %s will be upgraded') + elif mode == 'od': + output = _('---> Package %s.%s %s will be obsoleted') + + if output: + logger.debug(output, pkg.name, pkg.arch, pkg.evr) + + def start(self): + """Perform setup at the beginning of the dependency solving + process. 
+ """ + logger.debug(_('--> Starting dependency resolution')) + self.loops += 1 + + def end(self): + """Output a message stating that dependency resolution has finished.""" + logger.debug(_('--> Finished dependency resolution')) + + +class CliKeyImport(dnf.callback.KeyImport): + def __init__(self, base, output): + self.base = base + self.output = output + + def _confirm(self, id, userid, fingerprint, url, timestamp): + + def short_id(id): + rj = '0' if dnf.pycomp.PY3 else b'0' + return id[-8:].rjust(8, rj) + + msg = (_('Importing GPG key 0x%s:\n' + ' Userid : "%s"\n' + ' Fingerprint: %s\n' + ' From : %s') % + (short_id(id), userid, + dnf.crypto._printable_fingerprint(fingerprint), + url.replace("file://", ""))) + logger.critical("%s", msg) + + if self.base.conf.assumeyes: + return True + if self.base.conf.assumeno: + return False + return self.output.userconfirm() + + +class CliTransactionDisplay(LoggingTransactionDisplay): + """A YUM specific callback class for RPM operations.""" + + width = property(lambda self: dnf.cli.term._term_width()) + + def __init__(self): + super(CliTransactionDisplay, self).__init__() + self.lastmsg = "" + self.lastpackage = None # name of last package we looked at + self.output = True + + # for a progress bar + self.mark = "=" + self.marks = 22 + + def progress(self, package, action, ti_done, ti_total, ts_done, ts_total): + """Output information about an rpm operation. This may + include a text progress bar. + + :param package: the package involved in the event + :param action: the type of action that is taking place. 
Valid + values are given by + :func:`rpmtrans.LoggingTransactionDisplay.action.keys()` + :param ti_done: a number representing the amount of work + already done in the current transaction + :param ti_total: a number representing the total amount of work + to be done in the current transaction + :param ts_done: the number of the current transaction in + transaction set + :param ts_total: the total number of transactions in the + transaction set + """ + action_str = dnf.transaction.ACTIONS.get(action) + if action_str is None: + return + + wid1 = self._max_action_width() + + pkgname = ucd(package) + self.lastpackage = package + if ti_total == 0: + percent = 0 + else: + percent = (ti_done*long(100))//ti_total + self._out_progress(ti_done, ti_total, ts_done, ts_total, + percent, action_str, pkgname, wid1) + + def _max_action_width(self): + if not hasattr(self, '_max_action_wid_cache'): + wid1 = 0 + for val in dnf.transaction.ACTIONS.values(): + wid_val = exact_width(val) + if wid1 < wid_val: + wid1 = wid_val + self._max_action_wid_cache = wid1 + wid1 = self._max_action_wid_cache + return wid1 + + def _out_progress(self, ti_done, ti_total, ts_done, ts_total, + percent, process, pkgname, wid1): + if self.output and (sys.stdout.isatty() or ti_done == ti_total): + (fmt, wid1, wid2) = self._makefmt(percent, ts_done, ts_total, + progress=sys.stdout.isatty(), + pkgname=pkgname, wid1=wid1) + pkgname = ucd(pkgname) + msg = fmt % (fill_exact_width(process, wid1, wid1), + fill_exact_width(pkgname, wid2, wid2)) + if msg != self.lastmsg: + dnf.util._terminal_messenger('write_flush', msg, sys.stdout) + self.lastmsg = msg + if ti_done == ti_total: + print(" ") + + def filelog(self, package, action): + pass + + def error(self, message): + pass + + def scriptout(self, msgs): + """Print messages originating from a package script. 
+ + :param msgs: the messages coming from the script + """ + if msgs: + self.rpm_logger.info(ucd(msgs)) + + def _makefmt(self, percent, ts_done, ts_total, progress=True, + pkgname=None, wid1=15): + l = len(str(ts_total)) + size = "%s.%s" % (l, l) + fmt_done = "%" + size + "s/%" + size + "s" + done = fmt_done % (ts_done, ts_total) + + # This should probably use TerminLine, but we don't want to dep. on + # that. So we kind do an ok job by hand ... at least it's dynamic now. + if pkgname is None: + pnl = 22 + else: + pnl = exact_width(pkgname) + + overhead = (2 * l) + 2 # Length of done, above + overhead += 2 + wid1 +2 # Length of beginning (" " action " :") + overhead += 1 # Space between pn and done + overhead += 2 # Ends for progress + overhead += 1 # Space for end + width = self.width + if width < overhead: + width = overhead # Give up + width -= overhead + if pnl > width // 2: + pnl = width // 2 + + marks = self.width - (overhead + pnl) + width = "%s.%s" % (marks, marks) + fmt_bar = "[%-" + width + "s]" + # pnl = str(28 + marks + 1) + full_pnl = pnl + marks + 1 + + if progress and percent == 100: # Don't chop pkg name on 100% + fmt = "\r %s: %s " + done + wid2 = full_pnl + elif progress: + if marks > 5: + bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), ) + else: + bar = "" + fmt = "\r %s: %s " + bar + " " + done + wid2 = pnl + elif percent == 100: + fmt = " %s: %s " + done + wid2 = full_pnl + else: + if marks > 5: + bar = fmt_bar % (self.mark * marks, ) + else: + bar = "" + fmt = " %s: %s " + bar + " " + done + wid2 = pnl + return fmt, wid1, wid2 + +def progressbar(current, total, name=None): + """Output the current status to the terminal using a simple + text progress bar consisting of 50 # marks. 
+ + :param current: a number representing the amount of work + already done + :param total: a number representing the total amount of work + to be done + :param name: a name to label the progress bar with + """ + + mark = '#' + if not sys.stdout.isatty(): + return + + if current == 0: + percent = 0 + else: + if total != 0: + percent = float(current) / total + else: + percent = 0 + + width = dnf.cli.term._term_width() + + if name is None and current == total: + name = '-' + + end = ' %d/%d' % (current, total) + width -= len(end) + 1 + if width < 0: + width = 0 + if name is None: + width -= 2 + if width < 0: + width = 0 + hashbar = mark * int(width * percent) + output = '\r[%-*s]%s' % (width, hashbar, end) + elif current == total: # Don't chop name on 100% + output = '\r%s%s' % (fill_exact_width(name, width, width), end) + else: + width -= 4 + if width < 0: + width = 0 + nwid = width // 2 + if nwid > exact_width(name): + nwid = exact_width(name) + width -= nwid + hashbar = mark * int(width * percent) + output = '\r%s: [%-*s]%s' % (fill_exact_width(name, nwid, nwid), width, + hashbar, end) + + if current <= total: + dnf.util._terminal_messenger('write', output, sys.stdout) + + if current == total: + dnf.util._terminal_messenger('write', '\n', sys.stdout) + + dnf.util._terminal_messenger('flush', out=sys.stdout) diff --git a/dnf/cli/progress.py b/dnf/cli/progress.py new file mode 100644 index 0000000..60eccbe --- /dev/null +++ b/dnf/cli/progress.py @@ -0,0 +1,202 @@ +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. + +from __future__ import unicode_literals +from dnf.cli.format import format_number, format_time +from dnf.cli.term import _term_width +from dnf.pycomp import unicode +from time import time + +import sys +import dnf.callback +import dnf.util + + +class MultiFileProgressMeter(dnf.callback.DownloadProgress): + """Multi-file download progress meter""" + + STATUS_2_STR = { + dnf.callback.STATUS_FAILED: 'FAILED', + dnf.callback.STATUS_ALREADY_EXISTS: 'SKIPPED', + dnf.callback.STATUS_MIRROR: 'MIRROR', + dnf.callback.STATUS_DRPM: 'DRPM', + } + + def __init__(self, fo=sys.stderr, update_period=0.3, tick_period=1.0, rate_average=5.0): + """Creates a new progress meter instance + + update_period -- how often to update the progress bar + tick_period -- how fast to cycle through concurrent downloads + rate_average -- time constant for average speed calculation + """ + self.fo = fo + self.update_period = update_period + self.tick_period = tick_period + self.rate_average = rate_average + self.unknown_progres = 0 + self.total_drpm = 0 + self.isatty = sys.stdout.isatty() + self.done_drpm = 0 + self.done_files = 0 + self.done_size = 0 + self.active = [] + self.state = {} + self.last_time = 0 + self.last_size = 0 + self.rate = None + self.total_files = 0 + self.total_size = 0 + + def message(self, msg): + dnf.util._terminal_messenger('write_flush', msg, self.fo) + + def start(self, total_files, total_size, total_drpms=0): + self.total_files = total_files + self.total_size = total_size + 
self.total_drpm = total_drpms + + # download state + self.done_drpm = 0 + self.done_files = 0 + self.done_size = 0 + self.active = [] + self.state = {} + + # rate averaging + self.last_time = 0 + self.last_size = 0 + self.rate = None + + def progress(self, payload, done): + now = time() + text = unicode(payload) + total = int(payload.download_size) + done = int(done) + + # update done_size + if text not in self.state: + self.state[text] = now, 0 + self.active.append(text) + start, old = self.state[text] + self.state[text] = start, done + self.done_size += done - old + + # update screen if enough time has elapsed + if now - self.last_time > self.update_period: + if total > self.total_size: + self.total_size = total + self._update(now) + + def _update(self, now): + if self.last_time: + delta_time = now - self.last_time + delta_size = self.done_size - self.last_size + if delta_time > 0 and delta_size > 0: + # update the average rate + rate = delta_size / delta_time + if self.rate is not None: + weight = min(delta_time/self.rate_average, 1) + rate = rate*weight + self.rate*(1 - weight) + self.rate = rate + self.last_time = now + self.last_size = self.done_size + if not self.isatty: + return + # pick one of the active downloads + text = self.active[int(now/self.tick_period) % len(self.active)] + if self.total_files > 1: + n = '%d' % (self.done_files + 1) + if len(self.active) > 1: + n += '-%d' % (self.done_files + len(self.active)) + text = '(%s/%d): %s' % (n, self.total_files, text) + + # average rate, total done size, estimated remaining time + if self.rate and self.total_size: + time_eta = format_time((self.total_size - self.done_size) / self.rate) + else: + time_eta = '--:--' + msg = ' %5sB/s | %5sB %9s ETA\r' % ( + format_number(self.rate) if self.rate else '--- ', + format_number(self.done_size), + time_eta) + left = _term_width() - len(msg) + bl = (left - 7)//2 + if bl > 8: + # use part of the remaining space for progress bar + if self.total_size: + pct = 
self.done_size * 100 // self.total_size + n, p = divmod(self.done_size * bl * 2 // self.total_size, 2) + bar = '=' * n + '-' * p + msg = '%3d%% [%-*s]%s' % (pct, bl, bar, msg) + left -= bl + 7 + else: + n = self.unknown_progres - 3 + p = 3 + n = 0 if n < 0 else n + bar = ' ' * n + '=' * p + msg = ' [%-*s]%s' % (bl, bar, msg) + left -= bl + 7 + self.unknown_progres = self.unknown_progres + 3 if self.unknown_progres + 3 < bl \ + else 0 + self.message('%-*.*s%s' % (left, left, text, msg)) + + def end(self, payload, status, err_msg): + start = now = time() + text = unicode(payload) + size = int(payload.download_size) + done = 0 + + # update state + if status == dnf.callback.STATUS_MIRROR: + pass + elif status == dnf.callback.STATUS_DRPM: + self.done_drpm += 1 + elif text in self.state: + start, done = self.state.pop(text) + self.active.remove(text) + size -= done + self.done_files += 1 + self.done_size += size + elif status == dnf.callback.STATUS_ALREADY_EXISTS: + self.done_files += 1 + self.done_size += size + + if status: + # the error message, no trimming + if status is dnf.callback.STATUS_DRPM and self.total_drpm > 1: + msg = '[%s %d/%d] %s: ' % (self.STATUS_2_STR[status], self.done_drpm, + self.total_drpm, text) + else: + msg = '[%s] %s: ' % (self.STATUS_2_STR[status], text) + left = _term_width() - len(msg) - 1 + msg = '%s%-*s\n' % (msg, left, err_msg) + else: + if self.total_files > 1: + text = '(%d/%d): %s' % (self.done_files, self.total_files, text) + + # average rate, file size, download time + tm = max(now - start, 0.001) + msg = ' %5sB/s | %5sB %9s \n' % ( + format_number(float(done) / tm), + format_number(done), + format_time(tm)) + left = _term_width() - len(msg) + msg = '%-*.*s%s' % (left, left, text, msg) + self.message(msg) + + # now there's a blank line. fill it if possible. 
+ if self.active: + self._update(now) diff --git a/dnf/cli/term.py b/dnf/cli/term.py new file mode 100644 index 0000000..aa075cf --- /dev/null +++ b/dnf/cli/term.py @@ -0,0 +1,389 @@ +# Copyright (C) 2013-2014 Red Hat, Inc. +# Terminal routines. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import unicode_literals +import curses +import dnf.pycomp +import fcntl +import re +import struct +import sys +import termios + + +def _real_term_width(fd=1): + """ Get the real terminal width """ + try: + buf = 'abcdefgh' + buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf) + ret = struct.unpack(b'hhhh', buf)[1] + return ret + except IOError: + return None + + +def _term_width(fd=1): + """ Compute terminal width falling to default 80 in case of trouble""" + tw = _real_term_width(fd=1) + if not tw: + return 80 + elif tw < 20: + return 20 + else: + return tw + + +class Term(object): + """A class to provide some terminal "UI" helpers based on curses.""" + + # From initial search for "terminfo and python" got: + # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116 + # ...it's probably not copyrightable, but if so ASPN says: + # + # Except where otherwise noted, recipes in the Python Cookbook are + # published under the Python license. 
+ + __enabled = True + + real_columns = property(lambda self: _real_term_width()) + columns = property(lambda self: _term_width()) + + __cap_names = { + 'underline' : 'smul', + 'reverse' : 'rev', + 'normal' : 'sgr0', + } + + __colors = { + 'black' : 0, + 'blue' : 1, + 'green' : 2, + 'cyan' : 3, + 'red' : 4, + 'magenta' : 5, + 'yellow' : 6, + 'white' : 7 + } + __ansi_colors = { + 'black' : 0, + 'red' : 1, + 'green' : 2, + 'yellow' : 3, + 'blue' : 4, + 'magenta' : 5, + 'cyan' : 6, + 'white' : 7 + } + __ansi_forced_MODE = { + 'bold' : '\x1b[1m', + 'blink' : '\x1b[5m', + 'dim' : '', + 'reverse' : '\x1b[7m', + 'underline' : '\x1b[4m', + 'normal' : '\x1b(B\x1b[m' + } + __ansi_forced_FG_COLOR = { + 'black' : '\x1b[30m', + 'red' : '\x1b[31m', + 'green' : '\x1b[32m', + 'yellow' : '\x1b[33m', + 'blue' : '\x1b[34m', + 'magenta' : '\x1b[35m', + 'cyan' : '\x1b[36m', + 'white' : '\x1b[37m' + } + __ansi_forced_BG_COLOR = { + 'black' : '\x1b[40m', + 'red' : '\x1b[41m', + 'green' : '\x1b[42m', + 'yellow' : '\x1b[43m', + 'blue' : '\x1b[44m', + 'magenta' : '\x1b[45m', + 'cyan' : '\x1b[46m', + 'white' : '\x1b[47m' + } + + def __forced_init(self): + self.MODE = self.__ansi_forced_MODE + self.FG_COLOR = self.__ansi_forced_FG_COLOR + self.BG_COLOR = self.__ansi_forced_BG_COLOR + + def reinit(self, term_stream=None, color='auto'): + """Reinitializes the :class:`Term`. + + :param term_stream: the terminal stream that the + :class:`Term` should be initialized to use. If + *term_stream* is not given, :attr:`sys.stdout` is used. + :param color: when to colorize output. Valid values are + 'always', 'auto', and 'never'. 'always' will use ANSI codes + to always colorize output, 'auto' will decide whether do + colorize depending on the terminal, and 'never' will never + colorize. 
+ """ + self.__enabled = True + self.lines = 24 + + if color == 'always': + self.__forced_init() + return + + # Output modes: + self.MODE = { + 'bold' : '', + 'blink' : '', + 'dim' : '', + 'reverse' : '', + 'underline' : '', + 'normal' : '' + } + + # Colours + self.FG_COLOR = { + 'black' : '', + 'blue' : '', + 'green' : '', + 'cyan' : '', + 'red' : '', + 'magenta' : '', + 'yellow' : '', + 'white' : '' + } + + self.BG_COLOR = { + 'black' : '', + 'blue' : '', + 'green' : '', + 'cyan' : '', + 'red' : '', + 'magenta' : '', + 'yellow' : '', + 'white' : '' + } + + if color == 'never': + self.__enabled = False + return + assert color == 'auto' + + # If the stream isn't a tty, then assume it has no capabilities. + if not term_stream: + term_stream = sys.stdout + if not term_stream.isatty(): + self.__enabled = False + return + + # Check the terminal type. If we fail, then assume that the + # terminal has no capabilities. + try: + curses.setupterm(fd=term_stream.fileno()) + except Exception: + self.__enabled = False + return + self._ctigetstr = curses.tigetstr + + self.lines = curses.tigetnum('lines') + + # Look up string capabilities. 
+ for cap_name in self.MODE: + mode = cap_name + if cap_name in self.__cap_names: + cap_name = self.__cap_names[cap_name] + self.MODE[mode] = self._tigetstr(cap_name) + + # Colors + set_fg = self._tigetstr('setf').encode('utf-8') + if set_fg: + for (color, val) in self.__colors.items(): + self.FG_COLOR[color] = curses.tparm(set_fg, val).decode() or '' + set_fg_ansi = self._tigetstr('setaf').encode('utf-8') + if set_fg_ansi: + for (color, val) in self.__ansi_colors.items(): + fg_color = curses.tparm(set_fg_ansi, val).decode() or '' + self.FG_COLOR[color] = fg_color + set_bg = self._tigetstr('setb').encode('utf-8') + if set_bg: + for (color, val) in self.__colors.items(): + self.BG_COLOR[color] = curses.tparm(set_bg, val).decode() or '' + set_bg_ansi = self._tigetstr('setab').encode('utf-8') + if set_bg_ansi: + for (color, val) in self.__ansi_colors.items(): + bg_color = curses.tparm(set_bg_ansi, val).decode() or '' + self.BG_COLOR[color] = bg_color + + def __init__(self, term_stream=None, color='auto'): + self.reinit(term_stream, color) + + def _tigetstr(self, cap_name): + # String capabilities can include "delays" of the form "$<2>". + # For any modern terminal, we should be able to just ignore + # these, so strip them out. + cap = self._ctigetstr(cap_name) or '' + if dnf.pycomp.is_py3bytes(cap): + cap = cap.decode() + return re.sub(r'\$<\d+>[/*]?', '', cap) + + def color(self, color, s): + """Colorize string with color""" + return (self.MODE[color] + str(s) + self.MODE['normal']) + + def bold(self, s): + """Make string bold.""" + return self.color('bold', s) + + def sub(self, haystack, beg, end, needles, escape=None, ignore_case=False): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + *beg*, and postfix each occurrence with *end*, then return the + modified string. 
For example:: + + >>> yt = Term() + >>> yt.sub('spam and eggs', 'x', 'z', ['and']) + 'spam xandz eggs' + + This is particularly useful for emphasizing certain words + in output: for example, calling :func:`sub` with *beg* = + MODE['bold'] and *end* = MODE['normal'] will return a string + that when printed to the terminal will appear to be *haystack* + with each occurrence of the strings in *needles* in bold + face. Note, however, that the :func:`sub_mode`, + :func:`sub_bold`, :func:`sub_fg`, and :func:`sub_bg` methods + provide convenient ways to access this same emphasizing functionality. + + :param haystack: the string to be modified + :param beg: the string to be prefixed onto matches + :param end: the string to be postfixed onto matches + :param needles: a list of strings to add the prefixes and + postfixes to + :param escape: a function that accepts a string and returns + the same string with problematic characters escaped. By + default, :func:`re.escape` is used. + :param ignore_case: whether case should be ignored when + searching for matches + :return: *haystack* with *beg* prefixing, and *end* + postfixing, occurrences of the strings in *needles* + """ + if not self.__enabled: + return haystack + + if not escape: + escape = re.escape + + render = lambda match: beg + match.group() + end + for needle in needles: + pat = escape(needle) + if ignore_case: + pat = re.template(pat, re.I) + haystack = re.sub(pat, render, haystack) + return haystack + def sub_norm(self, haystack, beg, needles, **kwds): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + *beg*, and postfix each occurrence with self.MODE['normal'], + then return the modified string. If *beg* is an ANSI escape + code, such as given by self.MODE['bold'], this method will + return *haystack* with the formatting given by the code only + applied to the strings in *needles*. 
+ + :param haystack: the string to be modified + :param beg: the string to be prefixed onto matches + :param end: the string to be postfixed onto matches + :param needles: a list of strings to add the prefixes and + postfixes to + :return: *haystack* with *beg* prefixing, and self.MODE['normal'] + postfixing, occurrences of the strings in *needles* + """ + return self.sub(haystack, beg, self.MODE['normal'], needles, **kwds) + + def sub_mode(self, haystack, mode, needles, **kwds): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + self.MODE[*mode*], and postfix each occurrence with + self.MODE['normal'], then return the modified string. This + will return a string that when printed to the terminal will + appear to be *haystack* with each occurrence of the strings in + *needles* in the given *mode*. + + :param haystack: the string to be modified + :param mode: the mode to set the matches to be in. Valid + values are given by self.MODE.keys(). + :param needles: a list of strings to add the prefixes and + postfixes to + :return: *haystack* with self.MODE[*mode*] prefixing, and + self.MODE['normal'] postfixing, occurrences of the strings + in *needles* + """ + return self.sub_norm(haystack, self.MODE[mode], needles, **kwds) + + def sub_bold(self, haystack, needles, **kwds): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + self.MODE['bold'], and postfix each occurrence with + self.MODE['normal'], then return the modified string. This + will return a string that when printed to the terminal will + appear to be *haystack* with each occurrence of the strings in + *needles* in bold face. 
+ + :param haystack: the string to be modified + :param needles: a list of strings to add the prefixes and + postfixes to + :return: *haystack* with self.MODE['bold'] prefixing, and + self.MODE['normal'] postfixing, occurrences of the strings + in *needles* + """ + return self.sub_mode(haystack, 'bold', needles, **kwds) + + def sub_fg(self, haystack, color, needles, **kwds): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + self.FG_COLOR[*color*], and postfix each occurrence with + self.MODE['normal'], then return the modified string. This + will return a string that when printed to the terminal will + appear to be *haystack* with each occurrence of the strings in + *needles* in the given color. + + :param haystack: the string to be modified + :param color: the color to set the matches to be in. Valid + values are given by self.FG_COLOR.keys(). + :param needles: a list of strings to add the prefixes and + postfixes to + :return: *haystack* with self.FG_COLOR[*color*] prefixing, and + self.MODE['normal'] postfixing, occurrences of the strings + in *needles* + """ + return self.sub_norm(haystack, self.FG_COLOR[color], needles, **kwds) + + def sub_bg(self, haystack, color, needles, **kwds): + """Search the string *haystack* for all occurrences of any + string in the list *needles*. Prefix each occurrence with + self.BG_COLOR[*color*], and postfix each occurrence with + self.MODE['normal'], then return the modified string. This + will return a string that when printed to the terminal will + appear to be *haystack* with each occurrence of the strings in + *needles* highlighted in the given background color. + + :param haystack: the string to be modified + :param color: the background color to set the matches to be in. Valid + values are given by self.BG_COLOR.keys(). 
+ :param needles: a list of strings to add the prefixes and + postfixes to + :return: *haystack* with self.BG_COLOR[*color*] prefixing, and + self.MODE['normal'] postfixing, occurrences of the strings + in *needles* + """ + return self.sub_norm(haystack, self.BG_COLOR[color], needles, **kwds) diff --git a/dnf/cli/utils.py b/dnf/cli/utils.py new file mode 100644 index 0000000..1c3db75 --- /dev/null +++ b/dnf/cli/utils.py @@ -0,0 +1,129 @@ +# Copyright (C) 2016 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +"""Various utility functions, and a utility class.""" + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.cli.format import format_number +from dnf.i18n import _ +import dnf.util +import logging +import os +import time + +_USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK']) +logger = logging.getLogger('dnf') + +def jiffies_to_seconds(jiffies): + """Convert a number of jiffies to seconds. How many jiffies are in a second + is system-dependent, e.g. 100 jiffies = 1 second is common. 
+ + :param jiffies: a number of jiffies + :return: the equivalent number of seconds + """ + return int(jiffies) / _USER_HZ + + +def seconds_to_ui_time(seconds): + """Return a human-readable string representation of the length of + a time interval given in seconds. + + :param seconds: the length of the time interval in seconds + :return: a human-readable string representation of the length of + the time interval + """ + if seconds >= 60 * 60 * 24: + return "%d day(s) %d:%02d:%02d" % (seconds // (60 * 60 * 24), + (seconds // (60 * 60)) % 24, + (seconds // 60) % 60, + seconds % 60) + if seconds >= 60 * 60: + return "%d:%02d:%02d" % (seconds // (60 * 60), (seconds // 60) % 60, + (seconds % 60)) + return "%02d:%02d" % ((seconds // 60), seconds % 60) + + +def get_process_info(pid): + """Return info dict about a process.""" + + pid = int(pid) + + # Maybe true if /proc isn't mounted, or not Linux ... or something. + if (not os.path.exists("/proc/%d/status" % pid) or + not os.path.exists("/proc/stat") or + not os.path.exists("/proc/%d/stat" % pid)): + return + + ps = {} + with open("/proc/%d/status" % pid) as status_file: + for line in status_file: + if line[-1] != '\n': + continue + data = line[:-1].split(':\t', 1) + if len(data) < 2: + continue + data[1] = dnf.util.rtrim(data[1], ' kB') + ps[data[0].strip().lower()] = data[1].strip() + if 'vmrss' not in ps: + return + if 'vmsize' not in ps: + return + + boot_time = None + with open("/proc/stat") as stat_file: + for line in stat_file: + if line.startswith("btime "): + boot_time = int(line[len("btime "):-1]) + break + if boot_time is None: + return + + with open('/proc/%d/stat' % pid) as stat_file: + ps_stat = stat_file.read().split() + ps['start_time'] = boot_time + jiffies_to_seconds(ps_stat[21]) + ps['state'] = {'R' : _('Running'), + 'S' : _('Sleeping'), + 'D' : _('Uninterruptible'), + 'Z' : _('Zombie'), + 'T' : _('Traced/Stopped') + }.get(ps_stat[2], _('Unknown')) + + return ps + + +def show_lock_owner(pid): + """Output 
information about process holding a lock.""" + + ps = get_process_info(pid) + if not ps: + msg = _('Unable to find information about the locking process (PID %d)') + logger.critical(msg, pid) + return + + msg = _(' The application with PID %d is: %s') % (pid, ps['name']) + + logger.critical("%s", msg) + logger.critical(_(" Memory : %5s RSS (%5sB VSZ)"), + format_number(int(ps['vmrss']) * 1024), + format_number(int(ps['vmsize']) * 1024)) + + ago = seconds_to_ui_time(int(time.time()) - ps['start_time']) + logger.critical(_(' Started: %s - %s ago'), + dnf.util.normalize_time(ps['start_time']), ago) + logger.critical(_(' State : %s'), ps['state']) + + return diff --git a/dnf/comps.py b/dnf/comps.py new file mode 100644 index 0000000..316d647 --- /dev/null +++ b/dnf/comps.py @@ -0,0 +1,717 @@ +# comps.py +# Interface to libcomps. +# +# Copyright (C) 2013-2018 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import libdnf.transaction + +from dnf.exceptions import CompsError +from dnf.i18n import _, ucd +from functools import reduce + +import dnf.i18n +import dnf.util +import fnmatch +import gettext +import itertools +import libcomps +import locale +import logging +import operator +import re +import sys + +logger = logging.getLogger("dnf") + +# :api :binformat +CONDITIONAL = libdnf.transaction.CompsPackageType_CONDITIONAL +DEFAULT = libdnf.transaction.CompsPackageType_DEFAULT +MANDATORY = libdnf.transaction.CompsPackageType_MANDATORY +OPTIONAL = libdnf.transaction.CompsPackageType_OPTIONAL + +ALL_TYPES = CONDITIONAL | DEFAULT | MANDATORY | OPTIONAL + + +def _internal_comps_length(comps): + collections = (comps.categories, comps.groups, comps.environments) + return reduce(operator.__add__, map(len, collections)) + + +def _first_if_iterable(seq): + if seq is None: + return None + return dnf.util.first(seq) + + +def _by_pattern(pattern, case_sensitive, sqn): + """Return items from sqn matching either exactly or glob-wise.""" + + pattern = dnf.i18n.ucd(pattern) + exact = {g for g in sqn if g.name == pattern or g.id == pattern} + if exact: + return exact + + if case_sensitive: + match = re.compile(fnmatch.translate(pattern)).match + else: + match = re.compile(fnmatch.translate(pattern), flags=re.I).match + + return {g for g in sqn if match(g.name) or match(g.id) or match(g.ui_name)} + + +def _fn_display_order(group): + return sys.maxsize if group.display_order is None else group.display_order + + +def install_or_skip(install_fnc, grp_or_env_id, types, exclude=None, + strict=True, exclude_groups=None): + """Either mark in persistor as installed given `grp_or_env` (group + or environment) or skip it (if it's already installed). + `install_fnc` has to be Solver._group_install + or Solver._environment_install. 
+ """ + try: + return install_fnc(grp_or_env_id, types, exclude, strict, exclude_groups) + except dnf.comps.CompsError as e: + logger.warning("%s, %s", ucd(e)[:-1], _("skipping.")) + + +class _Langs(object): + + """Get all usable abbreviations for the current language.""" + + def __init__(self): + self.last_locale = None + self.cache = None + + @staticmethod + def _dotted_locale_str(): + lcl = locale.getlocale(locale.LC_MESSAGES) + if lcl == (None, None): + return 'C' + return'.'.join(lcl) + + def get(self): + current_locale = self._dotted_locale_str() + if self.last_locale == current_locale: + return self.cache + + self.cache = [] + locales = [current_locale] + if current_locale != 'C': + locales.append('C') + for l in locales: + for nlang in gettext._expand_lang(l): + if nlang not in self.cache: + self.cache.append(nlang) + + self.last_locale = current_locale + return self.cache + + +class CompsQuery(object): + + AVAILABLE = 1 + INSTALLED = 2 + + ENVIRONMENTS = 1 + GROUPS = 2 + + def __init__(self, comps, history, kinds, status): + self.comps = comps + self.history = history + self.kinds = kinds + self.status = status + + def _get_groups(self, available, installed): + result = set() + if self.status & self.AVAILABLE: + result.update({i.id for i in available}) + if self.status & self.INSTALLED: + for i in installed: + group = i.getCompsGroupItem() + if not group: + continue + result.add(group.getGroupId()) + return result + + def _get_envs(self, available, installed): + result = set() + if self.status & self.AVAILABLE: + result.update({i.id for i in available}) + if self.status & self.INSTALLED: + for i in installed: + env = i.getCompsEnvironmentItem() + if not env: + continue + result.add(env.getEnvironmentId()) + return result + + def get(self, *patterns): + res = dnf.util.Bunch() + res.environments = [] + res.groups = [] + for pat in patterns: + envs = grps = [] + if self.kinds & self.ENVIRONMENTS: + available = self.comps.environments_by_pattern(pat) + 
installed = self.history.env.search_by_pattern(pat) + envs = self._get_envs(available, installed) + res.environments.extend(envs) + if self.kinds & self.GROUPS: + available = self.comps.groups_by_pattern(pat) + installed = self.history.group.search_by_pattern(pat) + grps = self._get_groups(available, installed) + res.groups.extend(grps) + if not envs and not grps: + if self.status == self.INSTALLED: + msg = _("Module or Group '%s' is not installed.") % ucd(pat) + elif self.status == self.AVAILABLE: + msg = _("Module or Group '%s' is not available.") % ucd(pat) + else: + msg = _("Module or Group '%s' does not exist.") % ucd(pat) + raise CompsError(msg) + return res + + +class Forwarder(object): + def __init__(self, iobj, langs): + self._i = iobj + self._langs = langs + + def __getattr__(self, name): + return getattr(self._i, name) + + def _ui_text(self, default, dct): + for l in self._langs.get(): + t = dct.get(l) + if t is not None: + return t + return default + + @property + def ui_description(self): + return self._ui_text(self.desc, self.desc_by_lang) + + @property + def ui_name(self): + return self._ui_text(self.name, self.name_by_lang) + +class Category(Forwarder): + # :api + def __init__(self, iobj, langs, group_factory): + super(Category, self).__init__(iobj, langs) + self._group_factory = group_factory + + def _build_group(self, grp_id): + grp = self._group_factory(grp_id.name) + if grp is None: + msg = "no group '%s' from category '%s'" + raise ValueError(msg % (grp_id.name, self.id)) + return grp + + def groups_iter(self): + for grp_id in self.group_ids: + yield self._build_group(grp_id) + + @property + def groups(self): + return list(self.groups_iter()) + +class Environment(Forwarder): + # :api + + def __init__(self, iobj, langs, group_factory): + super(Environment, self).__init__(iobj, langs) + self._group_factory = group_factory + + def _build_group(self, grp_id): + grp = self._group_factory(grp_id.name) + if grp is None: + msg = "no group '%s' from 
environment '%s'" + raise ValueError(msg % (grp_id.name, self.id)) + return grp + + def _build_groups(self, ids): + groups = [] + for gi in ids: + try: + groups.append(self._build_group(gi)) + except ValueError as e: + logger.error(e) + + return groups + + def groups_iter(self): + for grp_id in itertools.chain(self.group_ids, self.option_ids): + try: + yield self._build_group(grp_id) + except ValueError as e: + logger.error(e) + + @property + def mandatory_groups(self): + return self._build_groups(self.group_ids) + + @property + def optional_groups(self): + return self._build_groups(self.option_ids) + +class Group(Forwarder): + # :api + def __init__(self, iobj, langs, pkg_factory): + super(Group, self).__init__(iobj, langs) + self._pkg_factory = pkg_factory + self.selected = iobj.default + + def _packages_of_type(self, type_): + return [pkg for pkg in self.packages if pkg.type == type_] + + @property + def conditional_packages(self): + return self._packages_of_type(libcomps.PACKAGE_TYPE_CONDITIONAL) + + @property + def default_packages(self): + return self._packages_of_type(libcomps.PACKAGE_TYPE_DEFAULT) + + def packages_iter(self): + # :api + return map(self._pkg_factory, self.packages) + + @property + def mandatory_packages(self): + return self._packages_of_type(libcomps.PACKAGE_TYPE_MANDATORY) + + @property + def optional_packages(self): + return self._packages_of_type(libcomps.PACKAGE_TYPE_OPTIONAL) + + @property + def visible(self): + return self._i.uservisible + +class Package(Forwarder): + """Represents comps package data. 
:api""" + + _OPT_MAP = { + libcomps.PACKAGE_TYPE_CONDITIONAL : CONDITIONAL, + libcomps.PACKAGE_TYPE_DEFAULT : DEFAULT, + libcomps.PACKAGE_TYPE_MANDATORY : MANDATORY, + libcomps.PACKAGE_TYPE_OPTIONAL : OPTIONAL, + } + + def __init__(self, ipkg): + self._i = ipkg + + @property + def name(self): + # :api + return self._i.name + + @property + def option_type(self): + # :api + return self._OPT_MAP[self.type] + +class Comps(object): + # :api + + def __init__(self): + self._i = libcomps.Comps() + self._langs = _Langs() + + def __len__(self): + return _internal_comps_length(self._i) + + def _build_category(self, icategory): + return Category(icategory, self._langs, self._group_by_id) + + def _build_environment(self, ienvironment): + return Environment(ienvironment, self._langs, self._group_by_id) + + def _build_group(self, igroup): + return Group(igroup, self._langs, self._build_package) + + def _build_package(self, ipkg): + return Package(ipkg) + + def _add_from_xml_filename(self, fn): + comps = libcomps.Comps() + try: + comps.fromxml_f(fn) + except libcomps.ParserError: + errors = comps.get_last_errors() + raise CompsError(' '.join(errors)) + self._i += comps + + @property + def categories(self): + # :api + return list(self.categories_iter()) + + def category_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + cats = self.categories_by_pattern(pattern, case_sensitive) + return _first_if_iterable(cats) + + def categories_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + return _by_pattern(pattern, case_sensitive, self.categories) + + def categories_iter(self): + # :api + return (self._build_category(c) for c in self._i.categories) + + @property + def environments(self): + # :api + return sorted(self.environments_iter(), key=_fn_display_order) + + def _environment_by_id(self, id): + assert dnf.util.is_string_type(id) + return dnf.util.first(g for g in 
self.environments_iter() if g.id == id) + + def environment_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + envs = self.environments_by_pattern(pattern, case_sensitive) + return _first_if_iterable(envs) + + def environments_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + envs = list(self.environments_iter()) + found_envs = _by_pattern(pattern, case_sensitive, envs) + return sorted(found_envs, key=_fn_display_order) + + def environments_iter(self): + # :api + return (self._build_environment(e) for e in self._i.environments) + + @property + def groups(self): + # :api + return sorted(self.groups_iter(), key=_fn_display_order) + + def _group_by_id(self, id_): + assert dnf.util.is_string_type(id_) + return dnf.util.first(g for g in self.groups_iter() if g.id == id_) + + def group_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + grps = self.groups_by_pattern(pattern, case_sensitive) + return _first_if_iterable(grps) + + def groups_by_pattern(self, pattern, case_sensitive=False): + # :api + assert dnf.util.is_string_type(pattern) + grps = _by_pattern(pattern, case_sensitive, list(self.groups_iter())) + return sorted(grps, key=_fn_display_order) + + def groups_iter(self): + # :api + return (self._build_group(g) for g in self._i.groups) + +class CompsTransPkg(object): + def __init__(self, pkg_or_name): + if dnf.util.is_string_type(pkg_or_name): + # from package name + self.basearchonly = False + self.name = pkg_or_name + self.optional = True + self.requires = None + elif isinstance(pkg_or_name, libdnf.transaction.CompsGroupPackage): + # from swdb package + # TODO: + self.basearchonly = False + # self.basearchonly = pkg_or_name.basearchonly + self.name = pkg_or_name.getName() + self.optional = pkg_or_name.getPackageType() & libcomps.PACKAGE_TYPE_OPTIONAL + # TODO: + self.requires = None + # self.requires = 
pkg_or_name.requires + else: + # from comps package + self.basearchonly = pkg_or_name.basearchonly + self.name = pkg_or_name.name + self.optional = pkg_or_name.type & libcomps.PACKAGE_TYPE_OPTIONAL + self.requires = pkg_or_name.requires + + def __eq__(self, other): + return (self.name == other.name and + self.basearchonly == self.basearchonly and + self.optional == self.optional and + self.requires == self.requires) + + def __str__(self): + return self.name + + def __hash__(self): + return hash((self.name, + self.basearchonly, + self.optional, + self.requires)) + +class TransactionBunch(object): + def __init__(self): + self._install = set() + self._install_opt = set() + self._remove = set() + self._upgrade = set() + + def __iadd__(self, other): + self._install.update(other._install) + self._install_opt.update(other._install_opt) + self._upgrade.update(other._upgrade) + self._remove = (self._remove | other._remove) - \ + self._install - self._install_opt - self._upgrade + return self + + def __len__(self): + return len(self.install) + len(self.install_opt) + len(self.upgrade) + len(self.remove) + + @staticmethod + def _set_value(param, val): + for item in val: + if isinstance(item, CompsTransPkg): + param.add(item) + else: + param.add(CompsTransPkg(item)) + + @property + def install(self): + """ + Packages to be installed with strict=True - transaction will + fail if they cannot be installed due to dependency errors etc. 
+ """ + return self._install + + @install.setter + def install(self, value): + self._set_value(self._install, value) + + @property + def install_opt(self): + """ + Packages to be installed with strict=False - they will be + skipped if they cannot be installed + """ + return self._install_opt + + @install_opt.setter + def install_opt(self, value): + self._set_value(self._install_opt, value) + + @property + def remove(self): + return self._remove + + @remove.setter + def remove(self, value): + self._set_value(self._remove, value) + + @property + def upgrade(self): + return self._upgrade + + @upgrade.setter + def upgrade(self, value): + self._set_value(self._upgrade, value) + + +class Solver(object): + def __init__(self, history, comps, reason_fn): + self.history = history + self.comps = comps + self._reason_fn = reason_fn + + @staticmethod + def _mandatory_group_set(env): + return {grp.id for grp in env.mandatory_groups} + + @staticmethod + def _full_package_set(grp): + return {pkg.getName() for pkg in grp.mandatory_packages + + grp.default_packages + grp.optional_packages + + grp.conditional_packages} + + @staticmethod + def _pkgs_of_type(group, pkg_types, exclude=[]): + def filter(pkgs): + return [pkg for pkg in pkgs + if pkg.name not in exclude] + + pkgs = set() + if pkg_types & MANDATORY: + pkgs.update(filter(group.mandatory_packages)) + if pkg_types & DEFAULT: + pkgs.update(filter(group.default_packages)) + if pkg_types & OPTIONAL: + pkgs.update(filter(group.optional_packages)) + if pkg_types & CONDITIONAL: + pkgs.update(filter(group.conditional_packages)) + return pkgs + + def _removable_pkg(self, pkg_name): + assert dnf.util.is_string_type(pkg_name) + return self.history.group.is_removable_pkg(pkg_name) + + def _removable_grp(self, group_id): + assert dnf.util.is_string_type(group_id) + return self.history.env.is_removable_group(group_id) + + def _environment_install(self, env_id, pkg_types, exclude, strict=True, exclude_groups=None): + assert 
dnf.util.is_string_type(env_id) + comps_env = self.comps._environment_by_id(env_id) + swdb_env = self.history.env.new(env_id, comps_env.name, comps_env.ui_name, pkg_types) + self.history.env.install(swdb_env) + + trans = TransactionBunch() + for comps_group in comps_env.mandatory_groups: + if exclude_groups and comps_group.id in exclude_groups: + continue + trans += self._group_install(comps_group.id, pkg_types, exclude, strict) + swdb_env.addGroup(comps_group.id, True, MANDATORY) + + for comps_group in comps_env.optional_groups: + if exclude_groups and comps_group.id in exclude_groups: + continue + swdb_env.addGroup(comps_group.id, False, OPTIONAL) + # TODO: if a group is already installed, mark it as installed? + return trans + + def _environment_remove(self, env_id): + assert dnf.util.is_string_type(env_id) is True + swdb_env = self.history.env.get(env_id) + if not swdb_env: + raise CompsError(_("Environment '%s' is not installed.") % env_id) + + self.history.env.remove(swdb_env) + + trans = TransactionBunch() + group_ids = set([i.getGroupId() for i in swdb_env.getGroups()]) + for group_id in group_ids: + if not self._removable_grp(group_id): + continue + trans += self._group_remove(group_id) + return trans + + def _environment_upgrade(self, env_id): + assert dnf.util.is_string_type(env_id) + comps_env = self.comps._environment_by_id(env_id) + swdb_env = self.history.env.get(comps_env.id) + if not swdb_env: + raise CompsError(_("Environment '%s' is not installed.") % env_id) + if not comps_env: + raise CompsError(_("Environment '%s' is not available.") % env_id) + + old_set = set([i.getGroupId() for i in swdb_env.getGroups() if i.getInstalled()]) + pkg_types = swdb_env.getPackageTypes() + + # create a new record for current transaction + swdb_env = self.history.env.new(comps_env.id, comps_env.name, comps_env.ui_name, pkg_types) + + trans = TransactionBunch() + for comps_group in comps_env.mandatory_groups: + if comps_group.id in old_set: + # upgrade existing 
group + trans += self._group_upgrade(comps_group.id) + else: + # install new group + trans += self._group_install(comps_group.id, pkg_types) + swdb_env.addGroup(comps_group.id, True, MANDATORY) + + for comps_group in comps_env.optional_groups: + swdb_env.addGroup(comps_group.id, False, OPTIONAL) + # TODO: if a group is already installed, mark it as installed? + self.history.env.upgrade(swdb_env) + return trans + + def _group_install(self, group_id, pkg_types, exclude=None, strict=True, exclude_groups=None): + assert dnf.util.is_string_type(group_id) + comps_group = self.comps._group_by_id(group_id) + if not comps_group: + raise ValueError(_("Group_id '%s' does not exist.") % ucd(group_id)) + + swdb_group = self.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types) + for i in comps_group.packages_iter(): + swdb_group.addPackage(i.name, False, i.type) + self.history.group.install(swdb_group) + + trans = TransactionBunch() + # TODO: remove exclude + if strict: + trans.install.update(self._pkgs_of_type(comps_group, pkg_types, exclude=[])) + else: + trans.install_opt.update(self._pkgs_of_type(comps_group, pkg_types, exclude=[])) + return trans + + def _group_remove(self, group_id): + assert dnf.util.is_string_type(group_id) + swdb_group = self.history.group.get(group_id) + self.history.group.remove(swdb_group) + + trans = TransactionBunch() + trans.remove = {pkg for pkg in swdb_group.getPackages() if self._removable_pkg(pkg.getName())} + return trans + + def _group_upgrade(self, group_id): + assert dnf.util.is_string_type(group_id) + comps_group = self.comps._group_by_id(group_id) + swdb_group = self.history.group.get(group_id) + exclude = [] + + if not swdb_group: + argument = comps_group.ui_name if comps_group else group_id + raise CompsError(_("Module or Group '%s' is not installed.") % argument) + if not comps_group: + raise CompsError(_("Module or Group '%s' is not available.") % group_id) + pkg_types = swdb_group.getPackageTypes() + old_set 
= set([i.getName() for i in swdb_group.getPackages()]) + new_set = self._pkgs_of_type(comps_group, pkg_types, exclude) + + # create a new record for current transaction + swdb_group = self.history.group.new(group_id, comps_group.name, comps_group.ui_name, pkg_types) + for i in comps_group.packages_iter(): + swdb_group.addPackage(i.name, False, i.type) + self.history.group.upgrade(swdb_group) + + trans = TransactionBunch() + trans.install = {pkg for pkg in new_set if pkg.name not in old_set} + trans.remove = {name for name in old_set + if name not in [pkg.name for pkg in new_set]} + trans.upgrade = {pkg for pkg in new_set if pkg.name in old_set} + return trans + + def _exclude_packages_from_installed_groups(self, base): + for group in self.persistor.groups: + p_grp = self.persistor.group(group) + if p_grp.installed: + installed_pkg_names = \ + set(p_grp.full_list) - set(p_grp.pkg_exclude) + installed_pkgs = base.sack.query().installed().filterm(name=installed_pkg_names) + for pkg in installed_pkgs: + base._goal.install(pkg) diff --git a/dnf/conf/CMakeLists.txt b/dnf/conf/CMakeLists.txt new file mode 100644 index 0000000..7ea4513 --- /dev/null +++ b/dnf/conf/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB conf *.py) +INSTALL (FILES ${conf} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/conf) diff --git a/dnf/conf/__init__.py b/dnf/conf/__init__.py new file mode 100644 index 0000000..77ba8ab --- /dev/null +++ b/dnf/conf/__init__.py @@ -0,0 +1,46 @@ +# conf.py +# dnf configuration classes. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + + +""" +The configuration classes and routines in yum are splattered over too many +places, hard to change and debug. The new structure here will replace that. Its +goal is to: + +* accept configuration options from all three sources (the main config file, + repo config files, command line switches) +* handle all the logic of storing those and producing related values. +* returning configuration values. +* optionally: asserting no value is overridden once it has been applied + somewhere (e.g. do not let a new repo be initialized with different global + cache path than an already existing one). + +""" + +from __future__ import absolute_import +from __future__ import unicode_literals + +from dnf.conf.config import PRIO_DEFAULT, PRIO_MAINCONFIG, PRIO_AUTOMATICCONFIG +from dnf.conf.config import PRIO_REPOCONFIG, PRIO_PLUGINDEFAULT, PRIO_PLUGINCONFIG +from dnf.conf.config import PRIO_COMMANDLINE, PRIO_RUNTIME + +from dnf.conf.config import BaseConfig, MainConf, RepoConf + +Conf = MainConf diff --git a/dnf/conf/config.py b/dnf/conf/config.py new file mode 100644 index 0000000..3cb561b --- /dev/null +++ b/dnf/conf/config.py @@ -0,0 +1,505 @@ +# dnf configuration classes. +# +# Copyright (C) 2016-2017 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals + +from dnf.yum import misc +from dnf.i18n import ucd, _ +from dnf.pycomp import basestring, urlparse + +import fnmatch +import dnf.conf.substitutions +import dnf.const +import dnf.exceptions +import dnf.pycomp +import dnf.util +import hawkey +import logging +import os +import libdnf.conf +import libdnf.repo +import tempfile + +PRIO_EMPTY = libdnf.conf.Option.Priority_EMPTY +PRIO_DEFAULT = libdnf.conf.Option.Priority_DEFAULT +PRIO_MAINCONFIG = libdnf.conf.Option.Priority_MAINCONFIG +PRIO_AUTOMATICCONFIG = libdnf.conf.Option.Priority_AUTOMATICCONFIG +PRIO_REPOCONFIG = libdnf.conf.Option.Priority_REPOCONFIG +PRIO_PLUGINDEFAULT = libdnf.conf.Option.Priority_PLUGINDEFAULT +PRIO_PLUGINCONFIG = libdnf.conf.Option.Priority_PLUGINCONFIG +PRIO_COMMANDLINE = libdnf.conf.Option.Priority_COMMANDLINE +PRIO_RUNTIME = libdnf.conf.Option.Priority_RUNTIME + +logger = logging.getLogger('dnf') + + +class BaseConfig(object): + """Base class for storing configuration definitions. + + Subclass when creating your own definitions. 
+ + """ + + def __init__(self, config=None, section=None, parser=None): + self.__dict__["_config"] = config + self._section = section + + def __getattr__(self, name): + if "_config" not in self.__dict__: + raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__, name)) + option = getattr(self._config, name) + if option is None: + return None + try: + value = option().getValue() + except Exception as ex: + return None + if isinstance(value, str): + return ucd(value) + return value + + def __setattr__(self, name, value): + option = getattr(self._config, name, None) + if option is None: + # unknown config option, store to BaseConfig only + return super(BaseConfig, self).__setattr__(name, value) + self._set_value(name, value, PRIO_RUNTIME) + + def __str__(self): + out = [] + out.append('[%s]' % self._section) + if self._config: + for optBind in self._config.optBinds(): + try: + value = optBind.second.getValueString() + except RuntimeError: + value = "" + out.append('%s: %s' % (optBind.first, value)) + return '\n'.join(out) + + def _has_option(self, name): + method = getattr(self._config, name, None) + return method is not None + + def _get_value(self, name): + method = getattr(self._config, name, None) + if method is None: + return None + return method().getValue() + + def _get_priority(self, name): + method = getattr(self._config, name, None) + if method is None: + return None + return method().getPriority() + + def _set_value(self, name, value, priority=PRIO_RUNTIME): + """Set option's value if priority is equal or higher + than current priority.""" + method = getattr(self._config, name, None) + if method is None: + raise Exception("Option \"" + name + "\" does not exists") + option = method() + if value is None: + try: + option.set(priority, value) + except Exception: + pass + else: + try: + if isinstance(value, list) or isinstance(value, tuple): + option.set(priority, libdnf.conf.VectorString(value)) + elif (isinstance(option, 
libdnf.conf.OptionBool) + or isinstance(option, libdnf.conf.OptionChildBool) + ) and isinstance(value, int): + option.set(priority, bool(value)) + else: + option.set(priority, value) + except RuntimeError as e: + raise dnf.exceptions.ConfigError(_("Error parsing '%s': %s") + % (value, str(e)), + raw_error=str(e)) + + def _populate(self, parser, section, filename, priority=PRIO_DEFAULT): + """Set option values from an INI file section.""" + if parser.hasSection(section): + for name in parser.options(section): + value = parser.getSubstitutedValue(section, name) + if not value or value == 'None': + value = '' + if hasattr(self._config, name): + try: + self._config.optBinds().at(name).newString(priority, value) + except RuntimeError as e: + logger.debug(_('Unknown configuration value: %s=%s in %s; %s'), + ucd(name), ucd(value), ucd(filename), str(e)) + else: + if name == 'arch' and hasattr(self, name): + setattr(self, name, value) + else: + logger.debug( + _('Unknown configuration option: %s = %s in %s'), + ucd(name), ucd(value), ucd(filename)) + + def dump(self): + # :api + """Return a string representing the values of all the + configuration options. + """ + output = ['[%s]' % self._section] + + if self._config: + for optBind in self._config.optBinds(): + # if not opt._is_runtimeonly(): + try: + output.append('%s = %s' % (optBind.first, optBind.second.getValueString())) + except RuntimeError: + pass + + return '\n'.join(output) + '\n' + + @staticmethod + def write_raw_configfile(filename, section_id, substitutions, modify): + # :api + """ + filename - name of config file (.conf or .repo) + section_id - id of modified section (e.g. 
main, fedora, updates) + substitutions - instance of base.conf.substitutions + modify - dict of modified options + """ + parser = libdnf.conf.ConfigParser() + parser.read(filename) + + # b/c repoids can have $values in them we need to map both ways to figure + # out which one is which + if not parser.hasSection(section_id): + for sect in parser.getData(): + if libdnf.conf.ConfigParser.substitute(sect, substitutions) == section_id: + section_id = sect + + for name, value in modify.items(): + if isinstance(value, list): + value = ' '.join(value) + parser.setValue(section_id, name, value) + + parser.write(filename, False) + + +class MainConf(BaseConfig): + # :api + """Configuration option definitions for dnf.conf's [main] section.""" + def __init__(self, section='main', parser=None): + # pylint: disable=R0915 + config = libdnf.conf.ConfigMain() + super(MainConf, self).__init__(config, section, parser) + self._set_value('pluginpath', [dnf.const.PLUGINPATH], PRIO_DEFAULT) + self._set_value('pluginconfpath', [dnf.const.PLUGINCONFPATH], PRIO_DEFAULT) + self.substitutions = dnf.conf.substitutions.Substitutions() + self.arch = hawkey.detect_arch() + self._config.system_cachedir().set(PRIO_DEFAULT, dnf.const.SYSTEM_CACHEDIR) + + # setup different cache and log for non-privileged users + if dnf.util.am_i_root(): + cachedir = dnf.const.SYSTEM_CACHEDIR + logdir = '/var/log' + else: + try: + cachedir = logdir = misc.getCacheDir() + except (IOError, OSError) as e: + msg = _('Could not set cachedir: {}').format(ucd(e)) + raise dnf.exceptions.Error(msg) + + self._config.cachedir().set(PRIO_DEFAULT, cachedir) + self._config.logdir().set(PRIO_DEFAULT, logdir) + + # track list of temporary files created + self.tempfiles = [] + + def __del__(self): + for file_name in self.tempfiles: + os.unlink(file_name) + + @property + def get_reposdir(self): + # :api + """Returns the value of reposdir""" + myrepodir = None + # put repo file into first reposdir which exists or create it + for rdir in 
self._get_value('reposdir'): + if os.path.exists(rdir): + myrepodir = rdir + break + + if not myrepodir: + myrepodir = self._get_value('reposdir')[0] + dnf.util.ensure_dir(myrepodir) + return myrepodir + + def _check_remote_file(self, optname): + """ + In case the option value is a remote URL, download it to the temporary location + and use this temporary file instead. + """ + prio = self._get_priority(optname) + val = self._get_value(optname) + if isinstance(val, basestring): + location = urlparse.urlparse(val) + if location[0] in ('file', ''): + # just strip the file:// prefix + self._set_value(optname, location.path, prio) + else: + downloader = libdnf.repo.Downloader() + temp_fd, temp_path = tempfile.mkstemp(prefix='dnf-downloaded-config-') + self.tempfiles.append(temp_path) + try: + downloader.downloadURL(None, val, temp_fd) + except RuntimeError as e: + raise dnf.exceptions.ConfigError( + _('Configuration file URL "{}" could not be downloaded:\n' + ' {}').format(val, str(e))) + else: + self._set_value(optname, temp_path, prio) + finally: + os.close(temp_fd) + + def _search_inside_installroot(self, optname): + """ + Return root used as prefix for option (installroot or "/"). When specified from commandline + it returns value from conf.installroot + """ + installroot = self._get_value('installroot') + if installroot == "/": + return installroot + prio = self._get_priority(optname) + # don't modify paths specified on commandline + if prio >= PRIO_COMMANDLINE: + return installroot + val = self._get_value(optname) + # if it exists inside installroot use it (i.e. 
adjust configuration) + # for lists any component counts + if not isinstance(val, str): + if any(os.path.exists(os.path.join(installroot, p.lstrip('/'))) for p in val): + self._set_value( + optname, + libdnf.conf.VectorString([self._prepend_installroot_path(p) for p in val]), + prio + ) + return installroot + elif os.path.exists(os.path.join(installroot, val.lstrip('/'))): + self._set_value(optname, self._prepend_installroot_path(val), prio) + return installroot + return "/" + + def prepend_installroot(self, optname): + # :api + prio = self._get_priority(optname) + new_path = self._prepend_installroot_path(self._get_value(optname)) + self._set_value(optname, new_path, prio) + + def _prepend_installroot_path(self, path): + root_path = os.path.join(self._get_value('installroot'), path.lstrip('/')) + return libdnf.conf.ConfigParser.substitute(root_path, self.substitutions) + + def _configure_from_options(self, opts): + """Configure parts of CLI from the opts """ + config_args = ['plugins', 'version', 'config_file_path', + 'debuglevel', 'errorlevel', 'installroot', + 'best', 'assumeyes', 'assumeno', 'clean_requirements_on_remove', 'gpgcheck', + 'showdupesfromrepos', 'plugins', 'ip_resolve', + 'rpmverbosity', 'disable_excludes', 'color', + 'downloadonly', 'exclude', 'excludepkgs', 'skip_broken', + 'tsflags', 'arch', 'basearch', 'ignorearch', 'cacheonly', 'comment'] + + for name in config_args: + value = getattr(opts, name, None) + if value is not None and value != []: + if self._has_option(name): + appendValue = False + if self._config: + try: + appendValue = self._config.optBinds().at(name).getAddValue() + except RuntimeError: + # fails if option with "name" does not exist in _config (libdnf) + pass + if appendValue: + add_priority = dnf.conf.PRIO_COMMANDLINE + if add_priority < self._get_priority(name): + add_priority = self._get_priority(name) + for item in value: + if item: + self._set_value(name, self._get_value(name) + [item], add_priority) + else: + 
self._set_value(name, [], dnf.conf.PRIO_COMMANDLINE) + else: + self._set_value(name, value, dnf.conf.PRIO_COMMANDLINE) + elif hasattr(self, name): + setattr(self, name, value) + else: + logger.warning(_('Unknown configuration option: %s = %s'), + ucd(name), ucd(value)) + + if getattr(opts, 'gpgcheck', None) is False: + self._set_value("localpkg_gpgcheck", False, dnf.conf.PRIO_COMMANDLINE) + + if hasattr(opts, 'main_setopts'): + # now set all the non-first-start opts from main from our setopts + # pylint: disable=W0212 + for name, values in opts.main_setopts.items(): + for val in values: + if hasattr(self._config, name): + try: + # values in main_setopts are strings, try to parse it using newString() + self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val) + except RuntimeError as e: + raise dnf.exceptions.ConfigError( + _("Error parsing --setopt with key '%s', value '%s': %s") + % (name, val, str(e)), raw_error=str(e)) + else: + # if config option with "name" doesn't exist in _config, it could be defined + # only in Python layer + if hasattr(self, name): + setattr(self, name, val) + else: + msg = _("Main config did not have a %s attr. 
before setopt") + logger.warning(msg, name) + + def exclude_pkgs(self, pkgs): + # :api + name = "excludepkgs" + + if pkgs is not None and pkgs != []: + if self._has_option(name): + self._set_value(name, pkgs, dnf.conf.PRIO_COMMANDLINE) + else: + logger.warning(_('Unknown configuration option: %s = %s'), + ucd(name), ucd(pkgs)) + + def _adjust_conf_options(self): + """Adjust conf options interactions""" + + skip_broken_val = self._get_value('skip_broken') + if skip_broken_val: + self._set_value('strict', not skip_broken_val, self._get_priority('skip_broken')) + + @property + def releasever(self): + # :api + return self.substitutions.get('releasever') + + @releasever.setter + def releasever(self, val): + # :api + if val is None: + self.substitutions.pop('releasever', None) + return + self.substitutions['releasever'] = str(val) + + @property + def arch(self): + # :api + return self.substitutions.get('arch') + + @arch.setter + def arch(self, val): + # :api + + if val is None: + self.substitutions.pop('arch', None) + return + if val not in dnf.rpm._BASEARCH_MAP.keys(): + msg = _('Incorrect or unknown "{}": {}') + raise dnf.exceptions.Error(msg.format("arch", val)) + self.substitutions['arch'] = val + self.basearch = dnf.rpm.basearch(val) + + @property + def basearch(self): + # :api + return self.substitutions.get('basearch') + + @basearch.setter + def basearch(self, val): + # :api + + if val is None: + self.substitutions.pop('basearch', None) + return + if val not in dnf.rpm._BASEARCH_MAP.values(): + msg = _('Incorrect or unknown "{}": {}') + raise dnf.exceptions.Error(msg.format("basearch", val)) + self.substitutions['basearch'] = val + + def read(self, filename=None, priority=PRIO_DEFAULT): + # :api + if filename is None: + filename = self._get_value('config_file_path') + parser = libdnf.conf.ConfigParser() + try: + parser.read(filename) + except RuntimeError as e: + raise dnf.exceptions.ConfigError(_('Parsing file "%s" failed: %s') % (filename, e)) + except IOError 
as e: + logger.warning(e) + self._populate(parser, self._section, filename, priority) + + # update to where we read the file from + self._set_value('config_file_path', filename, priority) + + @property + def verbose(self): + return self._get_value('debuglevel') >= dnf.const.VERBOSE_LEVEL + + +class RepoConf(BaseConfig): + """Option definitions for repository INI file sections.""" + + def __init__(self, parent, section=None, parser=None): + masterConfig = parent._config if parent else libdnf.conf.ConfigMain() + super(RepoConf, self).__init__(libdnf.conf.ConfigRepo(masterConfig), section, parser) + # Do not remove! Attribute is a reference holder. + # Prevents premature removal of the masterConfig. The libdnf ConfigRepo points to it. + self._masterConfigRefHolder = masterConfig + if section: + self._config.name().set(PRIO_DEFAULT, section) + + def _configure_from_options(self, opts): + """Configure repos from the opts. """ + + if getattr(opts, 'gpgcheck', None) is False: + for optname in ['gpgcheck', 'repo_gpgcheck']: + self._set_value(optname, False, dnf.conf.PRIO_COMMANDLINE) + + repo_setopts = getattr(opts, 'repo_setopts', {}) + for repoid, setopts in repo_setopts.items(): + if not fnmatch.fnmatch(self._section, repoid): + continue + for name, values in setopts.items(): + for val in values: + if hasattr(self._config, name): + try: + # values in repo_setopts are strings, try to parse it using newString() + self._config.optBinds().at(name).newString(PRIO_COMMANDLINE, val) + except RuntimeError as e: + raise dnf.exceptions.ConfigError( + _("Error parsing --setopt with key '%s.%s', value '%s': %s") + % (self._section, name, val, str(e)), raw_error=str(e)) + else: + msg = _("Repo %s did not have a %s attr. before setopt") + logger.warning(msg, self._section, name) diff --git a/dnf/conf/read.py b/dnf/conf/read.py new file mode 100644 index 0000000..a526a71 --- /dev/null +++ b/dnf/conf/read.py @@ -0,0 +1,110 @@ +# read.py +# Reading configuration from files. 
+# +# Copyright (C) 2014-2017 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _, ucd +import dnf.conf +import libdnf.conf +import dnf.exceptions +import dnf.repo +import glob +import logging + +logger = logging.getLogger('dnf') + + +class RepoReader(object): + def __init__(self, conf, opts): + self.conf = conf + self.opts = opts + + def __iter__(self): + # get the repos from the main yum.conf file + for r in self._get_repos(self.conf.config_file_path): + yield r + + # read .repo files from directories specified by conf.reposdir + for repofn in (repofn for reposdir in self.conf.reposdir + for repofn in sorted(glob.glob('%s/*.repo' % reposdir))): + try: + for r in self._get_repos(repofn): + yield r + except dnf.exceptions.ConfigError: + logger.warning(_("Warning: failed loading '%s', skipping."), + repofn) + + def _build_repo(self, parser, id_, repofn): + """Build a repository using the parsed data.""" + + repo = dnf.repo.Repo(id_, self.conf) + try: + repo._populate(parser, id_, 
repofn, dnf.conf.PRIO_REPOCONFIG) + except ValueError as e: + msg = _("Repository '%s': Error parsing config: %s") % (id_, e) + raise dnf.exceptions.ConfigError(msg) + + # Ensure that the repo name is set + if repo._get_priority('name') == dnf.conf.PRIO_DEFAULT: + msg = _("Repository '%s' is missing name in configuration, using id.") + logger.warning(msg, id_) + repo.name = ucd(repo.name) + repo._substitutions.update(self.conf.substitutions) + repo.cfg = parser + + return repo + + def _get_repos(self, repofn): + """Parse and yield all repositories from a config file.""" + + substs = self.conf.substitutions + parser = libdnf.conf.ConfigParser() + parser.setSubstitutions(substs) + try: + parser.read(repofn) + except RuntimeError as e: + raise dnf.exceptions.ConfigError(_('Parsing file "%s" failed: %s') % (repofn, e)) + except IOError as e: + logger.warning(e) + + # Check sections in the .repo file that was just slurped up + for section in parser.getData(): + + if section == 'main': + continue + + # Check the repo.id against the valid chars + invalid = dnf.repo.repo_id_invalid(section) + if invalid is not None: + logger.warning(_("Bad id for repo: %s, byte = %s %d"), section, + section[invalid], invalid) + continue + + try: + thisrepo = self._build_repo(parser, ucd(section), repofn) + except (dnf.exceptions.RepoError, dnf.exceptions.ConfigError) as e: + logger.warning(e) + continue + else: + thisrepo.repofile = repofn + + thisrepo._configure_from_options(self.opts) + + yield thisrepo diff --git a/dnf/conf/substitutions.py b/dnf/conf/substitutions.py new file mode 100644 index 0000000..703e4a4 --- /dev/null +++ b/dnf/conf/substitutions.py @@ -0,0 +1,64 @@ +# substitutions.py +# Config file substitutions. +# +# Copyright (C) 2012-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +import os +import re + +import dnf +import dnf.exceptions + +ENVIRONMENT_VARS_RE = re.compile(r'^DNF_VAR_[A-Za-z0-9_]+$') + +class Substitutions(dict): + # :api + + def __init__(self): + super(Substitutions, self).__init__() + self._update_from_env() + + def _update_from_env(self): + numericvars = ['DNF%d' % num for num in range(0, 10)] + for key, val in os.environ.items(): + if ENVIRONMENT_VARS_RE.match(key): + self[key[8:]] = val # remove "DNF_VAR_" prefix + elif key in numericvars: + self[key] = val + + def update_from_etc(self, installroot, varsdir=("/etc/yum/vars/", "/etc/dnf/vars/")): + # :api + + for vars_path in varsdir: + fsvars = [] + try: + dir_fsvars = os.path.join(installroot, vars_path.lstrip('/')) + fsvars = os.listdir(dir_fsvars) + except OSError: + continue + for fsvar in fsvars: + filepath = os.path.join(dir_fsvars, fsvar) + if os.path.isfile(filepath): + try: + with open(filepath) as fp: + val = fp.readline() + if val and val[-1] == '\n': + val = val[:-1] + except (OSError, IOError): + continue + self[fsvar] = val diff --git 
a/dnf/const.py.in b/dnf/const.py.in new file mode 100644 index 0000000..4ef2613 --- /dev/null +++ b/dnf/const.py.in @@ -0,0 +1,58 @@ +# const.py +# dnf constants. +# +# Copyright (C) 2012-2015 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import unicode_literals +import distutils.sysconfig + +CONF_FILENAME='/etc/dnf/dnf.conf' # :api +CONF_AUTOMATIC_FILENAME='/etc/dnf/automatic.conf' +DISTROVERPKG=('system-release(releasever)', 'system-release', + 'distribution-release(releasever)', 'distribution-release', + 'redhat-release', 'suse-release') +GROUP_PACKAGE_TYPES = ('mandatory', 'default', 'conditional') # :api +INSTALLONLYPKGS=['kernel', 'kernel-PAE', + 'installonlypkg(kernel)', + 'installonlypkg(kernel-module)', + 'installonlypkg(vm)', + 'multiversion(kernel)'] +LOG='dnf.log' +LOG_HAWKEY='hawkey.log' +LOG_LIBREPO='dnf.librepo.log' +LOG_MARKER='--- logging initialized ---' +LOG_RPM='dnf.rpm.log' +NAME='DNF' +PERSISTDIR='/var/lib/dnf' # :api +PID_FILENAME = '/var/run/dnf.pid' +RUNDIR='/run' +USER_RUNDIR='/run/user' +SYSTEM_CACHEDIR='/var/cache/dnf' +TMPDIR='/var/tmp/' +# CLI verbose values greater or equal to this are considered "verbose": +VERBOSE_LEVEL=6 + +PREFIX=NAME.lower() +PROGRAM_NAME=NAME.lower() # Deprecated - no longer used, Argparser prints program name based on sys.argv +PLUGINCONFPATH = '/etc/dnf/plugins' # :api +PLUGINPATH = '%s/dnf-plugins' % distutils.sysconfig.get_python_lib() +VERSION='@DNF_VERSION@' +USER_AGENT = "dnf/%s" % VERSION + +BUGTRACKER_COMPONENT=NAME.lower() +BUGTRACKER='https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora&component=%s' % BUGTRACKER_COMPONENT diff --git a/dnf/crypto.py b/dnf/crypto.py new file mode 100644 index 0000000..7bda414 --- /dev/null +++ b/dnf/crypto.py @@ -0,0 +1,199 @@ +# crypto.py +# Keys and signatures. +# +# Copyright (C) 2014 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ +import contextlib +import dnf.pycomp +import dnf.util +import dnf.yum.misc +import io +import logging +import os +import tempfile + +try: + from gpg import Context + from gpg import Data +except ImportError: + import gpgme + + + class Context(object): + def __init__(self): + self.__dict__["ctx"] = gpgme.Context() + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + pass + + @property + def armor(self): + return self.ctx.armor + + @armor.setter + def armor(self, value): + self.ctx.armor = value + + def op_import(self, key_fo): + if isinstance(key_fo, basestring): + key_fo = io.BytesIO(key_fo) + self.ctx.import_(key_fo) + + def op_export(self, pattern, mode, keydata): + self.ctx.export(pattern, keydata) + + def __getattr__(self, name): + return getattr(self.ctx, name) + + + class Data(object): + def __init__(self): + self.__dict__["buf"] = io.BytesIO() + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + pass + + def read(self): + return self.buf.getvalue() + + def __getattr__(self, name): + return getattr(self.buf, name) + + +GPG_HOME_ENV = 'GNUPGHOME' +logger = 
logging.getLogger('dnf') + + +def _extract_signing_subkey(key): + return dnf.util.first(subkey for subkey in key.subkeys if subkey.can_sign) + + +def _printable_fingerprint(fpr_hex): + segments = (fpr_hex[i:i + 4] for i in range(0, len(fpr_hex), 4)) + return " ".join(segments) + + +def import_repo_keys(repo): + gpgdir = repo._pubring_dir + known_keys = keyids_from_pubring(gpgdir) + for keyurl in repo.gpgkey: + for keyinfo in retrieve(keyurl, repo): + keyid = keyinfo.id_ + if keyid in known_keys: + logger.debug(_('repo %s: 0x%s already imported'), repo.id, keyid) + continue + if not repo._key_import._confirm(keyinfo): + continue + dnf.yum.misc.import_key_to_pubring( + keyinfo.raw_key, keyinfo.short_id, gpgdir=gpgdir, + make_ro_copy=False) + logger.debug(_('repo %s: imported key 0x%s.'), repo.id, keyid) + + +def keyids_from_pubring(gpgdir): + if not os.path.exists(gpgdir): + return [] + + with pubring_dir(gpgdir), Context() as ctx: + keyids = [] + for k in ctx.keylist(): + subkey = _extract_signing_subkey(k) + if subkey is not None: + keyids.append(subkey.keyid) + return keyids + + +def log_key_import(keyinfo): + msg = (_('Importing GPG key 0x%s:\n' + ' Userid : "%s"\n' + ' Fingerprint: %s\n' + ' From : %s') % + (keyinfo.short_id, keyinfo.userid, + _printable_fingerprint(keyinfo.fingerprint), + keyinfo.url.replace("file://", ""))) + logger.critical("%s", msg) + + +@contextlib.contextmanager +def pubring_dir(pubring_dir): + orig = os.environ.get(GPG_HOME_ENV, None) + os.environ[GPG_HOME_ENV] = pubring_dir + try: + yield + finally: + if orig is None: + del os.environ[GPG_HOME_ENV] + else: + os.environ[GPG_HOME_ENV] = orig + + +def rawkey2infos(key_fo): + pb_dir = tempfile.mkdtemp() + keyinfos = [] + with pubring_dir(pb_dir), Context() as ctx: + ctx.op_import(key_fo) + for key in ctx.keylist(): + subkey = _extract_signing_subkey(key) + if subkey is None: + continue + keyinfos.append(Key(key, subkey)) + ctx.armor = True + for info in keyinfos: + with Data() as sink: + 
ctx.op_export(info.id_, 0, sink) + sink.seek(0, os.SEEK_SET) + info.raw_key = sink.read() + dnf.util.rm_rf(pb_dir) + return keyinfos + + +def retrieve(keyurl, repo=None): + with dnf.util._urlopen(keyurl, repo=repo) as handle: + keyinfos = rawkey2infos(handle) + for keyinfo in keyinfos: + keyinfo.url = keyurl + return keyinfos + + +class Key(object): + def __init__(self, key, subkey): + self.id_ = subkey.keyid + self.fingerprint = subkey.fpr + self.raw_key = None + self.timestamp = subkey.timestamp + self.url = None + self.userid = key.uids[0].uid + + @property + def short_id(self): + rj = '0' if dnf.pycomp.PY3 else b'0' + return self.id_[-8:].rjust(8, rj) + + @property + def rpm_id(self): + return self.short_id.lower() diff --git a/dnf/db/CMakeLists.txt b/dnf/db/CMakeLists.txt new file mode 100644 index 0000000..2ee4e9f --- /dev/null +++ b/dnf/db/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB db_srcs *.py) +INSTALL (FILES ${db_srcs} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/db) diff --git a/dnf/db/__init__.py b/dnf/db/__init__.py new file mode 100644 index 0000000..05d6273 --- /dev/null +++ b/dnf/db/__init__.py @@ -0,0 +1,17 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# DNF database subpackage +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
diff --git a/dnf/db/group.py b/dnf/db/group.py new file mode 100644 index 0000000..e3a0877 --- /dev/null +++ b/dnf/db/group.py @@ -0,0 +1,378 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2017-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# + + +import libdnf.transaction + +import dnf.db.history +import dnf.transaction +import dnf.exceptions +from dnf.i18n import _ +from dnf.util import logger + + +class PersistorBase(object): + def __init__(self, history): + assert isinstance(history, dnf.db.history.SwdbInterface), str(type(history)) + self.history = history + self._installed = {} + self._removed = {} + self._upgraded = {} + + def __len__(self): + return len(self._installed) + len(self._removed) + len(self._upgraded) + + def clean(self): + self._installed = {} + self._removed = {} + self._upgraded = {} + + def _get_obj_id(self, obj): + raise NotImplementedError + + def install(self, obj): + self._installed[self._get_obj_id(obj)] = obj + + def remove(self, obj): + self._removed[self._get_obj_id(obj)] = obj + + def upgrade(self, obj): + self._upgraded[self._get_obj_id(obj)] = obj + + def new(self, obj_id, name, translated_name, pkg_types): + raise NotImplementedError + + def get(self, obj_id): + raise NotImplementedError + + def search_by_pattern(self, pattern): + raise 
NotImplementedError + + +class GroupPersistor(PersistorBase): + + def __iter__(self): + items = self.history.swdb.getItems() + items = [i for i in items if i.getCompsGroupItem()] + return iter(items) + + def _get_obj_id(self, obj): + return obj.getGroupId() + + def new(self, obj_id, name, translated_name, pkg_types): + swdb_group = self.history.swdb.createCompsGroupItem() + swdb_group.setGroupId(obj_id) + swdb_group.setName(name) + swdb_group.setTranslatedName(translated_name) + swdb_group.setPackageTypes(pkg_types) + return swdb_group + + def get(self, obj_id): + swdb_group = self.history.swdb.getCompsGroupItem(obj_id) + if not swdb_group: + return None + swdb_group = swdb_group.getCompsGroupItem() + return swdb_group + + def search_by_pattern(self, pattern): + return self.history.swdb.getCompsGroupItemsByPattern(pattern) + + def get_package_groups(self, pkg_name): + return self.history.swdb.getPackageCompsGroups(pkg_name) + + def is_removable_pkg(self, pkg_name): + # for group removal and autoremove + reason = self.history.swdb.resolveRPMTransactionItemReason(pkg_name, "", -2) + if reason != libdnf.transaction.TransactionItemReason_GROUP: + return False + + # TODO: implement lastTransId == -2 in libdnf + package_groups = set(self.get_package_groups(pkg_name)) + for group_id, group in self._removed.items(): + for pkg in group.getPackages(): + if pkg.getName() != pkg_name: + continue + if not pkg.getInstalled(): + continue + package_groups.remove(group_id) + for group_id, group in self._installed.items(): + for pkg in group.getPackages(): + if pkg.getName() != pkg_name: + continue + if not pkg.getInstalled(): + continue + package_groups.add(group_id) + if package_groups: + return False + return True + + +class EnvironmentPersistor(PersistorBase): + + def __iter__(self): + items = self.history.swdb.getItems() + items = [i for i in items if i.getCompsEnvironmentItem()] + return iter(items) + + def _get_obj_id(self, obj): + return obj.getEnvironmentId() + + def 
new(self, obj_id, name, translated_name, pkg_types): + swdb_env = self.history.swdb.createCompsEnvironmentItem() + swdb_env.setEnvironmentId(obj_id) + swdb_env.setName(name) + swdb_env.setTranslatedName(translated_name) + swdb_env.setPackageTypes(pkg_types) + return swdb_env + + def get(self, obj_id): + swdb_env = self.history.swdb.getCompsEnvironmentItem(obj_id) + if not swdb_env: + return None + swdb_env = swdb_env.getCompsEnvironmentItem() + return swdb_env + + def search_by_pattern(self, pattern): + return self.history.swdb.getCompsEnvironmentItemsByPattern(pattern) + + def get_group_environments(self, group_id): + return self.history.swdb.getCompsGroupEnvironments(group_id) + + def is_removable_group(self, group_id): + # for environment removal + swdb_group = self.history.group.get(group_id) + if not swdb_group: + return False + + # TODO: implement lastTransId == -2 in libdnf + group_environments = set(self.get_group_environments(group_id)) + for env_id, env in self._removed.items(): + for group in env.getGroups(): + if group.getGroupId() != group_id: + continue + if not group.getInstalled(): + continue + group_environments.remove(env_id) + for env_id, env in self._installed.items(): + for group in env.getGroups(): + if group.getGroupId() != group_id: + continue + if not group.getInstalled(): + continue + group_environments.add(env_id) + if group_environments: + return False + return True + + +class RPMTransaction(object): + def __init__(self, history, transaction=None): + self.history = history + self.transaction = transaction + if not self.transaction: + try: + self.history.swdb.initTransaction() + except: + pass + self._swdb_ti_pkg = {} + + # TODO: close trans if needed + + def __iter__(self): + # :api + if self.transaction: + items = self.transaction.getItems() + else: + items = self.history.swdb.getItems() + items = [dnf.db.history.RPMTransactionItemWrapper(self.history, i) for i in items if i.getRPMItem()] + return iter(items) + + def __len__(self): + if 
self.transaction: + items = self.transaction.getItems() + else: + items = self.history.swdb.getItems() + items = [dnf.db.history.RPMTransactionItemWrapper(self.history, i) for i in items if i.getRPMItem()] + return len(items) + + def _pkg_to_swdb_rpm_item(self, pkg): + rpm_item = self.history.swdb.createRPMItem() + rpm_item.setName(pkg.name) + rpm_item.setEpoch(pkg.epoch or 0) + rpm_item.setVersion(pkg.version) + rpm_item.setRelease(pkg.release) + rpm_item.setArch(pkg.arch) + return rpm_item + + def new(self, pkg, action, reason=None, replaced_by=None): + rpm_item = self._pkg_to_swdb_rpm_item(pkg) + repoid = self.get_repoid(pkg) + if reason is None: + reason = self.get_reason(pkg) + result = self.history.swdb.addItem(rpm_item, repoid, action, reason) + if replaced_by: + result.addReplacedBy(replaced_by) + self._swdb_ti_pkg[result] = pkg + return result + + def get_repoid(self, pkg): + result = getattr(pkg, "_force_swdb_repoid", None) + if result: + return result + return pkg.reponame + + def get_reason(self, pkg): + """Get reason for package""" + return self.history.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1) + + def get_reason_name(self, pkg): + """Get reason for package""" + return libdnf.transaction.TransactionItemReasonToString(self.get_reason(pkg)) + + def _add_obsoleted(self, obsoleted, replaced_by=None): + obsoleted = obsoleted or [] + for obs in obsoleted: + ti = self.new(obs, libdnf.transaction.TransactionItemAction_OBSOLETED) + if replaced_by: + ti.addReplacedBy(replaced_by) + + def add_downgrade(self, new, old, obsoleted=None): + ti_new = self.new(new, libdnf.transaction.TransactionItemAction_DOWNGRADE) + ti_old = self.new(old, libdnf.transaction.TransactionItemAction_DOWNGRADED, replaced_by=ti_new) + self._add_obsoleted(obsoleted, replaced_by=ti_new) + + def add_erase(self, old, reason=None): + self.add_remove(old, reason) + + def add_install(self, new, obsoleted=None, reason=None): + reason = reason or 
libdnf.transaction.TransactionItemReason_USER + ti_new = self.new(new, libdnf.transaction.TransactionItemAction_INSTALL, reason) + self._add_obsoleted(obsoleted, replaced_by=ti_new) + + def add_reinstall(self, new, old, obsoleted=None): + ti_new = self.new(new, libdnf.transaction.TransactionItemAction_REINSTALL) + ti_old = self.new(old, libdnf.transaction.TransactionItemAction_REINSTALLED, replaced_by=ti_new) + self._add_obsoleted(obsoleted, replaced_by=ti_new) + + def add_remove(self, old, reason=None): + reason = reason or libdnf.transaction.TransactionItemReason_USER + ti_old = self.new(old, libdnf.transaction.TransactionItemAction_REMOVE, reason) + + def add_upgrade(self, new, old, obsoleted=None): + ti_new = self.new(new, libdnf.transaction.TransactionItemAction_UPGRADE) + ti_old = self.new(old, libdnf.transaction.TransactionItemAction_UPGRADED, replaced_by=ti_new) + self._add_obsoleted(obsoleted, replaced_by=ti_new) + + def _test_fail_safe(self, hdr, pkg): + if pkg._from_cmdline: + return 0 + if pkg.repo.module_hotfixes: + return 0 + try: + if hdr['modularitylabel'] and not pkg._is_in_active_module(): + logger.critical(_("No available modular metadata for modular package '{}', " + "it cannot be installed on the system").format(pkg)) + return 1 + except ValueError: + return 0 + return 0 + + def _populate_rpm_ts(self, ts): + """Populate the RPM transaction set.""" + modular_problems = 0 + + for tsi in self: + if tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADE: + hdr = tsi.pkg._header + modular_problems += self._test_fail_safe(hdr, tsi.pkg) + ts.addInstall(hdr, tsi, 'u') + elif tsi.action == libdnf.transaction.TransactionItemAction_DOWNGRADED: + ts.addErase(tsi.pkg.idx) + elif tsi.action == libdnf.transaction.TransactionItemAction_INSTALL: + hdr = tsi.pkg._header + modular_problems += self._test_fail_safe(hdr, tsi.pkg) + ts.addInstall(hdr, tsi, 'i') + elif tsi.action == libdnf.transaction.TransactionItemAction_OBSOLETE: + hdr = tsi.pkg._header + 
modular_problems += self._test_fail_safe(hdr, tsi.pkg) + ts.addInstall(hdr, tsi, 'u') + elif tsi.action == libdnf.transaction.TransactionItemAction_OBSOLETED: + ts.addErase(tsi.pkg.idx) + elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL: + # note: in rpm 4.12 there should not be set + # rpm.RPMPROB_FILTER_REPLACEPKG to work + hdr = tsi.pkg._header + modular_problems += self._test_fail_safe(hdr, tsi.pkg) + ts.addReinstall(hdr, tsi) + elif tsi.action == libdnf.transaction.TransactionItemAction_REINSTALLED: + # Required when multiple packages with the same NEVRA marked as installed + ts.addErase(tsi.pkg.idx) + elif tsi.action == libdnf.transaction.TransactionItemAction_REMOVE: + ts.addErase(tsi.pkg.idx) + elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADE: + hdr = tsi.pkg._header + modular_problems += self._test_fail_safe(hdr, tsi.pkg) + ts.addInstall(hdr, tsi, 'u') + elif tsi.action == libdnf.transaction.TransactionItemAction_UPGRADED: + ts.addErase(tsi.pkg.idx) + elif tsi.action == libdnf.transaction.TransactionItemAction_REASON_CHANGE: + pass + else: + raise RuntimeError("TransactionItemAction not handled: %s" % tsi.action) + if modular_problems: + raise dnf.exceptions.Error(_("No available modular metadata for modular package")) + + return ts + + @property + def install_set(self): + # :api + result = set() + for tsi in self: + if tsi.action in dnf.transaction.FORWARD_ACTIONS: + try: + result.add(tsi.pkg) + except KeyError: + raise RuntimeError("TransactionItem is has no RPM attached: %s" % tsi) + return result + + @property + def remove_set(self): + # :api + result = set() + for tsi in self: + if tsi.action in dnf.transaction.BACKWARD_ACTIONS + [libdnf.transaction.TransactionItemAction_REINSTALLED]: + try: + result.add(tsi.pkg) + except KeyError: + raise RuntimeError("TransactionItem is has no RPM attached: %s" % tsi) + return result + + def _rpm_limitations(self): + """ Ensures all the members can be passed to rpm as they are 
to perform + the transaction. + """ + src_installs = [pkg for pkg in self.install_set if pkg.arch == 'src'] + if len(src_installs): + return _("Will not install a source rpm package (%s).") % \ + src_installs[0] + return None + + def _get_items(self, action): + return [tsi for tsi in self if tsi.action == action] diff --git a/dnf/db/history.py b/dnf/db/history.py new file mode 100644 index 0000000..2fa37ab --- /dev/null +++ b/dnf/db/history.py @@ -0,0 +1,548 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2009, 2012-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
class RPMTransactionItemWrapper(object):
    """Python-friendly wrapper around a libdnf swdb transaction item.

    Exposes the underlying SWIG object's getters/setters as properties so
    the rest of DNF can treat history records like ordinary package objects.
    """

    def __init__(self, swdb, item):
        # item is the libdnf TransactionItem; it must exist for the wrapper
        # to be meaningful.
        assert item is not None
        self._swdb = swdb
        self._item = item

    def __str__(self):
        return self._item.getItem().toStr()

    def __lt__(self, other):
        # Ordering and equality are delegated to the underlying libdnf objects.
        return self._item < other._item

    def __eq__(self, other):
        return self._item == other._item

    def __hash__(self):
        return self._item.__hash__()

    def match(self, pattern):
        # NOTE(review): always reports a match regardless of pattern —
        # presumably a placeholder; confirm against callers.
        return True

    @property
    def name(self):
        return self._item.getRPMItem().getName()

    @property
    def epoch(self):
        return self._item.getRPMItem().getEpoch()

    @property
    def version(self):
        return self._item.getRPMItem().getVersion()

    @property
    def release(self):
        return self._item.getRPMItem().getRelease()

    @property
    def arch(self):
        return self._item.getRPMItem().getArch()

    @property
    def evr(self):
        # epoch is prepended only when truthy (non-zero / non-empty)
        if self.epoch:
            return "{}:{}-{}".format(self.epoch, self.version, self.release)
        return "{}-{}".format(self.version, self.release)

    @property
    def action(self):
        return self._item.getAction()

    @action.setter
    def action(self, value):
        self._item.setAction(value)

    @property
    def reason(self):
        return self._item.getReason()

    @property
    def action_name(self):
        # Older libdnf builds may lack getActionName(); fall back to "".
        try:
            return self._item.getActionName()
        except AttributeError:
            return ""

    @property
    def action_short(self):
        # Same fallback as action_name for older libdnf builds.
        try:
            return self._item.getActionShort()
        except AttributeError:
            return ""

    @property
    def state(self):
        return self._item.getState()

    @state.setter
    def state(self, value):
        self._item.setState(value)

    @property
    def from_repo(self):
        return self._item.getRepoid()

    def ui_from_repo(self):
        # "@repoid" display form; empty string when no repoid was recorded.
        if not self._item.getRepoid():
            return ""
        return "@" + self._item.getRepoid()

    @property
    def obsoleting(self):
        # NOTE(review): always None — obsoleting package is not tracked here.
        return None

    def get_reason(self):
        # TODO: get_history_reason
        return self._swdb.rpm.get_reason(self)

    @property
    def pkg(self):
        # May raise KeyError when no dnf.package.Package is mapped to the item.
        return self._swdb.rpm._swdb_ti_pkg[self._item]

    @property
    def files(self):
        return self.pkg.files

    @property
    def _active(self):
        return self.pkg
    def close(self):
        """Close the current swdb transaction and release database resources.

        Resets all lazily-created persistors (rpm/group/env) and buffered
        console output so the interface can be reused.
        """
        try:
            # Forget the id of an in-progress transaction, if any.
            del self._tid
        except AttributeError:
            pass
        # NOTE(review): `self.swdb` is a lazy property — when no database is
        # open yet this line creates one just to close its transaction;
        # confirm this side effect is intended (close() is also called from
        # __del__).
        self.swdb.closeTransaction()
        self._rpm = None
        self._group = None
        self._env = None
        if self._swdb:
            self._swdb.closeDatabase()
            self._swdb = None
        self._output = []
    # TODO: rename to: list_transactions?
    def old(self, tids=None, limit=0, complete_transactions_only=False):
        """Return TransactionWrapper objects for stored transactions,
        newest first (the list is reversed before returning).

        :param tids: optional iterable of transaction ids; when non-empty,
            only those transactions are returned
        :param limit: NOTE(review): accepted but never applied — confirm
            whether callers expect it to cap the result
        :param complete_transactions_only: currently unused (TODO)
        """
        tids = tids or []
        tids = [int(i) for i in tids]
        result = self.swdb.listTransactions()
        result = [TransactionWrapper(i) for i in result]
        # TODO: move to libdnf
        if tids:
            result = [i for i in result if i.tid in tids]

        # populate altered_lt_rpmdb and altered_gt_rpmdb
        # (a mismatch between one transaction's ending rpmdb version and the
        # next one's starting version means the rpmdb changed outside DNF)
        for i, trans in enumerate(result):
            if i == 0:
                continue
            prev_trans = result[i-1]
            if trans._trans.getRpmdbVersionBegin() != prev_trans._trans.getRpmdbVersionEnd():
                trans.altered_lt_rpmdb = True
                prev_trans.altered_gt_rpmdb = True
        return result[::-1]
def _swdb_add_items(self, items, action):
    """Record persistor items (groups or environments) in the swdb with the
    given action, marking each as DONE.  USER is the recorded reason."""
    reason = libdnf.transaction.TransactionItemReason_USER
    for _item_id, item in sorted(items.items()):
        ti = self.swdb.addItem(item, "", action, reason)
        ti.setState(libdnf.transaction.TransactionItemState_DONE)

# TODO: rename to begin_transaction?
def beg(self, rpmdb_version, using_pkgs, tsis, cmdline=None):
    """Start recording a transaction in the swdb and return its id.

    :param rpmdb_version: rpmdb version string at transaction start
    :param using_pkgs: unused (kept for interface compatibility)
    :param tsis: unused here (rpm items are populated elsewhere)
    :param cmdline: command line string to store with the transaction
    :returns: the new transaction id (also stored as self._tid)
    """
    try:
        self.swdb.initTransaction()
    except Exception:
        # A transaction may already be initialized (the lazy `swdb`
        # property calls initTransaction()); reuse it in that case.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        pass

    # add group/environment items to the transaction
    # (the six near-identical loops of the original are deduplicated)
    if self.group:
        self._swdb_add_items(self.group._installed,
                             libdnf.transaction.TransactionItemAction_INSTALL)
        self._swdb_add_items(self.group._upgraded,
                             libdnf.transaction.TransactionItemAction_UPGRADE)
        self._swdb_add_items(self.group._removed,
                             libdnf.transaction.TransactionItemAction_REMOVE)

    if self.env:
        self._swdb_add_items(self.env._installed,
                             libdnf.transaction.TransactionItemAction_INSTALL)
        self._swdb_add_items(self.env._upgraded,
                             libdnf.transaction.TransactionItemAction_UPGRADE)
        self._swdb_add_items(self.env._removed,
                             libdnf.transaction.TransactionItemAction_REMOVE)

    # save when everything is in memory
    tid = self.swdb.beginTransaction(
        int(calendar.timegm(time.gmtime())),
        str(rpmdb_version),
        cmdline or "",
        int(misc.getloginuid())
    )
    self.swdb.setReleasever(self.releasever)
    self._tid = tid

    return tid
+ # It is important to keep data around after the transaction ends + # because it's needed by plugins to report installed packages etc. + + # TODO: ignore_case, more patterns + def search(self, patterns, ignore_case=True): + """ Search for history transactions which contain specified + packages al. la. "yum list". Returns transaction ids. """ + return self.swdb.searchTransactionsByRPM(patterns) + + def user_installed(self, pkg): + """Returns True if package is user installed""" + reason = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1) + if reason == libdnf.transaction.TransactionItemReason_USER: + return True + # if reason is not known, consider a package user-installed + # because it was most likely installed via rpm + if reason == libdnf.transaction.TransactionItemReason_UNKNOWN: + return True + return False + + def get_erased_reason(self, pkg, first_trans, rollback): + """Get reason of package before transaction being undone. If package + is already installed in the system, keep his reason. + + :param pkg: package being installed + :param first_trans: id of first transaction being undone + :param rollback: True if transaction is performing a rollback""" + if rollback: + # return the reason at the point of rollback; we're setting that reason + result = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, first_trans) + else: + result = self.swdb.resolveRPMTransactionItemReason(pkg.name, pkg.arch, -1) + + # consider unknown reason as user-installed + if result == libdnf.transaction.TransactionItemReason_UNKNOWN: + result = libdnf.transaction.TransactionItemReason_USER + return result diff --git a/dnf/dnssec.py b/dnf/dnssec.py new file mode 100644 index 0000000..ac098ff --- /dev/null +++ b/dnf/dnssec.py @@ -0,0 +1,308 @@ +# dnssec.py +# DNS extension for automatic GPG key verification +# +# Copyright (C) 2012-2018 Red Hat, Inc. 
class DnssecError(dnf.exceptions.Error):
    """
    Exception used in the dnssec module
    """
    def __repr__(self):
        # Bug fix: the original formatted the value into an empty string
        # literal ('""'.format(...)), so repr() always returned "".  Return
        # an informative representation including the stored value instead.
        return "<DnssecError, value='{}'>".format(
            self.value if self.value is not None else "Not specified")
class KeyInfo:
    """
    Pair of an email address and its associated verification key, both kept
    as simple values (str / bytes).
    """
    def __init__(self, email=None, key=None):
        self.email = email
        self.key = key

    @staticmethod
    def from_rpm_key_object(userid, raw_key):
        # type: (str, bytes) -> KeyInfo
        """
        Convert the key format dnf uses into the one used in DNS RRs:
        pull the email out of ``userid`` and strip the PGP armor
        (BEGIN/END lines, armor headers and the trailing checksum line)
        from ``raw_key``.
        """
        matched = re.search('<(.*@.*)>', userid)
        if matched is None:
            raise DnssecError

        address = matched.group(1)
        armor_lines = raw_key.decode('ascii').split('\n')

        begin_idx = 0
        end_idx = 0
        for idx, line in enumerate(armor_lines):
            if line == '-----BEGIN PGP PUBLIC KEY BLOCK-----':
                begin_idx = idx
            if line == '-----END PGP PUBLIC KEY BLOCK-----':
                end_idx = idx

        # Skip the BEGIN line plus the armor-header line, and drop the
        # "=checksum" line immediately before END.
        payload = ''.join(armor_lines[begin_idx + 2:end_idx - 1]).encode('ascii')
        return KeyInfo(address, payload)
+ """ + + # Mapping from email address to b64 encoded public key or NoKey in case of proven nonexistence + _cache = {} + # type: Dict[str, Union[str, NoKey]] + + @staticmethod + def _cache_hit(key_union, input_key_string): + # type: (Union[str, NoKey], str) -> Validity + """ + Compare the key in case it was found in the cache. + """ + if key_union == input_key_string: + logger.debug("Cache hit, valid key") + return Validity.VALID + elif key_union is NoKey: + logger.debug("Cache hit, proven non-existence") + return Validity.PROVEN_NONEXISTENCE + else: + logger.debug("Key in cache: {}".format(key_union)) + logger.debug("Input key : {}".format(input_key_string)) + return Validity.REVOKED + + @staticmethod + def _cache_miss(input_key): + # type: (KeyInfo) -> Validity + """ + In case the key was not found in the cache, create an Unbound context and contact the DNS + system + """ + try: + import unbound + except ImportError as e: + msg = _("Configuration option 'gpgkey_dns_verification' requires " + "libunbound ({})".format(e)) + raise dnf.exceptions.Error(msg) + + ctx = unbound.ub_ctx() + if ctx.set_option("verbosity:", "0") != 0: + logger.debug("Unbound context: Failed to set verbosity") + + if ctx.set_option("qname-minimisation:", "yes") != 0: + logger.debug("Unbound context: Failed to set qname minimisation") + + if ctx.resolvconf() != 0: + logger.debug("Unbound context: Failed to read resolv.conf") + + if ctx.add_ta_file("/var/lib/unbound/root.key") != 0: + logger.debug("Unbound context: Failed to add trust anchor file") + + status, result = ctx.resolve(email2location(input_key.email), + RR_TYPE_OPENPGPKEY, unbound.RR_CLASS_IN) + if status != 0: + logger.debug("Communication with DNS servers failed") + return Validity.ERROR + if result.bogus: + logger.debug("DNSSEC signatures are wrong") + return Validity.BOGUS_RESULT + if not result.secure: + logger.debug("Result is not secured with DNSSEC") + return Validity.RESULT_NOT_SECURE + if result.nxdomain: + 
logger.debug("Non-existence of this record was proven by DNSSEC") + return Validity.PROVEN_NONEXISTENCE + if not result.havedata: + # TODO: This is weird result, but there is no way to perform validation, so just return + # an error + logger.debug("Unknown error in DNS communication") + return Validity.ERROR + else: + data = result.data.as_raw_data()[0] + dns_data_b64 = base64.b64encode(data) + if dns_data_b64 == input_key.key: + return Validity.VALID + else: + # In case it is different, print the keys for further examination in debug mode + logger.debug("Key from DNS: {}".format(dns_data_b64)) + logger.debug("Input key : {}".format(input_key.key)) + return Validity.REVOKED + + @staticmethod + def verify(input_key): + # type: (KeyInfo) -> Validity + """ + Public API. Use this method to verify a KeyInfo object. + """ + logger.debug("Running verification for key with id: {}".format(input_key.email)) + key_union = DNSSECKeyVerification._cache.get(input_key.email) + if key_union is not None: + return DNSSECKeyVerification._cache_hit(key_union, input_key.key) + else: + result = DNSSECKeyVerification._cache_miss(input_key) + if result == Validity.VALID: + DNSSECKeyVerification._cache[input_key.email] = input_key.key + elif result == Validity.PROVEN_NONEXISTENCE: + DNSSECKeyVerification._cache[input_key.email] = NoKey() + return result + + +def nice_user_msg(ki, v): + # type: (KeyInfo, Validity) -> str + """ + Inform the user about key validity in a human readable way. + """ + prefix = _("DNSSEC extension: Key for user ") + ki.email + " " + if v == Validity.VALID: + return prefix + _("is valid.") + else: + return prefix + _("has unknown status.") + + +def any_msg(m): + # type: (str) -> str + """ + Label any given message with DNSSEC extension tag + """ + return _("DNSSEC extension: ") + m + + +class RpmImportedKeys: + """ + Wrapper around keys, that are imported in the RPM database. 
    @staticmethod
    def _query_db_for_gpg_keys():
        # type: () -> List[KeyInfo]
        """Collect a KeyInfo for every gpg-pubkey pseudo-package in the rpmdb.

        The email is parsed from the 'packager' header and the ASCII-armored
        key body from the 'description' header.
        """
        # TODO: base.conf.installroot ?? -----------------------\
        transaction_set = dnf.rpm.transaction.TransactionWrapper()
        packages = transaction_set.dbMatch("name", "gpg-pubkey")
        return_list = []
        for pkg in packages:
            packager = dnf.rpm.getheader(pkg, 'packager')
            # NOTE(review): raises AttributeError when the packager header
            # contains no "<email>" part — confirm every gpg-pubkey entry
            # carries one.
            email = re.search('<(.*@.*)>', packager).group(1)
            description = dnf.rpm.getheader(pkg, 'description')
            # Strip the armor: drop the first 3 lines (BEGIN + headers) and
            # the last 3 (checksum + END + trailing blank).
            key_lines = description.split('\n')[3:-3]
            key_str = ''.join(key_lines)
            return_list += [KeyInfo(email, key_str.encode('ascii'))]

        return return_list
Possible causes: wrong configuration of the DNS" + " server, MITM attack".format(key.email))) + elif result == Validity.REVOKED: + logger.info(any_msg("GPG Key {} has been revoked and should" + " be removed immediately".format(key.email))) + else: + logger.debug(any_msg("GPG Key {} could not be tested".format(key.email))) diff --git a/dnf/drpm.py b/dnf/drpm.py new file mode 100644 index 0000000..ff4f6a7 --- /dev/null +++ b/dnf/drpm.py @@ -0,0 +1,180 @@ +# drpm.py +# Delta RPM support +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
    def _target_params(self):
        """Build the download-target keyword arguments for this delta rpm
        (relative URL, checksum type/value, expected size, base URL)."""
        delta = self.delta
        ctype, csum = delta.chksum
        ctype = hawkey.chksum_name(ctype)
        chksum = hexlify(csum).decode()

        # Map the hawkey checksum name onto libdnf's enum; warn (but still
        # proceed) when libdnf does not recognize the type.
        ctype_code = libdnf.repo.PackageTarget.checksumType(ctype)
        if ctype_code == libdnf.repo.PackageTarget.ChecksumType_UNKNOWN:
            logger.warning(_("unsupported checksum type: %s"), ctype)

        return {
            'relative_url' : delta.location,
            'checksum_type' : ctype_code,
            'checksum' : chksum,
            'expectedsize' : delta.downloadsize,
            'base_url' : delta.baseurl,
        }
    def delta_factory(self, po, progress):
        '''Turn a po to Delta RPM po, if possible'''
        # Each early return below means "fall back to downloading the full
        # rpm" for a different reason.
        if not self.deltarpm_installed:
            # deltarpm is not installed
            return None
        if not po.repo.deltarpm or not self.deltarpm_percentage:
            # drpm disabled
            return None
        if po._is_local_pkg():
            # drpm disabled for local
            return None
        if os.path.exists(po.localPkg()):
            # already there
            return None

        # Only accept deltas smaller than deltarpm_percentage% of the full
        # rpm; among the installed versions pick the smallest usable delta.
        best = po._size * self.deltarpm_percentage / 100
        best_delta = None
        for ipo in self.query.filter(name=po.name, arch=po.arch):
            delta = po.get_delta_from_evr(ipo.evr)
            if delta and delta.downloadsize < best:
                best = delta.downloadsize
                best_delta = delta
        if best_delta:
            return DeltaPayload(self, best_delta, po, progress)
        return None
def enqueue(self, pload):
    """Queue a delta payload for rebuild: reap finished jobs first, then
    start new ones while free job slots remain."""
    # Reap any finished rebuild children without blocking.
    while self.jobs:
        finished_pid, status = os.waitpid(-1, os.WNOHANG)
        if not finished_pid:
            break
        self.job_done(finished_pid, status)
    self.queue.append(pload)
    # Fill free job slots from the front of the queue.
    while self.queue and len(self.jobs) < self.deltarpm_jobs:
        self.start_job(self.queue.pop(0))
class DownloadError(Error):
    # :api
    """Raised when package or metadata downloads fail.

    ``errmap`` maps a source key (URL/package, possibly empty) to the list
    of error strings produced for it.
    """
    def __init__(self, errmap):
        super(DownloadError, self).__init__()
        self.errmap = errmap

    @staticmethod
    def errmap2str(errmap):
        """Flatten an error map into one human-readable line per error."""
        lines = []
        for source, errors in errmap.items():
            for err in errors:
                lines.append('%s: %s' % (source, err) if source else '%s' % err)
        return '\n'.join(lines)

    def __str__(self):
        return self.errmap2str(self.errmap)
groups or modules: ") + ", ".join(error_group_specs) + if (module_depsolv_errors): + msg_mod = dnf.util._format_resolve_problems(module_depsolv_errors[0]) + if module_depsolv_errors[1] == \ + libdnf.module.ModulePackageContainer.ModuleErrorType_ERROR_IN_DEFAULTS: + msg += "\n" + "\n".join([P_('Modular dependency problem with Defaults:', + 'Modular dependency problems with Defaults:', + len(module_depsolv_errors)), + msg_mod]) + else: + msg += "\n" + "\n".join([P_('Modular dependency problem:', + 'Modular dependency problems:', + len(module_depsolv_errors)), + msg_mod]) + super(MarkingErrors, self).__init__(msg) + self.no_match_group_specs = no_match_group_specs + self.error_group_specs = error_group_specs + self.no_match_pkg_specs = no_match_pkg_specs + self.error_pkg_specs = error_pkg_specs + self.module_depsolv_errors = module_depsolv_errors + + @property + def module_debsolv_errors(self): + msg = "Attribute module_debsolv_errors is deprecated. Use module_depsolv_errors " \ + "attribute instead." 
+ warnings.warn(msg, DeprecationWarning, stacklevel=2) + return self.module_depsolv_errors + +class MetadataError(Error): + pass + + +class MiscError(Error): + pass + + +class PackagesNotAvailableError(MarkingError): + def __init__(self, value=None, pkg_spec=None, packages=None): + super(PackagesNotAvailableError, self).__init__(value, pkg_spec) + self.packages = packages or [] + + +class PackageNotFoundError(MarkingError): + pass + + +class PackagesNotInstalledError(MarkingError): + def __init__(self, value=None, pkg_spec=None, packages=None): + super(PackagesNotInstalledError, self).__init__(value, pkg_spec) + self.packages = packages or [] + + +class ProcessLockError(LockError): + def __init__(self, value, pid): + super(ProcessLockError, self).__init__(value) + self.pid = pid + + def __reduce__(self): + """Pickling support.""" + return (ProcessLockError, (self.value, self.pid)) + + +class RepoError(Error): + # :api + pass + + +class ThreadLockError(LockError): + pass + + +class TransactionCheckError(Error): + pass diff --git a/dnf/goal.py b/dnf/goal.py new file mode 100644 index 0000000..a0b0bca --- /dev/null +++ b/dnf/goal.py @@ -0,0 +1,24 @@ +# goal.py +# Customized hawkey.Goal +# +# Copyright (C) 2014-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals + +from hawkey import Goal diff --git a/dnf/history.py b/dnf/history.py new file mode 100644 index 0000000..139ac92 --- /dev/null +++ b/dnf/history.py @@ -0,0 +1,25 @@ +# history.py +# Interfaces to the history of transactions. +# +# Copyright (C) 2013-2018 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +"""Interfaces to the history of transactions.""" + +from __future__ import absolute_import +from __future__ import unicode_literals + diff --git a/dnf/i18n.py b/dnf/i18n.py new file mode 100644 index 0000000..77e807b --- /dev/null +++ b/dnf/i18n.py @@ -0,0 +1,354 @@ +# i18n.py +# +# Copyright (C) 2012-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import print_function +from __future__ import unicode_literals +from dnf.pycomp import unicode + +import dnf +import locale +import os +import signal +import sys +import unicodedata + +""" +Centralize i18n stuff here. Must be unittested. +""" + +class UnicodeStream(object): + def __init__(self, stream, encoding): + self.stream = stream + self.encoding = encoding + + def write(self, s): + if not isinstance(s, str): + s = (s.decode(self.encoding, 'replace') if dnf.pycomp.PY3 else + s.encode(self.encoding, 'replace')) + try: + self.stream.write(s) + except UnicodeEncodeError: + s_bytes = s.encode(self.stream.encoding, 'backslashreplace') + if hasattr(self.stream, 'buffer'): + self.stream.buffer.write(s_bytes) + else: + s = s_bytes.decode(self.stream.encoding, 'ignore') + self.stream.write(s) + + + def __getattr__(self, name): + return getattr(self.stream, name) + +def _full_ucd_support(encoding): + """Return true if encoding can express any Unicode character. 
+ + Even if an encoding can express all accented letters in the given language, + we can't generally settle for it in DNF since sometimes we output special + characters like the registered trademark symbol (U+00AE) and surprisingly + many national non-unicode encodings, including e.g. ASCII and ISO-8859-2, + don't contain it. + + """ + if encoding is None: + return False + lower = encoding.lower() + if lower.startswith('utf-') or lower.startswith('utf_'): + return True + return False + +def _guess_encoding(): + """ Take the best shot at the current system's string encoding. """ + encoding = locale.getpreferredencoding(False) + return 'utf-8' if encoding.startswith("ANSI") else encoding + +def setup_locale(): + try: + dnf.pycomp.setlocale(locale.LC_ALL, '') + except locale.Error: + # default to C.UTF-8 or C locale if we got a failure. + try: + dnf.pycomp.setlocale(locale.LC_ALL, 'C.UTF-8') + os.environ['LC_ALL'] = 'C.UTF-8' + except locale.Error: + dnf.pycomp.setlocale(locale.LC_ALL, 'C') + os.environ['LC_ALL'] = 'C' + print('Failed to set locale, defaulting to {}'.format(os.environ['LC_ALL']), + file=sys.stderr) + +def setup_stdout(): + """ Check that stdout is of suitable encoding and handle the situation if + not. + + Returns True if stdout was of suitable encoding already and no changes + were needed. + """ + stdout = sys.stdout + if not stdout.isatty(): + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + try: + encoding = stdout.encoding + except AttributeError: + encoding = None + if not _full_ucd_support(encoding): + sys.stdout = UnicodeStream(stdout, _guess_encoding()) + return False + return True + + +def ucd_input(ucstring): + # :api, deprecated in 2.0.0, will be erased when python2 is abandoned + """ It uses print instead of passing the prompt to raw_input. 
+ + raw_input doesn't encode the passed string and the output + goes into stderr + """ + print(ucstring, end='') + return dnf.pycomp.raw_input() + + +def ucd(obj): + # :api, deprecated in 2.0.0, will be erased when python2 is abandoned + """ Like the builtin unicode() but tries to use a reasonable encoding. """ + if dnf.pycomp.PY3: + if dnf.pycomp.is_py3bytes(obj): + return str(obj, _guess_encoding(), errors='ignore') + elif isinstance(obj, str): + return obj + return str(obj) + else: + if isinstance(obj, dnf.pycomp.unicode): + return obj + if hasattr(obj, '__unicode__'): + # see the doc for the unicode() built-in. The logic here is: if obj + # implements __unicode__, let it take a crack at it, but handle the + # situation if it fails: + try: + return dnf.pycomp.unicode(obj) + except UnicodeError: + pass + return dnf.pycomp.unicode(str(obj), _guess_encoding(), errors='ignore') + + +# functions for formatting output according to terminal width, +# They should be used instead of build-in functions to count on different +# widths of Unicode characters + +def _exact_width_char(uchar): + return 2 if unicodedata.east_asian_width(uchar) in ('W', 'F') else 1 + + +def chop_str(msg, chop=None): + """ Return the textual width of a Unicode string, chopping it to + a specified value. This is what you want to use instead of %.*s, as it + does the "right" thing with regard to different Unicode character width + Eg. 
"%.*s" % (10, msg) <= becomes => "%s" % (chop_str(msg, 10)) """ + + if chop is None: + return exact_width(msg), msg + + width = 0 + chopped_msg = "" + for char in msg: + char_width = _exact_width_char(char) + if width + char_width > chop: + break + chopped_msg += char + width += char_width + return width, chopped_msg + + +def exact_width(msg): + """ Calculates width of char at terminal screen + (Asian char counts for two) """ + return sum(_exact_width_char(c) for c in msg) + + +def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''): + """ Expand a msg to a specified "width" or chop to same. + Expansion can be left or right. This is what you want to use instead of + %*.*s, as it does the "right" thing with regard to different Unicode + character width. + prefix and suffix should be used for "invisible" bytes, like + highlighting. + + Examples: + + ``"%-*.*s" % (10, 20, msg)`` becomes + ``"%s" % (fill_exact_width(msg, 10, 20))``. + + ``"%20.10s" % (msg)`` becomes + ``"%s" % (fill_exact_width(msg, 20, 10, left=False))``. + + ``"%s%.10s%s" % (pre, msg, suf)`` becomes + ``"%s" % (fill_exact_width(msg, 0, 10, prefix=pre, suffix=suf))``. + """ + width, msg = chop_str(msg, chop) + + if width >= fill: + if prefix or suffix: + msg = ''.join([prefix, msg, suffix]) + else: + extra = " " * (fill - width) + if left: + msg = ''.join([prefix, msg, suffix, extra]) + else: + msg = ''.join([extra, prefix, msg, suffix]) + + return msg + + +def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''): + """ Works like we want textwrap.wrap() to work, uses Unicode strings + and doesn't screw up lists/blocks/etc. 
""" + + def _indent_at_beg(line): + count = 0 + byte = 'X' + for byte in line: + if byte != ' ': + break + count += 1 + if byte not in ("-", "*", ".", "o", '\xe2'): + return count, 0 + list_chr = chop_str(line[count:], 1)[1] + if list_chr in ("-", "*", ".", "o", + "\u2022", "\u2023", "\u2218"): + nxt = _indent_at_beg(line[count+len(list_chr):]) + nxt = nxt[1] or nxt[0] + if nxt: + return count, count + 1 + nxt + return count, 0 + + text = text.rstrip('\n') + lines = text.replace('\t', ' ' * 8).split('\n') + + ret = [] + indent = initial_indent + wrap_last = False + csab = 0 + cspc_indent = 0 + for line in lines: + line = line.rstrip(' ') + (lsab, lspc_indent) = (csab, cspc_indent) + (csab, cspc_indent) = _indent_at_beg(line) + force_nl = False # We want to stop wrapping under "certain" conditions: + if wrap_last and cspc_indent: # if line starts a list or + force_nl = True + if wrap_last and csab == len(line): # is empty line + force_nl = True + # if line doesn't continue a list and is "block indented" + if wrap_last and not lspc_indent: + if csab >= 4 and csab != lsab: + force_nl = True + if force_nl: + ret.append(indent.rstrip(' ')) + indent = subsequent_indent + wrap_last = False + if csab == len(line): # empty line, remove spaces to make it easier. 
+ line = '' + if wrap_last: + line = line.lstrip(' ') + cspc_indent = lspc_indent + + if exact_width(indent + line) <= width: + wrap_last = False + ret.append(indent + line) + indent = subsequent_indent + continue + + wrap_last = True + words = line.split(' ') + line = indent + spcs = cspc_indent + if not spcs and csab >= 4: + spcs = csab + for word in words: + if (width < exact_width(line + word)) and \ + (exact_width(line) > exact_width(subsequent_indent)): + ret.append(line.rstrip(' ')) + line = subsequent_indent + ' ' * spcs + line += word + line += ' ' + indent = line.rstrip(' ') + ' ' + if wrap_last: + ret.append(indent.rstrip(' ')) + + return '\n'.join(ret) + + +def select_short_long(width, msg_short, msg_long): + """ Automatically selects the short (abbreviated) or long (full) message + depending on whether we have enough screen space to display the full + message or not. If a caller by mistake passes a long string as + msg_short and a short string as a msg_long this function recognizes + the mistake and swaps the arguments. This function is especially useful + in the i18n context when you cannot predict how long are the translated + messages. + + Limitations: + + 1. If msg_short is longer than width you will still get an overflow. + This function does not abbreviate the string. + 2. You are not obliged to provide an actually abbreviated string, it is + perfectly correct to pass the same string twice if you don't want + any abbreviation. However, if you provide two different strings but + having the same width this function is unable to recognize which one + is correct and you should assume that it is unpredictable which one + is returned. + + Example: + + ``select_short_long (10, _("Repo"), _("Repository"))`` + + will return "Repository" in English but the results in other languages + may be different. 
""" + width_short = exact_width(msg_short) + width_long = exact_width(msg_long) + # If we have two strings of the same width: + if width_short == width_long: + return msg_long + # If the short string is wider than the long string: + elif width_short > width_long: + return msg_short if width_short <= width else msg_long + # The regular case: + else: + return msg_long if width_long <= width else msg_short + + +def translation(name): + # :api, deprecated in 2.0.0, will be erased when python2 is abandoned + """ Easy gettext translations setup based on given domain name """ + + setup_locale() + def ucd_wrapper(fnc): + return lambda *w: ucd(fnc(*w)) + t = dnf.pycomp.gettext.translation(name, fallback=True) + return map(ucd_wrapper, dnf.pycomp.gettext_setup(t)) + + +def pgettext(context, message): + result = _(context + chr(4) + message) + if "\004" in result: + return message + else: + return result + +# setup translations +_, P_ = translation("dnf") +C_ = pgettext diff --git a/dnf/lock.py b/dnf/lock.py new file mode 100644 index 0000000..6817aac --- /dev/null +++ b/dnf/lock.py @@ -0,0 +1,148 @@ +# lock.py +# DNF Locking Subsystem. +# +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
logger = logging.getLogger("dnf")


def _fit_lock_dir(dir_):
    """Return the directory to keep lock files in; non-root users get a
    per-user directory keyed by a hash of `dir_`."""
    if not dnf.util.am_i_root():
        # for regular users the best we currently do is not to clash with
        # another DNF process of the same user. Since dir_ is quite definitely
        # not writable for us, yet significant, use its hash:
        hexdir = hashlib.sha1(dir_.encode('utf-8')).hexdigest()
        dir_ = os.path.join(misc.getCacheDir(), 'locks', hexdir)
    return dir_


def build_download_lock(cachedir, exit_on_lock):
    """Lock guarding concurrent package downloads into `cachedir`."""
    return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'download_lock.pid'),
                       'cachedir', not exit_on_lock)


def build_metadata_lock(cachedir, exit_on_lock):
    """Lock guarding concurrent metadata refreshes."""
    return ProcessLock(os.path.join(_fit_lock_dir(cachedir), 'metadata_lock.pid'),
                       'metadata', not exit_on_lock)


def build_rpmdb_lock(persistdir, exit_on_lock):
    """Lock guarding RPMDB modification."""
    return ProcessLock(os.path.join(_fit_lock_dir(persistdir), 'rpmdb_lock.pid'),
                       'RPMDB', not exit_on_lock)


def build_log_lock(logdir, exit_on_lock):
    """Lock guarding log file rotation."""
    return ProcessLock(os.path.join(_fit_lock_dir(logdir), 'log_lock.pid'),
                       'log', not exit_on_lock)


class ProcessLock(object):
    """Inter-process lock backed by a pid file, usable as a context manager.

    A thread RLock additionally serializes threads of the same process;
    `blocking` selects waiting vs. raising ProcessLockError on contention.
    """

    def __init__(self, target, description, blocking=False):
        self.blocking = blocking
        self.count = 0
        self.description = description
        self.target = target
        self.thread_lock = threading.RLock()

    def _lock_thread(self):
        if not self.thread_lock.acquire(blocking=False):
            msg = '%s already locked by a different thread' % self.description
            raise ThreadLockError(msg)
        self.count += 1

    def _try_lock(self, pid):
        """Attempt to take the pid-file lock for `pid`.

        Returns the pid now owning the lock (== `pid` on success), or -1 when
        another process holds the flock at this instant.
        """
        fd = os.open(self.target, os.O_CREAT | os.O_RDWR, 0o644)

        try:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError as e:
                if e.errno == errno.EWOULDBLOCK:
                    return -1
                raise

            previous = os.read(fd, 20)
            if len(previous) == 0:
                # empty file, write our pid
                os.write(fd, str(pid).encode('utf-8'))
                return pid

            try:
                previous = int(previous)
            except ValueError:
                msg = _('Malformed lock file found: %s.\n'
                        'Ensure no other dnf/yum process is running and '
                        'remove the lock file manually or run '
                        'systemd-tmpfiles --remove dnf.conf.') % (self.target)
                raise LockError(msg)

            if previous == pid:
                # already locked by this process
                return pid

            if not os.access('/proc/%d/stat' % previous, os.F_OK):
                # locked by a dead process, take over: rewrite with our pid
                os.lseek(fd, 0, os.SEEK_SET)
                os.ftruncate(fd, 0)
                os.write(fd, str(pid).encode('utf-8'))
                return pid

            return previous

        finally:
            os.close(fd)

    def _unlock_thread(self):
        self.count -= 1
        self.thread_lock.release()

    def __enter__(self):
        dnf.util.ensure_dir(os.path.dirname(self.target))
        self._lock_thread()
        prev_pid = -1
        my_pid = os.getpid()
        pid = self._try_lock(my_pid)
        while pid != my_pid:
            if pid != -1:
                if not self.blocking:
                    self._unlock_thread()
                    msg = '%s already locked by %d' % (self.description, pid)
                    raise ProcessLockError(msg, pid)
                if prev_pid != pid:
                    # log only when the owning pid changes, not every second
                    msg = _('Waiting for process with pid %d to finish.') % (pid)
                    logger.info(msg)
                prev_pid = pid
            time.sleep(1)
            pid = self._try_lock(my_pid)

    def __exit__(self, *exc_args):
        if self.count == 1:
            # last holder within this process: remove the pid file
            os.unlink(self.target)
        self._unlock_thread()
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +import dnf.exceptions +import dnf.const +import dnf.lock +import dnf.util +import libdnf.repo +import logging +import logging.handlers +import os +import sys +import time +import warnings + +# :api loggers are: 'dnf', 'dnf.plugin', 'dnf.rpm' + +SUPERCRITICAL = 100 # do not use this for logging +CRITICAL = logging.CRITICAL +ERROR = logging.ERROR +WARNING = logging.WARNING +INFO = logging.INFO +DEBUG = logging.DEBUG +DDEBUG = 8 # used by anaconda (pyanaconda/payload/dnfpayload.py) +SUBDEBUG = 6 +TRACE = 4 + +def only_once(func): + """Method decorator turning the method into noop on second or later calls.""" + def noop(*_args, **_kwargs): + pass + def swan_song(self, *args, **kwargs): + func(self, *args, **kwargs) + setattr(self, func.__name__, noop) + return swan_song + +class _MaxLevelFilter(object): + def __init__(self, max_level): + self.max_level = max_level + + def filter(self, record): + if record.levelno >= self.max_level: + return 0 + return 1 + 
+_VERBOSE_VAL_MAPPING = { + 0 : SUPERCRITICAL, + 1 : logging.INFO, + 2 : logging.INFO, # the default + 3 : logging.DEBUG, + 4 : logging.DEBUG, + 5 : logging.DEBUG, + 6 : logging.DEBUG, # verbose value + } + +def _cfg_verbose_val2level(cfg_errval): + assert 0 <= cfg_errval <= 10 + return _VERBOSE_VAL_MAPPING.get(cfg_errval, DDEBUG) + + +# Both the DNF default and the verbose default are WARNING. Note that ERROR has +# no specific level. +_ERR_VAL_MAPPING = { + 0: SUPERCRITICAL, + 1: logging.CRITICAL, + 2: logging.ERROR + } + +def _cfg_err_val2level(cfg_errval): + assert 0 <= cfg_errval <= 10 + return _ERR_VAL_MAPPING.get(cfg_errval, logging.WARNING) + + +class MultiprocessRotatingFileHandler(logging.handlers.RotatingFileHandler): + def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False): + super(MultiprocessRotatingFileHandler, self).__init__( + filename, mode, maxBytes, backupCount, encoding, delay) + self.rotate_lock = dnf.lock.build_log_lock("/var/log/", True) + + def emit(self, record): + while True: + try: + if self.shouldRollover(record): + with self.rotate_lock: + self.doRollover() + logging.FileHandler.emit(self, record) + return + except (dnf.exceptions.ProcessLockError, dnf.exceptions.ThreadLockError): + time.sleep(0.01) + except Exception: + self.handleError(record) + return + + +def _create_filehandler(logfile, log_size, log_rotate): + if not os.path.exists(logfile): + dnf.util.ensure_dir(os.path.dirname(logfile)) + dnf.util.touch(logfile) + # By default, make logfiles readable by the user (so the reporting ABRT + # user can attach root logfiles). 
def _create_filehandler(logfile, log_size, log_rotate):
    """Build a rotating file handler for `logfile`, creating the file first
    when it does not exist yet."""
    if not os.path.exists(logfile):
        dnf.util.ensure_dir(os.path.dirname(logfile))
        dnf.util.touch(logfile)
        # By default, make logfiles readable by the user (so the reporting ABRT
        # user can attach root logfiles).
        os.chmod(logfile, 0o644)
    handler = MultiprocessRotatingFileHandler(logfile, maxBytes=log_size, backupCount=log_rotate)
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
                                  "%Y-%m-%dT%H:%M:%SZ")
    formatter.converter = time.gmtime  # timestamps in UTC
    handler.setFormatter(formatter)
    return handler


def _paint_mark(logger):
    """Write the session-separator marker into the given logger's output."""
    logger.log(INFO, dnf.const.LOG_MARKER)


class Logging(object):
    """Configures the 'dnf' logger hierarchy: stdout/stderr stream handlers
    plus rotating log files. Both setup steps run at most once (see
    `only_once`)."""

    def __init__(self):
        self.stdout_handler = self.stderr_handler = None

    @only_once
    def _presetup(self):
        logging.addLevelName(DDEBUG, "DDEBUG")
        logging.addLevelName(SUBDEBUG, "SUBDEBUG")
        logging.addLevelName(TRACE, "TRACE")
        logger_dnf = logging.getLogger("dnf")
        logger_dnf.setLevel(TRACE)

        # stdout carries INFO up to (not including) WARNING
        stdout = logging.StreamHandler(sys.stdout)
        stdout.setLevel(INFO)
        stdout.addFilter(_MaxLevelFilter(logging.WARNING))
        logger_dnf.addHandler(stdout)
        self.stdout_handler = stdout

        # WARNING and above go to stderr
        stderr = logging.StreamHandler(sys.stderr)
        stderr.setLevel(WARNING)
        logger_dnf.addHandler(stderr)
        self.stderr_handler = stderr

    @only_once
    def _setup(self, verbose_level, error_level, logdir, log_size, log_rotate):
        self._presetup()
        logger_dnf = logging.getLogger("dnf")

        # setup file logger
        logfile = os.path.join(logdir, dnf.const.LOG)
        handler = _create_filehandler(logfile, log_size, log_rotate)
        logger_dnf.addHandler(handler)
        # temporarily turn off stdout/stderr handlers:
        self.stdout_handler.setLevel(SUPERCRITICAL)
        self.stderr_handler.setLevel(SUPERCRITICAL)
        # put the marker in the file now:
        _paint_mark(logger_dnf)

        # route Python warnings to stderr and the logfile
        logging.captureWarnings(True)
        logger_warnings = logging.getLogger("py.warnings")
        logger_warnings.addHandler(self.stderr_handler)
        logger_warnings.addHandler(handler)

        lr_logfile = os.path.join(logdir, dnf.const.LOG_LIBREPO)
        libdnf.repo.LibrepoLog.addHandler(lr_logfile, verbose_level <= DEBUG)

        # setup RPM callbacks logger
        logger_rpm = logging.getLogger("dnf.rpm")
        logger_rpm.propagate = False
        logger_rpm.setLevel(SUBDEBUG)
        logfile = os.path.join(logdir, dnf.const.LOG_RPM)
        handler = _create_filehandler(logfile, log_size, log_rotate)
        logger_rpm.addHandler(self.stdout_handler)
        logger_rpm.addHandler(self.stderr_handler)
        logger_rpm.addHandler(handler)
        _paint_mark(logger_rpm)
        # bring std handlers to the preferred level
        self.stdout_handler.setLevel(verbose_level)
        self.stderr_handler.setLevel(error_level)
        logging.raiseExceptions = False

    def _setup_from_dnf_conf(self, conf):
        """Derive the logging configuration from a dnf config object."""
        verbose_level_r = _cfg_verbose_val2level(conf.debuglevel)
        error_level_r = _cfg_err_val2level(conf.errorlevel)
        return self._setup(verbose_level_r, error_level_r, conf.logdir,
                           conf.log_size, conf.log_rotate)


class Timer(object):
    """Measure wall-clock time since construction; log it (DDEBUG) when called."""

    def __init__(self, what):
        self.what = what
        self.start = time.time()

    def __call__(self):
        elapsed = time.time() - self.start
        msg = 'timer: %s: %d ms' % (self.what, elapsed * 1000)
        logging.getLogger("dnf").log(DDEBUG, msg)
of message, 0 - unknown + """ + if len(args) == 2: + level, message = args + elif len(args) == 4: + time, pid, level, message = args + self._logger.log(_LIBDNF_TO_DNF_LOGLEVEL_MAPPING[level], message) + + +libdnfLoggerCB = LibdnfLoggerCB() +libdnf.utils.Log.setLogger(libdnfLoggerCB) diff --git a/dnf/match_counter.py b/dnf/match_counter.py new file mode 100644 index 0000000..33d26c2 --- /dev/null +++ b/dnf/match_counter.py @@ -0,0 +1,121 @@ +# match_counter.py +# Implements class MatchCounter. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +from functools import reduce + +WEIGHTS = { + 'name' : 7, + 'summary' : 4, + 'description' : 2, + 'url' : 1, + } + + +def _canonize_string_set(sset, length): + """ Ordered sset with empty strings prepended. 
""" + current = len(sset) + l = [''] * (length - current) + sorted(sset) + return l + + +class MatchCounter(dict): + """Map packages to which of their attributes matched in a search against + what values. + + The mapping is: ``package -> [(key, needle), ... ]``. + + """ + + @staticmethod + def _eval_weights(pkg, matches): + # how much is each match worth and return their sum: + def weight(match): + key = match[0] + needle = match[1] + haystack = getattr(pkg, key) + coef = 2 if haystack == needle else 1 + return coef * WEIGHTS[key] + + return sum(map(weight, matches)) + + @staticmethod + def _eval_distance(pkg, matches): + dist = 0 + for (key, needle) in matches: + haystack = getattr(pkg, key) + dist += len(haystack) - len(needle) + return dist + + def _key_func(self): + """Get the key function used for sorting matches. + + It is not enough to only look at the matches and order them by the sum + of their weighted hits. In case this number is the same we have to + ensure that the same matched needles are next to each other in the + result. 
+ + Returned function is: + pkg -> (weights_sum, canonized_needles_set, -distance) + + """ + max_length = self._max_needles() + def get_key(pkg): + return (self._eval_weights(pkg, self[pkg]), + _canonize_string_set(self.matched_needles(pkg), max_length), + -self._eval_distance(pkg, self[pkg])) + return get_key + + def _max_needles(self): + """Return the max count of needles of all packages.""" + if self: + return max(len(self.matched_needles(pkg)) for pkg in self) + return 0 + + def add(self, pkg, key, needle): + self.setdefault(pkg, []).append((key, needle)) + + def dump(self): + for pkg in self: + print('%s\t%s' % (pkg, self[pkg])) + + def matched_haystacks(self, pkg): + return set(getattr(pkg, m[0]) for m in self[pkg]) + + def matched_keys(self, pkg): + # return keys in the same order they appear in the list + result = [] + for i in self[pkg]: + if i[0] in result: + continue + result.append(i[0]) + return result + + def matched_needles(self, pkg): + return set(m[1] for m in self[pkg]) + + def sorted(self, reverse=False, limit_to=None): + keys = limit_to if limit_to else self.keys() + return sorted(keys, key=self._key_func(), reverse=reverse) + + def total(self): + return reduce(lambda total, pkg: total + len(self[pkg]), self, 0) diff --git a/dnf/module/CMakeLists.txt b/dnf/module/CMakeLists.txt new file mode 100644 index 0000000..ac25e1b --- /dev/null +++ b/dnf/module/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB module *.py) +INSTALL (FILES ${module} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/module) diff --git a/dnf/module/__init__.py b/dnf/module/__init__.py new file mode 100644 index 0000000..cae1a88 --- /dev/null +++ b/dnf/module/__init__.py @@ -0,0 +1,31 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from dnf.i18n import _ + +DIFFERENT_STREAM_INFO = 1 +NOTHING_TO_SHOW = 2 +INSTALLING_NEWER_VERSION = 4 +ENABLED_MODULES = 5 +NO_PROFILE_SPECIFIED = 6 + +module_messages = { + DIFFERENT_STREAM_INFO: _("Enabling different stream for '{}'."), + NOTHING_TO_SHOW: _("Nothing to show."), + INSTALLING_NEWER_VERSION: _("Installing newer version of '{}' than specified. Reason: {}"), + ENABLED_MODULES: _("Enabled modules: {}."), + NO_PROFILE_SPECIFIED: _("No profile specified for '{}', please specify profile."), +} diff --git a/dnf/module/exceptions.py b/dnf/module/exceptions.py new file mode 100644 index 0000000..ad82966 --- /dev/null +++ b/dnf/module/exceptions.py @@ -0,0 +1,88 @@ +# supplies the 'module' command. +# +# Copyright (C) 2014-2017 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +import dnf +from dnf.module import module_messages, NO_PROFILE_SPECIFIED +from dnf.i18n import _ + + +class NoModuleException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "No such module: {}".format(module_spec) + super(NoModuleException, self).__init__(value) + + +class NoStreamException(dnf.exceptions.Error): + def __init__(self, stream): + value = "No such stream: {}".format(stream) + super(NoStreamException, self).__init__(value) + + +class EnabledStreamException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "No enabled stream for module: {}".format(module_spec) + super(EnabledStreamException, self).__init__(value) + + +class EnableMultipleStreamsException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "Cannot enable more streams from module '{}' at the same time".format(module_spec) + super(EnableMultipleStreamsException, self).__init__(value) + + +class DifferentStreamEnabledException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "Different stream enabled for module: {}".format(module_spec) + super(DifferentStreamEnabledException, self).__init__(value) + + +class NoProfileException(dnf.exceptions.Error): + def __init__(self, profile): + value = "No such profile: {}".format(profile) + super(NoProfileException, self).__init__(value) + + +class ProfileNotInstalledException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "Specified profile not installed for {}".format(module_spec) + super(ProfileNotInstalledException, self).__init__(value) + + +class NoStreamSpecifiedException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "No stream specified for '{}', please specify stream".format(module_spec) + 
super(NoStreamSpecifiedException, self).__init__(value) + + +class NoProfileSpecifiedException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = module_messages[NO_PROFILE_SPECIFIED].format(module_spec) + super(NoProfileSpecifiedException, self).__init__(value) + + +class NoProfilesException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "No such profile: {}. No profiles available".format(module_spec) + super(NoProfilesException, self).__init__(value) + + +class NoProfileToRemoveException(dnf.exceptions.Error): + def __init__(self, module_spec): + value = "No profile to remove for '{}'".format(module_spec) + super(NoProfileToRemoveException, self).__init__(value) diff --git a/dnf/module/module_base.py b/dnf/module/module_base.py new file mode 100644 index 0000000..8093ab4 --- /dev/null +++ b/dnf/module/module_base.py @@ -0,0 +1,686 @@ +# Copyright (C) 2017-2018 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +from collections import OrderedDict + +import hawkey +import libdnf.smartcols +import libdnf.module +import dnf.selector +import dnf.exceptions + +from dnf.module.exceptions import EnableMultipleStreamsException +from dnf.util import logger +from dnf.i18n import _, P_, ucd + +STATE_DEFAULT = libdnf.module.ModulePackageContainer.ModuleState_DEFAULT +STATE_ENABLED = libdnf.module.ModulePackageContainer.ModuleState_ENABLED +STATE_DISABLED = libdnf.module.ModulePackageContainer.ModuleState_DISABLED +STATE_UNKNOWN = libdnf.module.ModulePackageContainer.ModuleState_UNKNOWN +MODULE_TABLE_HINT = _("\n\nHint: [d]efault, [e]nabled, [x]disabled, [i]nstalled") +MODULE_INFO_TABLE_HINT = _("\n\nHint: [d]efault, [e]nabled, [x]disabled, [i]nstalled, [a]ctive") + + +def _profile_comparison_key(profile): + return profile.getName() + + +class ModuleBase(object): + # :api + + def __init__(self, base): + # :api + self.base = base + + def enable(self, module_specs): + # :api + no_match_specs, error_specs, solver_errors, module_dicts = \ + self._resolve_specs_enable_update_sack(module_specs) + for spec, (nsvcap, module_dict) in module_dicts.items(): + if nsvcap.profile: + logger.info(_("Ignoring unnecessary profile: '{}/{}'").format( + nsvcap.name, nsvcap.profile)) + if no_match_specs or error_specs or solver_errors: + raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs, + error_group_specs=error_specs, + module_depsolv_errors=solver_errors) + + def disable(self, module_specs): + # :api + no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_DISABLED) + if no_match_specs or solver_errors: + raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs, + module_depsolv_errors=solver_errors) + + def install(self, module_specs, strict=True): + # :api + no_match_specs, error_specs, solver_errors, module_dicts = \ + self._resolve_specs_enable_update_sack(module_specs) + + # + fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME + 
install_dict = {} + install_set_artefacts = set() + fail_safe_repo_used = False + for spec, (nsvcap, moduledict) in module_dicts.items(): + for name, streamdict in moduledict.items(): + for stream, module_list in streamdict.items(): + install_module_list = [x for x in module_list + if self.base._moduleContainer.isModuleActive(x.getId())] + if not install_module_list: + logger.error(_("All matches for argument '{0}' in module '{1}:{2}' are not " + "active").format(spec, name, stream)) + error_specs.append(spec) + continue + profiles = [] + latest_module = self._get_latest(install_module_list) + if latest_module.getRepoID() == fail_safe_repo: + msg = _( + "Installing module '{0}' from Fail-Safe repository {1} is not allowed") + logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo)) + fail_safe_repo_used = True + if nsvcap.profile: + profiles.extend(latest_module.getProfiles(nsvcap.profile)) + if not profiles: + available_profiles = latest_module.getProfiles() + if available_profiles: + profile_names = ", ".join(sorted( + [profile.getName() for profile in available_profiles])) + msg = _("Unable to match profile for argument {}. Available " + "profiles for '{}:{}': {}").format( + spec, name, stream, profile_names) + else: + msg = _("Unable to match profile for argument {}").format(spec) + logger.error(msg) + no_match_specs.append(spec) + continue + else: + profiles_strings = self.base._moduleContainer.getDefaultProfiles( + name, stream) + if not profiles_strings: + available_profiles = latest_module.getProfiles() + if available_profiles: + profile_names = ", ".join(sorted( + [profile.getName() for profile in available_profiles])) + msg = _("No default profiles for module {}:{}. 
Available profiles" + ": {}").format( + name, stream, profile_names) + else: + msg = _("No default profiles for module {}:{}").format(name, stream) + logger.error(msg) + no_match_specs.append(spec) + for profile in set(profiles_strings): + module_profiles = latest_module.getProfiles(profile) + if not module_profiles: + logger.error( + _("Default profile {} not available in module {}:{}").format( + profile, name, stream)) + no_match_specs.append(spec) + + profiles.extend(module_profiles) + for profile in profiles: + self.base._moduleContainer.install(latest_module ,profile.getName()) + for pkg_name in profile.getContent(): + install_dict.setdefault(pkg_name, set()).add(spec) + for module in install_module_list: + install_set_artefacts.update(module.getArtifacts()) + if fail_safe_repo_used: + raise dnf.exceptions.Error(_( + "Installing module from Fail-Safe repository is not allowed")) + install_base_query = self.base.sack.query().filterm( + nevra_strict=install_set_artefacts).apply() + + # add hot-fix packages + hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes] + hotfix_packages = self.base.sack.query().filterm(reponame=hot_fix_repos).filterm( + name=install_dict.keys()) + install_base_query = install_base_query.union(hotfix_packages) + + for pkg_name, set_specs in install_dict.items(): + query = install_base_query.filter(name=pkg_name) + if not query: + # package can also be non-modular or part of another stream + query = self.base.sack.query().filterm(name=pkg_name) + if not query: + for spec in set_specs: + logger.error(_("Unable to resolve argument {}").format(spec)) + logger.error(_("No match for package {}").format(pkg_name)) + error_specs.extend(set_specs) + continue + self.base._goal.group_members.add(pkg_name) + sltr = dnf.selector.Selector(self.base.sack) + sltr.set(pkg=query) + self.base._goal.install(select=sltr, optional=(not strict)) + if no_match_specs or error_specs or solver_errors: + raise 
dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs, + error_group_specs=error_specs, + module_depsolv_errors=solver_errors) + + def reset(self, module_specs): + # :api + no_match_specs, solver_errors = self._modules_reset_or_disable(module_specs, STATE_UNKNOWN) + if no_match_specs: + raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_specs, + module_depsolv_errors=solver_errors) + + def upgrade(self, module_specs): + # :api + no_match_specs = [] + fail_safe_repo = hawkey.MODULE_FAIL_SAFE_REPO_NAME + fail_safe_repo_used = False + + for spec in module_specs: + module_list, nsvcap = self._get_modules(spec) + if not module_list: + no_match_specs.append(spec) + continue + update_module_list = [x for x in module_list + if self.base._moduleContainer.isModuleActive(x.getId())] + if not update_module_list: + logger.error(_("Unable to resolve argument {}").format(spec)) + continue + module_dict = self._create_module_dict_and_enable(update_module_list, False) + upgrade_package_set = set() + for name, streamdict in module_dict.items(): + for stream, module_list_from_dict in streamdict.items(): + upgrade_package_set.update(self._get_package_name_set_and_remove_profiles( + module_list_from_dict, nsvcap)) + latest_module = self._get_latest(module_list_from_dict) + if latest_module.getRepoID() == fail_safe_repo: + msg = _( + "Upgrading module '{0}' from Fail-Safe repository {1} is not allowed") + logger.critical(msg.format(latest_module.getNameStream(), fail_safe_repo)) + fail_safe_repo_used = True + if nsvcap.profile: + profiles_set = latest_module.getProfiles(nsvcap.profile) + if not profiles_set: + continue + for profile in profiles_set: + upgrade_package_set.update(profile.getContent()) + else: + for profile in latest_module.getProfiles(): + upgrade_package_set.update(profile.getContent()) + for artefact in latest_module.getArtifacts(): + subj = hawkey.Subject(artefact) + for nevra_obj in subj.get_nevra_possibilities( + forms=[hawkey.FORM_NEVRA]): + 
upgrade_package_set.add(nevra_obj.name) + + if not upgrade_package_set: + logger.error(_("Unable to match profile in argument {}").format(spec)) + query = self.base.sack.query().filterm(name=upgrade_package_set) + if query: + sltr = dnf.selector.Selector(self.base.sack) + sltr.set(pkg=query) + self.base._goal.upgrade(select=sltr) + if fail_safe_repo_used: + raise dnf.exceptions.Error(_( + "Upgrading module from Fail-Safe repository is not allowed")) + return no_match_specs + + def remove(self, module_specs): + # :api + no_match_specs = [] + remove_package_set = set() + + for spec in module_specs: + module_list, nsvcap = self._get_modules(spec) + if not module_list: + no_match_specs.append(spec) + continue + module_dict = self._create_module_dict_and_enable(module_list, False) + remove_packages_names = [] + for name, streamdict in module_dict.items(): + for stream, module_list_from_dict in streamdict.items(): + remove_packages_names.extend(self._get_package_name_set_and_remove_profiles( + module_list_from_dict, nsvcap, True)) + if not remove_packages_names: + logger.error(_("Unable to match profile in argument {}").format(spec)) + remove_package_set.update(remove_packages_names) + + if remove_package_set: + keep_pkg_names = self.base._moduleContainer.getInstalledPkgNames() + remove_package_set = remove_package_set.difference(keep_pkg_names) + if remove_package_set: + query = self.base.sack.query().installed().filterm(name=remove_package_set) + if query: + self.base._remove_if_unneeded(query) + return no_match_specs + + def get_modules(self, module_spec): + # :api + return self._get_modules(module_spec) + + def _get_modules(self, module_spec): + # used by ansible (lib/ansible/modules/packaging/os/dnf.py) + subj = hawkey.Subject(module_spec) + for nsvcap in subj.nsvcap_possibilities(): + name = nsvcap.name if nsvcap.name else "" + stream = nsvcap.stream if nsvcap.stream else "" + version = "" + context = nsvcap.context if nsvcap.context else "" + arch = nsvcap.arch if 
nsvcap.arch else "" + if nsvcap.version and nsvcap.version != -1: + version = str(nsvcap.version) + modules = self.base._moduleContainer.query(name, stream, version, context, arch) + if modules: + return modules, nsvcap + return (), None + + def _get_latest(self, module_list): + latest = None + if module_list: + latest = module_list[0] + for module in module_list[1:]: + if module.getVersion() > latest.getVersion(): + latest = module + return latest + + def _create_module_dict_and_enable(self, module_list, enable=True): + moduleDict = {} + for module in module_list: + moduleDict.setdefault( + module.getName(), {}).setdefault(module.getStream(), []).append(module) + + for moduleName, streamDict in moduleDict.items(): + moduleState = self.base._moduleContainer.getModuleState(moduleName) + if len(streamDict) > 1: + if moduleState != STATE_DEFAULT and moduleState != STATE_ENABLED \ + and moduleState != STATE_DISABLED: + raise EnableMultipleStreamsException(moduleName) + if moduleState == STATE_ENABLED: + stream = self.base._moduleContainer.getEnabledStream(moduleName) + else: + stream = self.base._moduleContainer.getDefaultStream(moduleName) + if not stream or stream not in streamDict: + raise EnableMultipleStreamsException(moduleName) + for key in sorted(streamDict.keys()): + if key == stream: + if enable: + self.base._moduleContainer.enable(moduleName, key) + continue + del streamDict[key] + elif enable: + for key in streamDict.keys(): + self.base._moduleContainer.enable(moduleName, key) + assert len(streamDict) == 1 + return moduleDict + + def _resolve_specs_enable_update_sack(self, module_specs): + no_match_specs = [] + error_spec = [] + module_dicts = {} + for spec in module_specs: + module_list, nsvcap = self._get_modules(spec) + if not module_list: + no_match_specs.append(spec) + continue + try: + module_dict = self._create_module_dict_and_enable(module_list, True) + module_dicts[spec] = (nsvcap, module_dict) + except (RuntimeError, 
EnableMultipleStreamsException) as e: + error_spec.append(spec) + logger.error(ucd(e)) + logger.error(_("Unable to resolve argument {}").format(spec)) + hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes] + try: + solver_errors = self.base.sack.filter_modules( + self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot, + self.base.conf.module_platform_id, update_only=True, + debugsolver=self.base.conf.debug_solver) + except hawkey.Exception as e: + raise dnf.exceptions.Error(ucd(e)) + for spec, (nsvcap, moduleDict) in module_dicts.items(): + for streamDict in moduleDict.values(): + for modules in streamDict.values(): + try: + self.base._moduleContainer.enableDependencyTree( + libdnf.module.VectorModulePackagePtr(modules)) + except RuntimeError as e: + error_spec.append(spec) + logger.error(ucd(e)) + logger.error(_("Unable to resolve argument {}").format(spec)) + return no_match_specs, error_spec, solver_errors, module_dicts + + def _modules_reset_or_disable(self, module_specs, to_state): + no_match_specs = [] + for spec in module_specs: + module_list, nsvcap = self._get_modules(spec) + if not module_list: + logger.error(_("Unable to resolve argument {}").format(spec)) + no_match_specs.append(spec) + continue + if nsvcap.stream or nsvcap.version or nsvcap.context or nsvcap.arch or nsvcap.profile: + logger.info(_("Only module name is required. 
" + "Ignoring unneeded information in argument: '{}'").format(spec)) + module_names = set() + for module in module_list: + module_names.add(module.getName()) + for name in module_names: + if to_state == STATE_UNKNOWN: + self.base._moduleContainer.reset(name) + if to_state == STATE_DISABLED: + self.base._moduleContainer.disable(name) + + hot_fix_repos = [i.id for i in self.base.repos.iter_enabled() if i.module_hotfixes] + try: + solver_errors = self.base.sack.filter_modules( + self.base._moduleContainer, hot_fix_repos, self.base.conf.installroot, + self.base.conf.module_platform_id, update_only=True, + debugsolver=self.base.conf.debug_solver) + except hawkey.Exception as e: + raise dnf.exceptions.Error(ucd(e)) + return no_match_specs, solver_errors + + def _get_package_name_set_and_remove_profiles(self, module_list, nsvcap, remove=False): + package_name_set = set() + latest_module = self._get_latest(module_list) + installed_profiles_strings = set(self.base._moduleContainer.getInstalledProfiles( + latest_module.getName())) + if not installed_profiles_strings: + return set() + if nsvcap.profile: + profiles_set = latest_module.getProfiles(nsvcap.profile) + if not profiles_set: + return set() + for profile in profiles_set: + if profile.getName() in installed_profiles_strings: + if remove: + self.base._moduleContainer.uninstall(latest_module, profile.getName()) + package_name_set.update(profile.getContent()) + else: + for profile_string in installed_profiles_strings: + if remove: + self.base._moduleContainer.uninstall(latest_module, profile_string) + for profile in latest_module.getProfiles(profile_string): + package_name_set.update(profile.getContent()) + return package_name_set + + def _get_info_profiles(self, module_specs): + output = set() + for module_spec in module_specs: + module_list, nsvcap = self._get_modules(module_spec) + if not module_list: + logger.info(_("Unable to resolve argument {}").format(module_spec)) + continue + + if nsvcap.profile: + 
logger.info(_("Ignoring unnecessary profile: '{}/{}'").format( + nsvcap.name, nsvcap.profile)) + for module in module_list: + + lines = OrderedDict() + lines["Name"] = module.getFullIdentifier() + + for profile in sorted(module.getProfiles(), key=_profile_comparison_key): + lines[profile.getName()] = "\n".join( + [pkgName for pkgName in profile.getContent()]) + + output.add(self._create_simple_table(lines).toString()) + return "\n\n".join(sorted(output)) + + def _profile_report_formatter(self, modulePackage, default_profiles, enabled_str): + installed_profiles = self.base._moduleContainer.getInstalledProfiles( + modulePackage.getName()) + available_profiles = modulePackage.getProfiles() + profiles_str = "" + for profile in sorted(available_profiles, key=_profile_comparison_key): + profiles_str += "{}{}".format( + profile.getName(), " [d]" if profile.getName() in default_profiles else "") + profiles_str += " [i], " if profile.getName() in installed_profiles and enabled_str \ + else ", " + return profiles_str[:-2] + + def _module_strs_formatter(self, modulePackage, markActive=False): + default_str = "" + enabled_str = "" + disabled_str = "" + if modulePackage.getStream() == self.base._moduleContainer.getDefaultStream( + modulePackage.getName()): + default_str = " [d]" + if self.base._moduleContainer.isEnabled(modulePackage): + if not default_str: + enabled_str = " " + enabled_str += "[e]" + elif self.base._moduleContainer.isDisabled(modulePackage): + if not default_str: + disabled_str = " " + disabled_str += "[x]" + if markActive and self.base._moduleContainer.isModuleActive(modulePackage): + if not default_str: + disabled_str = " " + disabled_str += "[a]" + return default_str, enabled_str, disabled_str + + def _get_info(self, module_specs): + output = set() + for module_spec in module_specs: + module_list, nsvcap = self._get_modules(module_spec) + if not module_list: + logger.info(_("Unable to resolve argument {}").format(module_spec)) + continue + + if 
nsvcap.profile: + logger.info(_("Ignoring unnecessary profile: '{}/{}'").format( + nsvcap.name, nsvcap.profile)) + for modulePackage in module_list: + default_str, enabled_str, disabled_str = self._module_strs_formatter( + modulePackage, markActive=True) + default_profiles = self.base._moduleContainer.getDefaultProfiles( + modulePackage.getName(), modulePackage.getStream()) + + profiles_str = self._profile_report_formatter( + modulePackage, default_profiles, enabled_str) + + lines = OrderedDict() + lines["Name"] = modulePackage.getName() + lines["Stream"] = modulePackage.getStream() + default_str + enabled_str + \ + disabled_str + lines["Version"] = modulePackage.getVersion() + lines["Context"] = modulePackage.getContext() + lines["Architecture"] = modulePackage.getArch() + lines["Profiles"] = profiles_str + lines["Default profiles"] = " ".join(default_profiles) + lines["Repo"] = modulePackage.getRepoID() + lines["Summary"] = modulePackage.getSummary() + lines["Description"] = modulePackage.getDescription() + req_set = set() + for req in modulePackage.getModuleDependencies(): + for require_dict in req.getRequires(): + for mod_require, stream in require_dict.items(): + req_set.add("{}:[{}]".format(mod_require, ",".join(stream))) + lines["Requires"] = "\n".join(sorted(req_set)) + lines["Artifacts"] = "\n".join(sorted(modulePackage.getArtifacts())) + output.add(self._create_simple_table(lines).toString()) + str_table = "\n\n".join(sorted(output)) + if str_table: + str_table += MODULE_INFO_TABLE_HINT + return str_table + + @staticmethod + def _create_simple_table(lines): + table = libdnf.smartcols.Table() + table.enableNoheadings(True) + table.setColumnSeparator(" : ") + + column_name = table.newColumn("Name") + column_value = table.newColumn("Value") + column_value.setWrap(True) + column_value.setSafechars("\n") + column_value.setNewlineWrapFunction() + + for line_name, value in lines.items(): + if value is None: + value = "" + line = table.newLine() + 
line.getColumnCell(column_name).setData(line_name) + line.getColumnCell(column_value).setData(str(value)) + + return table + + def _get_full_info(self, module_specs): + output = set() + for module_spec in module_specs: + module_list, nsvcap = self._get_modules(module_spec) + if not module_list: + logger.info(_("Unable to resolve argument {}").format(module_spec)) + continue + + if nsvcap.profile: + logger.info(_("Ignoring unnecessary profile: '{}/{}'").format( + nsvcap.name, nsvcap.profile)) + for modulePackage in module_list: + info = modulePackage.getYaml() + if info: + output.add(info) + output_string = "\n\n".join(sorted(output)) + return output_string + + def _what_provides(self, rpm_specs): + output = set() + modulePackages = self.base._moduleContainer.getModulePackages() + baseQuery = self.base.sack.query().filterm(empty=True).apply() + getBestInitQuery = self.base.sack.query(flags=hawkey.IGNORE_MODULAR_EXCLUDES) + + for spec in rpm_specs: + subj = dnf.subject.Subject(spec) + baseQuery = baseQuery.union(subj.get_best_query( + self.base.sack, with_nevra=True, with_provides=False, with_filenames=False, + query=getBestInitQuery)) + + baseQuery.apply() + + for modulePackage in modulePackages: + artifacts = modulePackage.getArtifacts() + if not artifacts: + continue + query = baseQuery.filter(nevra_strict=artifacts) + if query: + for pkg in query: + string_output = "" + profiles = [] + for profile in sorted(modulePackage.getProfiles(), key=_profile_comparison_key): + if pkg.name in profile.getContent(): + profiles.append(profile.getName()) + lines = OrderedDict() + lines["Module"] = modulePackage.getFullIdentifier() + lines["Profiles"] = " ".join(sorted(profiles)) + lines["Repo"] = modulePackage.getRepoID() + lines["Summary"] = modulePackage.getSummary() + + table = self._create_simple_table(lines) + + string_output += "{}\n".format(self.base.output.term.bold(str(pkg))) + string_output += "{}".format(table.toString()) + output.add(string_output) + + return 
"\n\n".join(sorted(output)) + + def _create_and_fill_table(self, latest): + table = libdnf.smartcols.Table() + table.setTermforce(libdnf.smartcols.Table.TermForce_AUTO) + table.enableMaxout(True) + column_name = table.newColumn("Name") + column_stream = table.newColumn("Stream") + column_profiles = table.newColumn("Profiles") + column_profiles.setWrap(True) + column_info = table.newColumn("Summary") + column_info.setWrap(True) + + if not self.base.conf.verbose: + column_info.hidden = True + + for latest_per_repo in latest: + for nameStreamArch in latest_per_repo: + if len(nameStreamArch) == 1: + modulePackage = nameStreamArch[0] + else: + active = [module for module in nameStreamArch + if self.base._moduleContainer.isModuleActive(module)] + if active: + modulePackage = active[0] + else: + modulePackage = nameStreamArch[0] + line = table.newLine() + default_str, enabled_str, disabled_str = self._module_strs_formatter( + modulePackage, markActive=False) + default_profiles = self.base._moduleContainer.getDefaultProfiles( + modulePackage.getName(), modulePackage.getStream()) + profiles_str = self._profile_report_formatter(modulePackage, default_profiles, + enabled_str) + line.getColumnCell(column_name).setData(modulePackage.getName()) + line.getColumnCell( + column_stream).setData( + modulePackage.getStream() + default_str + enabled_str + disabled_str) + line.getColumnCell(column_profiles).setData(profiles_str) + line.getColumnCell(column_info).setData(modulePackage.getSummary()) + + return table + + def _get_brief_description(self, module_specs, module_state): + modules = [] + if module_specs: + for spec in module_specs: + module_list, nsvcap = self._get_modules(spec) + modules.extend(module_list) + else: + modules = self.base._moduleContainer.getModulePackages() + latest = self.base._moduleContainer.getLatestModulesPerRepo(module_state, modules) + if not latest: + return "" + + table = self._create_and_fill_table(latest) + current_repo_id_index = 0 + 
already_printed_lines = 0 + try: + repo_name = self.base.repos[latest[0][0][0].getRepoID()].name + except KeyError: + repo_name = latest[0][0][0].getRepoID() + versions = len(latest[0]) + header = self._format_header(table) + str_table = self._format_repoid(repo_name) + str_table += header + for i in range(0, table.getNumberOfLines()): + if versions + already_printed_lines <= i: + already_printed_lines += versions + current_repo_id_index += 1 + # Fail-Safe repository is not in self.base.repos + try: + repo_name = self.base.repos[ + latest[current_repo_id_index][0][0].getRepoID()].name + except KeyError: + repo_name = latest[current_repo_id_index][0][0].getRepoID() + versions = len(latest[current_repo_id_index]) + str_table += "\n" + str_table += self._format_repoid(repo_name) + str_table += header + + line = table.getLine(i) + str_table += table.toString(line, line) + return str_table + MODULE_TABLE_HINT + + def _format_header(self, table): + line = table.getLine(0) + return table.toString(line, line).split('\n', 1)[0] + '\n' + + def _format_repoid(self, repo_name): + return "{}\n".format(self.base.output.term.bold(repo_name)) + + +def format_modular_solver_errors(errors): + msg = dnf.util._format_resolve_problems(errors) + return "\n".join( + [P_('Modular dependency problem:', 'Modular dependency problems:', len(errors)), msg]) diff --git a/dnf/package.py b/dnf/package.py new file mode 100644 index 0000000..56aac61 --- /dev/null +++ b/dnf/package.py @@ -0,0 +1,298 @@ +# package.py +# Module defining the dnf.Package class. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +""" Contains the dnf.Package class. """ + +from __future__ import absolute_import +from __future__ import unicode_literals + +from dnf.i18n import _ + +import binascii +import dnf.rpm +import dnf.yum.misc +import hawkey +import logging +import os + +logger = logging.getLogger("dnf") + + +class Package(hawkey.Package): + """ Represents a package. 
#:api """ + + DEBUGINFO_SUFFIX = "-debuginfo" # :api + DEBUGSOURCE_SUFFIX = "-debugsource" # :api + + def __init__(self, initobject, base): + super(Package, self).__init__(initobject) + self.base = base + self._priv_chksum = None + self._repo = None + self._priv_size = None + + @property + def _chksum(self): + if self._priv_chksum: + return self._priv_chksum + if self._from_cmdline: + chksum_type = dnf.yum.misc.get_default_chksum_type() + chksum_val = dnf.yum.misc.checksum(chksum_type, self.location) + return (hawkey.chksum_type(chksum_type), + binascii.unhexlify(chksum_val)) + return super(Package, self).chksum + + @_chksum.setter + def _chksum(self, val): + self._priv_chksum = val + + @property + def _from_cmdline(self): + return self.reponame == hawkey.CMDLINE_REPO_NAME + + @property + def _from_system(self): + return self.reponame == hawkey.SYSTEM_REPO_NAME + + @property + def _from_repo(self): + pkgrepo = None + if self._from_system: + pkgrepo = self.base.history.repo(self) + else: + pkgrepo = {} + if pkgrepo: + return '@' + pkgrepo + return self.reponame + + @property + def _header(self): + return dnf.rpm._header(self.localPkg()) + + @property + def _size(self): + if self._priv_size: + return self._priv_size + return super(Package, self).size + + @_size.setter + def _size(self, val): + self._priv_size = val + + @property + def _pkgid(self): + if self.hdr_chksum is None: + return None + (_, chksum) = self.hdr_chksum + return binascii.hexlify(chksum) + + @property + def source_name(self): + # :api + """ + returns name of source package + e.g. krb5-libs -> krb5 + """ + if self.sourcerpm is not None: + # trim suffix first + srcname = dnf.util.rtrim(self.sourcerpm, ".src.rpm") + # sourcerpm should be in form of name-version-release now, so we + # will strip the two rightmost parts separated by dash. + # Using rtrim with version and release of self is not sufficient + # because the package can have different version to the source + # package. 
+ srcname = srcname.rsplit('-', 2)[0] + else: + srcname = None + return srcname + + @property + def debug_name(self): + # :api + """ + Returns name of the debuginfo package for this package. + If this package is a debuginfo package, returns its name. + If this package is a debugsource package, returns the debuginfo package + for the base package. + e.g. kernel-PAE -> kernel-PAE-debuginfo + """ + if self.name.endswith(self.DEBUGINFO_SUFFIX): + return self.name + + name = self.name + if self.name.endswith(self.DEBUGSOURCE_SUFFIX): + name = name[:-len(self.DEBUGSOURCE_SUFFIX)] + + return name + self.DEBUGINFO_SUFFIX + + @property + def debugsource_name(self): + # :api + """ + Returns name of the debugsource package for this package. + e.g. krb5-libs -> krb5-debugsource + """ + # assuming self.source_name is None only for a source package + src_name = self.source_name if self.source_name is not None else self.name + return src_name + self.DEBUGSOURCE_SUFFIX + + @property + def source_debug_name(self): + # :api + """ + returns name of debuginfo package for source package of given package + e.g. krb5-libs -> krb5-debuginfo + """ + # assuming self.source_name is None only for a source package + src_name = self.source_name if self.source_name is not None else self.name + return src_name + self.DEBUGINFO_SUFFIX + + @property # yum compatibility attribute + def idx(self): + """ Always type it to int, rpm bindings expect it like that. 
""" + return int(self.rpmdbid) + + @property # yum compatibility attribute + def repoid(self): + return self.reponame + + @property # yum compatibility attribute + def pkgtup(self): + return (self.name, self.arch, str(self.e), self.v, self.r) + + @property # yum compatibility attribute + def repo(self): + if self._repo: + return self._repo + return self.base.repos[self.reponame] + + @repo.setter + def repo(self, val): + self._repo = val + + @property + def reason(self): + if self.repoid != hawkey.SYSTEM_REPO_NAME: + return None + return self.base.history.rpm.get_reason_name(self) + + @property # yum compatibility attribute + def relativepath(self): + return self.location + + @property # yum compatibility attribute + def a(self): + return self.arch + + @property # yum compatibility attribute + def e(self): + return self.epoch + + @property # yum compatibility attribute + def v(self): + return self.version + + @property # yum compatibility attribute + def r(self): + return self.release + + @property # yum compatibility attribute + def ui_from_repo(self): + return self.reponame + + # yum compatibility method + def evr_eq(self, pkg): + return self.evr_cmp(pkg) == 0 + + # yum compatibility method + def evr_gt(self, pkg): + return self.evr_cmp(pkg) > 0 + + # yum compatibility method + def evr_lt(self, pkg): + return self.evr_cmp(pkg) < 0 + + # yum compatibility method + def getDiscNum(self): + return self.medianr + + # yum compatibility method + def localPkg(self): + """ Package's location in the filesystem. + + For packages in remote repo returns where the package will be/has + been downloaded. 
+ """ + if self._from_cmdline: + return self.location + loc = self.location + if self.repo._repo.isLocal() and self.baseurl and self.baseurl.startswith('file://'): + return os.path.join(self.baseurl, loc.lstrip("/"))[7:] + if not self._is_local_pkg(): + loc = os.path.basename(loc) + return os.path.join(self.pkgdir, loc.lstrip("/")) + + def remote_location(self, schemes=('http', 'ftp', 'file', 'https')): + # :api + """ + The location from where the package can be downloaded from + + :param schemes: list of allowed protocols. Default is ('http', 'ftp', 'file', 'https') + :return: location (string) or None + """ + return self.repo.remote_location(self.location, schemes) + + def _is_local_pkg(self): + if self.repoid == "@System": + return True + return self._from_cmdline or \ + (self.repo._repo.isLocal() and (not self.baseurl or self.baseurl.startswith('file://'))) + + @property + def pkgdir(self): + if (self.repo._repo.isLocal() and not self._is_local_pkg()): + return self.repo.cache_pkgdir() + else: + return self.repo.pkgdir + + # yum compatibility method + def returnIdSum(self): + """ Return the chksum type and chksum string how the legacy yum expects + it. 
+ """ + if self._chksum is None: + return (None, None) + (chksum_type, chksum) = self._chksum + return (hawkey.chksum_name(chksum_type), binascii.hexlify(chksum).decode()) + + # yum compatibility method + def verifyLocalPkg(self): + if self._from_system: + raise ValueError("Can not verify an installed package.") + if self._from_cmdline: + return True # local package always verifies against itself + (chksum_type, chksum) = self.returnIdSum() + real_sum = dnf.yum.misc.checksum(chksum_type, self.localPkg(), + datasize=self._size) + if real_sum != chksum: + logger.debug(_('%s: %s check failed: %s vs %s'), + self, chksum_type, real_sum, chksum) + return False + return True diff --git a/dnf/persistor.py b/dnf/persistor.py new file mode 100644 index 0000000..bb5cdf6 --- /dev/null +++ b/dnf/persistor.py @@ -0,0 +1,133 @@ +# persistor.py +# Persistence data container. +# +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +# The current implementation is storing to files in persistdir. Do not depend on +# specific files existing, instead use the persistor API. 
The underlying +# implementation can change, e.g. for one general file with a serialized dict of +# data etc. + +from __future__ import absolute_import +from __future__ import unicode_literals +from dnf.i18n import _ +import distutils.version +import dnf.util +import errno +import fnmatch +import json +import logging +import os +import re + +logger = logging.getLogger("dnf") + + +class JSONDB(object): + + def _check_json_db(self, json_path): + if not os.path.isfile(json_path): + # initialize new db + dnf.util.ensure_dir(os.path.dirname(json_path)) + self._write_json_db(json_path, []) + + def _get_json_db(self, json_path, default=[]): + with open(json_path, 'r') as f: + content = f.read() + if content == "": + # empty file is invalid json format + logger.warning(_("%s is empty file"), json_path) + self._write_json_db(json_path, default) + else: + try: + default = json.loads(content) + except ValueError as e: + logger.warning(e) + return default + + @staticmethod + def _write_json_db(json_path, content): + with open(json_path, 'w') as f: + json.dump(content, f) + + +class RepoPersistor(JSONDB): + """Persistent data kept for repositories. + + Is arch/releasever specific and stores to cachedir. 
+ + """ + + def __init__(self, cachedir): + self.cachedir = cachedir + self.db_path = os.path.join(self.cachedir, "expired_repos.json") + self.expired_to_add = set() + self.reset_last_makecache = False + + @property + def _last_makecache_path(self): + return os.path.join(self.cachedir, "last_makecache") + + def get_expired_repos(self): + self._check_json_db(self.db_path) + return set(self._get_json_db(self.db_path)) + + def save(self): + self._check_json_db(self.db_path) + self._write_json_db(self.db_path, list(self.expired_to_add)) + if self.reset_last_makecache: + try: + dnf.util.touch(self._last_makecache_path) + return True + except IOError: + logger.info(_("Failed storing last makecache time.")) + return False + + def since_last_makecache(self): + try: + return int(dnf.util.file_age(self._last_makecache_path)) + except OSError: + logger.info(_("Failed determining last makecache time.")) + return None + + +class TempfilePersistor(JSONDB): + + def __init__(self, cachedir): + self.db_path = os.path.join(cachedir, "tempfiles.json") + self.tempfiles_to_add = set() + self._empty = False + + def get_saved_tempfiles(self): + self._check_json_db(self.db_path) + return self._get_json_db(self.db_path) + + def save(self): + if not self._empty and not self.tempfiles_to_add: + return + self._check_json_db(self.db_path) + if self._empty: + self._write_json_db(self.db_path, []) + return + if self.tempfiles_to_add: + data = set(self._get_json_db(self.db_path)) + data.update(self.tempfiles_to_add) + self._write_json_db(self.db_path, list(data)) + + def empty(self): + self._empty = True diff --git a/dnf/plugin.py b/dnf/plugin.py new file mode 100644 index 0000000..6fd7ad2 --- /dev/null +++ b/dnf/plugin.py @@ -0,0 +1,251 @@ +# plugin.py +# The interface for building DNF plugins. +# +# Copyright (C) 2012-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import fnmatch +import glob +import importlib +import inspect +import logging +import operator +import os +import sys +import traceback + +import libdnf +import dnf.logging +import dnf.pycomp +import dnf.util +from dnf.i18n import _ + +logger = logging.getLogger('dnf') + +DYNAMIC_PACKAGE = 'dnf.plugin.dynamic' + + +class Plugin(object): + """The base class custom plugins must derive from. 
#:api""" + + name = '' + config_name = None + + @classmethod + def read_config(cls, conf): + # :api + parser = libdnf.conf.ConfigParser() + name = cls.config_name if cls.config_name else cls.name + files = ['%s/%s.conf' % (path, name) for path in conf.pluginconfpath] + for file in files: + if os.path.isfile(file): + try: + parser.read(file) + except Exception as e: + raise dnf.exceptions.ConfigError(_("Parsing file failed: %s") % str(e)) + return parser + + def __init__(self, base, cli): + # :api + self.base = base + self.cli = cli + + def pre_config(self): + # :api + pass + + def config(self): + # :api + pass + + def resolved(self): + # :api + pass + + def sack(self): + # :api + pass + + def pre_transaction(self): + # :api + pass + + def transaction(self): + # :api + pass + + +class Plugins(object): + def __init__(self): + self.plugin_cls = [] + self.plugins = [] + + def _caller(self, method): + for plugin in self.plugins: + try: + getattr(plugin, method)() + except dnf.exceptions.Error: + raise + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + except_list = traceback.format_exception(exc_type, exc_value, exc_traceback) + logger.critical(''.join(except_list)) + + def _check_enabled(self, conf, enable_plugins): + """Checks whether plugins are enabled or disabled in configuration files + and removes disabled plugins from list""" + for plug_cls in self.plugin_cls[:]: + name = plug_cls.name + if any(fnmatch.fnmatch(name, pattern) for pattern in enable_plugins): + continue + parser = plug_cls.read_config(conf) + # has it enabled = False? 
+ disabled = (parser.has_section('main') + and parser.has_option('main', 'enabled') + and not parser.getboolean('main', 'enabled')) + if disabled: + self.plugin_cls.remove(plug_cls) + + def _load(self, conf, skips, enable_plugins): + """Dynamically load relevant plugin modules.""" + + if DYNAMIC_PACKAGE in sys.modules: + raise RuntimeError("load_plugins() called twice") + sys.modules[DYNAMIC_PACKAGE] = package = dnf.pycomp.ModuleType(DYNAMIC_PACKAGE) + package.__path__ = [] + + files = _get_plugins_files(conf.pluginpath, skips, enable_plugins) + _import_modules(package, files) + self.plugin_cls = _plugin_classes()[:] + self._check_enabled(conf, enable_plugins) + if len(self.plugin_cls) > 0: + names = sorted(plugin.name for plugin in self.plugin_cls) + logger.debug(_('Loaded plugins: %s'), ', '.join(names)) + + def _run_pre_config(self): + self._caller('pre_config') + + def _run_config(self): + self._caller('config') + + def _run_init(self, base, cli=None): + for p_cls in self.plugin_cls: + plugin = p_cls(base, cli) + self.plugins.append(plugin) + + def run_sack(self): + self._caller('sack') + + def run_resolved(self): + self._caller('resolved') + + def run_pre_transaction(self): + self._caller('pre_transaction') + + def run_transaction(self): + self._caller('transaction') + + def _unload(self): + del sys.modules[DYNAMIC_PACKAGE] + + def unload_removed_plugins(self, transaction): + erased = set([package.name for package in transaction.remove_set]) + if not erased: + return + installed = set([package.name for package in transaction.install_set]) + transaction_diff = erased - installed + if not transaction_diff: + return + files_erased = set() + for pkg in transaction.remove_set: + if pkg.name in transaction_diff: + files_erased.update(pkg.files) + for plugin in self.plugins[:]: + if inspect.getfile(plugin.__class__) in files_erased: + self.plugins.remove(plugin) + + +def _plugin_classes(): + return Plugin.__subclasses__() + + +def _import_modules(package, py_files): 
+ for fn in py_files: + path, module = os.path.split(fn) + package.__path__.append(path) + (module, ext) = os.path.splitext(module) + name = '%s.%s' % (package.__name__, module) + try: + module = importlib.import_module(name) + except Exception as e: + logger.error(_('Failed loading plugin "%s": %s'), module, e) + logger.log(dnf.logging.SUBDEBUG, '', exc_info=True) + + +def _get_plugins_files(paths, disable_plugins, enable_plugins): + plugins = [] + disable_plugins = set(disable_plugins) + enable_plugins = set(enable_plugins) + pattern_enable_found = set() + pattern_disable_found = set() + for p in paths: + for fn in glob.glob('%s/*.py' % p): + (plugin_name, dummy) = os.path.splitext(os.path.basename(fn)) + matched = True + enable_pattern_tested = False + for pattern_skip in disable_plugins: + if fnmatch.fnmatch(plugin_name, pattern_skip): + pattern_disable_found.add(pattern_skip) + matched = False + for pattern_enable in enable_plugins: + if fnmatch.fnmatch(plugin_name, pattern_enable): + matched = True + pattern_enable_found.add(pattern_enable) + enable_pattern_tested = True + if not enable_pattern_tested: + for pattern_enable in enable_plugins: + if fnmatch.fnmatch(plugin_name, pattern_enable): + pattern_enable_found.add(pattern_enable) + if matched: + plugins.append(fn) + enable_not_found = enable_plugins.difference(pattern_enable_found) + if enable_not_found: + logger.warning(_("No matches found for the following enable plugin patterns: {}").format( + ", ".join(sorted(enable_not_found)))) + disable_not_found = disable_plugins.difference(pattern_disable_found) + if disable_not_found: + logger.warning(_("No matches found for the following disable plugin patterns: {}").format( + ", ".join(sorted(disable_not_found)))) + return plugins + + +def register_command(command_class): + # :api + """A class decorator for automatic command registration.""" + def __init__(self, base, cli): + if cli: + cli.register_command(command_class) + plugin_class = 
type(str(command_class.__name__ + 'Plugin'), + (dnf.Plugin,), + {"__init__": __init__, + "name": command_class.aliases[0]}) + command_class._plugin = plugin_class + return command_class diff --git a/dnf/pycomp.py b/dnf/pycomp.py new file mode 100644 index 0000000..dd304e9 --- /dev/null +++ b/dnf/pycomp.py @@ -0,0 +1,113 @@ +# pycomp.py +# Python 2 and Python 3 compatibility module +# +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from gettext import NullTranslations +from sys import version_info +import base64 +import email.mime.text +import gettext +import itertools +import locale +import sys +import types + +PY3 = version_info.major >= 3 + +if PY3: + from io import StringIO + from configparser import ConfigParser + import queue + import urllib.parse + import shlex + + # functions renamed in py3 + Queue = queue.Queue + basestring = unicode = str + filterfalse = itertools.filterfalse + long = int + NullTranslations.ugettext = NullTranslations.gettext + NullTranslations.ungettext = NullTranslations.ngettext + xrange = range + raw_input = input + base64_decodebytes = base64.decodebytes + urlparse = urllib.parse + urllib_quote = urlparse.quote + shlex_quote = shlex.quote + sys_maxsize = sys.maxsize + + + def gettext_setup(t): + _ = t.gettext + P_ = t.ngettext + return (_, P_) + + # string helpers + def is_py2str_py3bytes(o): + return isinstance(o, bytes) + def is_py3bytes(o): + return isinstance(o, bytes) + + # functions that don't take unicode arguments in py2 + ModuleType = lambda m: types.ModuleType(m) + format = locale.format_string + def setlocale(category, loc=None): + locale.setlocale(category, loc) + def write_to_file(f, content): + f.write(content) + def email_mime(body): + return email.mime.text.MIMEText(body) +else: + # functions renamed in py3 + from __builtin__ import unicode, basestring, long, xrange, raw_input + from StringIO import StringIO + from ConfigParser import ConfigParser + import Queue + import urllib + import urlparse + import pipes + + Queue = Queue.Queue + filterfalse = itertools.ifilterfalse + base64_decodebytes = base64.decodestring + urllib_quote = urllib.quote + shlex_quote = pipes.quote + sys_maxsize = sys.maxint + + def gettext_setup(t): + _ = t.ugettext + P_ = t.ungettext + return (_, P_) + + # string helpers + def is_py2str_py3bytes(o): + return isinstance(o, str) + def is_py3bytes(o): + return False + + # functions that don't take unicode arguments in 
py2 + ModuleType = lambda m: types.ModuleType(m.encode('utf-8')) + def format(percent, *args, **kwargs): + return locale.format(percent.encode('utf-8'), *args, **kwargs) + def setlocale(category, loc=None): + locale.setlocale(category, loc.encode('utf-8')) + def write_to_file(f, content): + f.write(content.encode('utf-8')) + def email_mime(body): + return email.mime.text.MIMEText(body.encode('utf-8')) diff --git a/dnf/query.py b/dnf/query.py new file mode 100644 index 0000000..ab4139b --- /dev/null +++ b/dnf/query.py @@ -0,0 +1,46 @@ +# query.py +# Implements Query. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import unicode_literals +import hawkey + +from hawkey import Query +from dnf.i18n import ucd +from dnf.pycomp import basestring + + + +def _by_provides(sack, patterns, ignore_case=False, get_query=False): + if isinstance(patterns, basestring): + patterns = [patterns] + + q = sack.query() + flags = [] + if ignore_case: + flags.append(hawkey.ICASE) + + q.filterm(*flags, provides__glob=patterns) + if get_query: + return q + return q.run() + +def _per_nevra_dict(pkg_list): + return {ucd(pkg):pkg for pkg in pkg_list} diff --git a/dnf/repo.py b/dnf/repo.py new file mode 100644 index 0000000..dad1304 --- /dev/null +++ b/dnf/repo.py @@ -0,0 +1,658 @@ +# repo.py +# DNF Repository objects. +# +# Copyright (C) 2013-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
+# + +from __future__ import absolute_import +from __future__ import unicode_literals + +from dnf.i18n import ucd, _ + +import dnf.callback +import dnf.conf +import dnf.conf.substitutions +import dnf.const +import dnf.crypto +import dnf.exceptions +import dnf.logging +import dnf.pycomp +import dnf.util +import dnf.yum.misc +import libdnf.repo +import functools +import hashlib +import hawkey +import logging +import operator +import os +import re +import shutil +import string +import sys +import time +import traceback + +_PACKAGES_RELATIVE_DIR = "packages" +_MIRRORLIST_FILENAME = "mirrorlist" +# Chars allowed in a repo ID +_REPOID_CHARS = string.ascii_letters + string.digits + '-_.:' +# Regex pattern that matches a repo cachedir and captures the repo ID +_CACHEDIR_RE = r'(?P[%s]+)\-[%s]{16}' % (re.escape(_REPOID_CHARS), + string.hexdigits) + +# Regex patterns matching any filename that is repo-specific cache data of a +# particular type. The filename is expected to not contain the base cachedir +# path components. 
+CACHE_FILES = { + 'metadata': r'^%s\/.*(xml(\.gz|\.xz|\.bz2|.zck)?|asc|cachecookie|%s)$' % + (_CACHEDIR_RE, _MIRRORLIST_FILENAME), + 'packages': r'^%s\/%s\/.+rpm$' % (_CACHEDIR_RE, _PACKAGES_RELATIVE_DIR), + 'dbcache': r'^.+(solv|solvx)$', +} + +logger = logging.getLogger("dnf") + + +def repo_id_invalid(repo_id): + # :api + """Return index of an invalid character in the repo ID (if present).""" + first_invalid = libdnf.repo.Repo.verifyId(repo_id) + return None if first_invalid < 0 else first_invalid + + +def _pkg2payload(pkg, progress, *factories): + for fn in factories: + pload = fn(pkg, progress) + if pload is not None: + return pload + raise ValueError(_('no matching payload factory for %s') % pkg) + + +def _download_payloads(payloads, drpm): + # download packages + def _download_sort_key(payload): + return not hasattr(payload, 'delta') + + drpm.err.clear() + targets = [pload._librepo_target() + for pload in sorted(payloads, key=_download_sort_key)] + errs = _DownloadErrors() + try: + libdnf.repo.PackageTarget.downloadPackages(libdnf.repo.VectorPPackageTarget(targets), True) + except RuntimeError as e: + errs._fatal = str(e) + drpm.wait() + + # process downloading errors + errs._recoverable = drpm.err.copy() + for tgt in targets: + err = tgt.getErr() + if err is None or err.startswith('Not finished'): + continue + callbacks = tgt.getCallbacks() + payload = callbacks.package_pload + pkg = payload.pkg + if err == _('Already downloaded'): + errs._skipped.add(pkg) + continue + pkg.repo._repo.expire() + errs._irrecoverable[pkg] = [err] + + return errs + + +def _update_saving(saving, payloads, errs): + real, full = saving + for pload in payloads: + pkg = pload.pkg + if pkg in errs: + real += pload.download_size + continue + real += pload.download_size + full += pload._full_size + return real, full + + +class _DownloadErrors(object): + def __init__(self): + self._val_irrecoverable = {} + self._val_recoverable = {} + self._fatal = None + self._skipped = set() + + 
@property + def _irrecoverable(self): + if self._val_irrecoverable: + return self._val_irrecoverable + if self._fatal: + return {'': [self._fatal]} + return {} + + @property + def _recoverable(self): + return self._val_recoverable + + @_recoverable.setter + def _recoverable(self, new_dct): + self._val_recoverable = new_dct + + def _bandwidth_used(self, pload): + if pload.pkg in self._skipped: + return 0 + return pload.download_size + + +class _DetailedLibrepoError(Exception): + def __init__(self, librepo_err, source_url): + Exception.__init__(self) + self.librepo_code = librepo_err.args[0] + self.librepo_msg = librepo_err.args[1] + self.source_url = source_url + + +class _NullKeyImport(dnf.callback.KeyImport): + def _confirm(self, id, userid, fingerprint, url, timestamp): + return True + + +class Metadata(object): + def __init__(self, repo): + self._repo = repo + + @property + def fresh(self): + # :api + return self._repo.fresh() + + +class PackageTargetCallbacks(libdnf.repo.PackageTargetCB): + def __init__(self, package_pload): + super(PackageTargetCallbacks, self).__init__() + self.package_pload = package_pload + + def end(self, status, msg): + self.package_pload._end_cb(None, status, msg) + return 0 + + def progress(self, totalToDownload, downloaded): + self.package_pload._progress_cb(None, totalToDownload, downloaded) + return 0 + + def mirrorFailure(self, msg, url): + self.package_pload._mirrorfail_cb(None, msg, url) + return 0 + + +class PackagePayload(dnf.callback.Payload): + def __init__(self, pkg, progress): + super(PackagePayload, self).__init__(progress) + self.callbacks = PackageTargetCallbacks(self) + self.pkg = pkg + + def _end_cb(self, cbdata, lr_status, msg): + """End callback to librepo operation.""" + status = dnf.callback.STATUS_FAILED + if msg is None: + status = dnf.callback.STATUS_OK + elif msg.startswith('Not finished'): + return + elif lr_status == libdnf.repo.PackageTargetCB.TransferStatus_ALREADYEXISTS: + status = 
dnf.callback.STATUS_ALREADY_EXISTS + + self.progress.end(self, status, msg) + + def _mirrorfail_cb(self, cbdata, err, url): + self.progress.end(self, dnf.callback.STATUS_MIRROR, err) + + def _progress_cb(self, cbdata, total, done): + try: + self.progress.progress(self, done) + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + except_list = traceback.format_exception(exc_type, exc_value, exc_traceback) + logger.critical(''.join(except_list)) + + @property + def _full_size(self): + return self.download_size + + def _librepo_target(self): + pkg = self.pkg + pkgdir = pkg.pkgdir + dnf.util.ensure_dir(pkgdir) + + target_dct = { + 'dest': pkgdir, + 'resume': True, + 'cbdata': self, + 'progresscb': self._progress_cb, + 'endcb': self._end_cb, + 'mirrorfailurecb': self._mirrorfail_cb, + } + target_dct.update(self._target_params()) + + return libdnf.repo.PackageTarget( + pkg.repo._repo, + target_dct['relative_url'], + target_dct['dest'], target_dct['checksum_type'], target_dct['checksum'], + target_dct['expectedsize'], target_dct['base_url'], target_dct['resume'], + 0, 0, self.callbacks) + + +class RPMPayload(PackagePayload): + + def __str__(self): + return os.path.basename(self.pkg.location) + + def _target_params(self): + pkg = self.pkg + ctype, csum = pkg.returnIdSum() + ctype_code = libdnf.repo.PackageTarget.checksumType(ctype) + if ctype_code == libdnf.repo.PackageTarget.ChecksumType_UNKNOWN: + logger.warning(_("unsupported checksum type: %s"), ctype) + + return { + 'relative_url': pkg.location, + 'checksum_type': ctype_code, + 'checksum': csum, + 'expectedsize': pkg.downloadsize, + 'base_url': pkg.baseurl, + } + + @property + def download_size(self): + """Total size of the download.""" + return self.pkg.downloadsize + + +class RemoteRPMPayload(PackagePayload): + + def __init__(self, remote_location, conf, progress): + super(RemoteRPMPayload, self).__init__("unused_object", progress) + self.remote_location = remote_location + self.remote_size = 0 
+ self.conf = conf + s = (self.conf.releasever or "") + self.conf.substitutions.get('basearch') + digest = hashlib.sha256(s.encode('utf8')).hexdigest()[:16] + repodir = "commandline-" + digest + self.pkgdir = os.path.join(self.conf.cachedir, repodir, "packages") + dnf.util.ensure_dir(self.pkgdir) + self.local_path = os.path.join(self.pkgdir, self.__str__().lstrip("/")) + + def __str__(self): + return os.path.basename(self.remote_location) + + def _progress_cb(self, cbdata, total, done): + self.remote_size = total + try: + self.progress.progress(self, done) + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + except_list = traceback.format_exception(exc_type, exc_value, exc_traceback) + logger.critical(''.join(except_list)) + + def _librepo_target(self): + return libdnf.repo.PackageTarget( + self.conf._config, os.path.basename(self.remote_location), + self.pkgdir, 0, None, 0, os.path.dirname(self.remote_location), + True, 0, 0, self.callbacks) + + @property + def download_size(self): + """Total size of the download.""" + return self.remote_size + + +class MDPayload(dnf.callback.Payload): + + def __init__(self, progress): + super(MDPayload, self).__init__(progress) + self._text = "" + self._download_size = 0 + self.fastest_mirror_running = False + self.mirror_failures = set() + + def __str__(self): + if dnf.pycomp.PY3: + return self._text + else: + return self._text.encode('utf-8') + + def __unicode__(self): + return self._text + + def _progress_cb(self, cbdata, total, done): + self._download_size = total + self.progress.progress(self, done) + + def _fastestmirror_cb(self, cbdata, stage, data): + if stage == libdnf.repo.RepoCB.FastestMirrorStage_DETECTION: + # pinging mirrors, this might take a while + msg = _('determining the fastest mirror (%s hosts).. ') % data + self.fastest_mirror_running = True + elif stage == libdnf.repo.RepoCB.FastestMirrorStage_STATUS and self.fastest_mirror_running: + # done.. 
report but ignore any errors + msg = 'error: %s\n' % data if data else 'done.\n' + else: + return + self.progress.message(msg) + + def _mirror_failure_cb(self, cbdata, msg, url, metadata): + self.mirror_failures.add(msg) + msg = 'error: %s (%s).' % (msg, url) + logger.debug(msg) + + @property + def download_size(self): + return self._download_size + + @property + def progress(self): + return self._progress + + @progress.setter + def progress(self, progress): + if progress is None: + progress = dnf.callback.NullDownloadProgress() + self._progress = progress + + def start(self, text): + self._text = text + self.progress.start(1, 0) + + def end(self): + self._download_size = 0 + self.progress.end(self, None, None) + + +# use the local cache even if it's expired. download if there's no cache. +SYNC_LAZY = libdnf.repo.Repo.SyncStrategy_LAZY +# use the local cache, even if it's expired, never download. +SYNC_ONLY_CACHE = libdnf.repo.Repo.SyncStrategy_ONLY_CACHE +# try the cache, if it is expired download new md. 
+SYNC_TRY_CACHE = libdnf.repo.Repo.SyncStrategy_TRY_CACHE + + +class RepoCallbacks(libdnf.repo.RepoCB): + def __init__(self, repo): + super(RepoCallbacks, self).__init__() + self._repo = repo + self._md_pload = repo._md_pload + + def start(self, what): + self._md_pload.start(what) + + def end(self): + self._md_pload.end() + + def progress(self, totalToDownload, downloaded): + self._md_pload._progress_cb(None, totalToDownload, downloaded) + return 0 + + def fastestMirror(self, stage, ptr): + self._md_pload._fastestmirror_cb(None, stage, ptr) + + def handleMirrorFailure(self, msg, url, metadata): + self._md_pload._mirror_failure_cb(None, msg, url, metadata) + return 0 + + def repokeyImport(self, id, userid, fingerprint, url, timestamp): + return self._repo._key_import._confirm(id, userid, fingerprint, url, timestamp) + + +class Repo(dnf.conf.RepoConf): + # :api + DEFAULT_SYNC = SYNC_TRY_CACHE + + def __init__(self, name=None, parent_conf=None): + # :api + super(Repo, self).__init__(section=name, parent=parent_conf) + + self._config.this.disown() # _repo will be the owner of _config + self._repo = libdnf.repo.Repo(name if name else "", self._config) + + self._md_pload = MDPayload(dnf.callback.NullDownloadProgress()) + self._callbacks = RepoCallbacks(self) + self._callbacks.this.disown() # _repo will be the owner of callbacks + self._repo.setCallbacks(self._callbacks) + + self._pkgdir = None + self._key_import = _NullKeyImport() + self.metadata = None # :api + self._repo.setSyncStrategy(self.DEFAULT_SYNC) + if parent_conf: + self._repo.setSubstitutions(parent_conf.substitutions) + self._substitutions = dnf.conf.substitutions.Substitutions() + self._check_config_file_age = parent_conf.check_config_file_age \ + if parent_conf is not None else True + + @property + def id(self): + # :api + return self._repo.getId() + + @property + def repofile(self): + # :api + return self._repo.getRepoFilePath() + + @repofile.setter + def repofile(self, value): + 
self._repo.setRepoFilePath(value) + + @property + def pkgdir(self): + # :api + if self._repo.isLocal(): + return dnf.util.strip_prefix(self.baseurl[0], 'file://') + return self.cache_pkgdir() + + def cache_pkgdir(self): + if self._pkgdir is not None: + return self._pkgdir + return os.path.join(self._repo.getCachedir(), _PACKAGES_RELATIVE_DIR) + + @pkgdir.setter + def pkgdir(self, val): + # :api + self._pkgdir = val + + @property + def _pubring_dir(self): + return os.path.join(self._repo.getCachedir(), 'pubring') + + @property + def load_metadata_other(self): + return self._repo.getLoadMetadataOther() + + @load_metadata_other.setter + def load_metadata_other(self, val): + self._repo.setLoadMetadataOther(val) + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.id) + + def __setattr__(self, name, value): + super(Repo, self).__setattr__(name, value) + + def disable(self): + # :api + self._repo.disable() + + def enable(self): + # :api + self._repo.enable() + + def add_metadata_type_to_download(self, metadata_type): + # :api + """Ask for additional repository metadata type to download. + + Given metadata_type is appended to the default metadata set when + repository is downloaded. + + Parameters + ---------- + metadata_type: string + + Example: add_metadata_type_to_download("productid") + """ + self._repo.addMetadataTypeToDownload(metadata_type) + + def remove_metadata_type_from_download(self, metadata_type): + # :api + """Stop asking for this additional repository metadata type + in download. + + Given metadata_type is no longer downloaded by default + when this repository is downloaded. 
+ + Parameters + ---------- + metadata_type: string + + Example: remove_metadata_type_from_download("productid") + """ + self._repo.removeMetadataTypeFromDownload(metadata_type) + + def get_metadata_path(self, metadata_type): + # :api + """Return path to the file with downloaded repository metadata of given type. + + Parameters + ---------- + metadata_type: string + """ + return self._repo.getMetadataPath(metadata_type) + + def get_metadata_content(self, metadata_type): + # :api + """Return content of the file with downloaded repository metadata of given type. + + Content of compressed metadata file is returned uncompressed. + + Parameters + ---------- + metadata_type: string + """ + return self._repo.getMetadataContent(metadata_type) + + def load(self): + # :api + """Load the metadata for this repo. + + Depending on the configuration and the age and consistence of data + available on the disk cache, either loads the metadata from the cache or + downloads them from the mirror, baseurl or metalink. + + This method will by default not try to refresh already loaded data if + called repeatedly. + + Returns True if this call to load() caused a fresh metadata download. + + """ + ret = False + try: + ret = self._repo.load() + except RuntimeError as e: + if self._md_pload.mirror_failures: + msg = "Errors during downloading metadata for repository '%s':" % self.id + for failure in self._md_pload.mirror_failures: + msg += "\n - %s" % failure + logger.warning(msg) + raise dnf.exceptions.RepoError(str(e)) + finally: + self._md_pload.mirror_failures = set() + self.metadata = Metadata(self._repo) + return ret + + def _metadata_expire_in(self): + """Get the number of seconds after which the cached metadata will expire. + + Returns a tuple, boolean whether there even is cached metadata and the + number of seconds it will expire in. Negative number means the metadata + has expired already, None that it never expires. 
+ + """ + if not self.metadata: + self._repo.loadCache(False) + if self.metadata: + if self.metadata_expire == -1: + return True, None + expiration = self._repo.getExpiresIn() + if self._repo.isExpired(): + expiration = min(0, expiration) + return True, expiration + return False, 0 + + def _set_key_import(self, key_import): + self._key_import = key_import + + def set_progress_bar(self, progress): + # :api + self._md_pload.progress = progress + + def get_http_headers(self): + # :api + """Returns user defined http headers. + + Returns + ------- + headers : tuple of strings + """ + return self._repo.getHttpHeaders() + + def set_http_headers(self, headers): + # :api + """Sets http headers. + + Sets new http headers and rewrites existing ones. + + Parameters + ---------- + headers : tuple or list of strings + Example: set_http_headers(["User-Agent: Agent007", "MyFieldName: MyFieldValue"]) + """ + self._repo.setHttpHeaders(headers) + + def remote_location(self, location, schemes=('http', 'ftp', 'file', 'https')): + """ + :param location: relative location inside the repo + :param schemes: list of allowed protocols. Default is ('http', 'ftp', 'file', 'https') + :return: absolute url (string) or None + """ + def schemes_filter(url_list): + for url in url_list: + if schemes: + s = dnf.pycomp.urlparse.urlparse(url)[0] + if s in schemes: + return os.path.join(url, location.lstrip('/')) + else: + return os.path.join(url, location.lstrip('/')) + return None + + if not location: + return None + + mirrors = self._repo.getMirrors() + if mirrors: + return schemes_filter(mirrors) + elif self.baseurl: + return schemes_filter(self.baseurl) diff --git a/dnf/repodict.py b/dnf/repodict.py new file mode 100644 index 0000000..ffa0f8e --- /dev/null +++ b/dnf/repodict.py @@ -0,0 +1,143 @@ +# repodict.py +# Managing repo configuration in DNF. +# +# Copyright (C) 2013-2016 Red Hat, Inc. 
+# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import unicode_literals +from dnf.exceptions import ConfigError +from dnf.i18n import _ + +import dnf.util +import libdnf.conf +import fnmatch +import os + +logger = dnf.util.logger + + +class RepoDict(dict): + # :api + def add(self, repo): + # :api + id_ = repo.id + if id_ in self: + msg = 'Repository %s is listed more than once in the configuration' + raise ConfigError(msg % id_) + try: + repo._repo.verify() + except RuntimeError as e: + raise ConfigError("{0}".format(e)) + self[id_] = repo + + def all(self): + # :api + return dnf.util.MultiCallList(self.values()) + + def _any_enabled(self): + return not dnf.util.empty(self.iter_enabled()) + + def _enable_sub_repos(self, sub_name_fn): + for repo in self.iter_enabled(): + for found in self.get_matching(sub_name_fn(repo.id)): + if not found.enabled: + logger.info(_('enabling %s repository'), found.id) + found.enable() + + def add_new_repo(self, repoid, conf, baseurl=(), **kwargs): + # :api + """ + Creates new repo object and add it into RepoDict. 
Variables in provided values will be + automatically substituted using conf.substitutions (like $releasever, ...) + + @param repoid: Repo ID - string + @param conf: dnf Base().conf object + @param baseurl: List of strings + @param kwargs: keys and values that will be used to setattr on dnf.repo.Repo() object + @return: dnf.repo.Repo() object + """ + def substitute(values): + if isinstance(values, str): + return libdnf.conf.ConfigParser.substitute(values, conf.substitutions) + elif isinstance(values, list) or isinstance(values, tuple): + substituted = [] + for value in values: + if isinstance(value, str): + substituted.append( + libdnf.conf.ConfigParser.substitute(value, conf.substitutions)) + if substituted: + return substituted + return values + + repo = dnf.repo.Repo(repoid, conf) + for path in baseurl: + if '://' not in path: + path = 'file://{}'.format(os.path.abspath(path)) + repo.baseurl += [substitute(path)] + for (key, value) in kwargs.items(): + setattr(repo, key, substitute(value)) + self.add(repo) + logger.info(_("Added %s repo from %s"), repoid, ', '.join(baseurl)) + return repo + + def enable_debug_repos(self): + # :api + """enable debug repos corresponding to already enabled binary repos""" + + def debug_name(name): + return ("{}-debug-rpms".format(name[:-5]) if name.endswith("-rpms") + else "{}-debuginfo".format(name)) + + self._enable_sub_repos(debug_name) + + def enable_source_repos(self): + # :api + """enable source repos corresponding to already enabled binary repos""" + + def source_name(name): + return ("{}-source-rpms".format(name[:-5]) if name.endswith("-rpms") + else "{}-source".format(name)) + + self._enable_sub_repos(source_name) + + def get_matching(self, key): + # :api + if dnf.util.is_glob_pattern(key): + l = [self[k] for k in self if fnmatch.fnmatch(k, key)] + return dnf.util.MultiCallList(l) + repo = self.get(key, None) + if repo is None: + return dnf.util.MultiCallList([]) + return dnf.util.MultiCallList([repo]) + + def 
iter_enabled(self): + # :api + return (r for r in self.values() if r.enabled) + + def items(self): + """return repos sorted by priority""" + return (item for item in sorted(super(RepoDict, self).items(), + key=lambda x: (x[1].priority, x[1].cost))) + + def __iter__(self): + return self.keys() + + def keys(self): + return (k for k, v in self.items()) + + def values(self): + return (v for k, v in self.items()) diff --git a/dnf/rpm/CMakeLists.txt b/dnf/rpm/CMakeLists.txt new file mode 100644 index 0000000..13a6af0 --- /dev/null +++ b/dnf/rpm/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB rpm *.py) +INSTALL (FILES ${rpm} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/rpm) diff --git a/dnf/rpm/__init__.py b/dnf/rpm/__init__.py new file mode 100644 index 0000000..696e594 --- /dev/null +++ b/dnf/rpm/__init__.py @@ -0,0 +1,130 @@ +# __init__.py +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals +from . 
import transaction +from dnf.pycomp import is_py3bytes +import dnf.const +import dnf.exceptions +import rpm # used by ansible (dnf.rpm.rpm.labelCompare in lib/ansible/modules/packaging/os/dnf.py) + + +def detect_releasever(installroot): + # :api + """Calculate the release version for the system.""" + + ts = transaction.initReadOnlyTransaction(root=installroot) + ts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)) + for distroverpkg in dnf.const.DISTROVERPKG: + if dnf.pycomp.PY3: + distroverpkg = bytes(distroverpkg, 'utf-8') + try: + idx = ts.dbMatch('provides', distroverpkg) + except (TypeError, rpm.error) as e: + raise dnf.exceptions.Error('Error: %s' % str(e)) + if not len(idx): + continue + try: + hdr = next(idx) + except StopIteration: + msg = 'Error: rpmdb failed to list provides. Try: rpm --rebuilddb' + raise dnf.exceptions.Error(msg) + releasever = hdr['version'] + try: + try: + # header returns bytes -> look for bytes + # it may fail because rpm returns a decoded string since 10 Apr 2019 + off = hdr[rpm.RPMTAG_PROVIDENAME].index(distroverpkg) + except ValueError: + # header returns a string -> look for a string + off = hdr[rpm.RPMTAG_PROVIDENAME].index(distroverpkg.decode("utf8")) + flag = hdr[rpm.RPMTAG_PROVIDEFLAGS][off] + ver = hdr[rpm.RPMTAG_PROVIDEVERSION][off] + if flag == rpm.RPMSENSE_EQUAL and ver: + if hdr['name'] not in (distroverpkg, distroverpkg.decode("utf8")): + # override the package version + releasever = ver + except (ValueError, KeyError, IndexError): + pass + + if is_py3bytes(releasever): + releasever = str(releasever, "utf-8") + return releasever + return None + + +def _header(path): + """Return RPM header of the file.""" + ts = transaction.initReadOnlyTransaction() + with open(path) as package: + fdno = package.fileno() + try: + hdr = ts.hdrFromFdno(fdno) + except rpm.error as e: + raise dnf.exceptions.Error("{0}: '{1}'".format(e, path)) + return hdr + + +def _invert(dct): + return {v: k for k in dct for v in dct[k]} + 
+_BASEARCH_MAP = _invert({ + 'aarch64': ('aarch64',), + 'alpha': ('alpha', 'alphaev4', 'alphaev45', 'alphaev5', 'alphaev56', + 'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7', 'alphapca56'), + 'arm': ('armv5tejl', 'armv5tel', 'armv5tl', 'armv6l', 'armv7l', 'armv8l'), + 'armhfp': ('armv6hl', 'armv7hl', 'armv7hnl', 'armv8hl'), + 'i386': ('i386', 'athlon', 'geode', 'i386', 'i486', 'i586', 'i686'), + 'ia64': ('ia64',), + 'mips': ('mips',), + 'mipsel': ('mipsel',), + 'mips64': ('mips64',), + 'mips64el': ('mips64el',), + 'noarch': ('noarch',), + 'ppc': ('ppc',), + 'ppc64': ('ppc64', 'ppc64iseries', 'ppc64p7', 'ppc64pseries'), + 'ppc64le': ('ppc64le',), + 'riscv32' : ('riscv32',), + 'riscv64' : ('riscv64',), + 'riscv128' : ('riscv128',), + 's390': ('s390',), + 's390x': ('s390x',), + 'sh3': ('sh3',), + 'sh4': ('sh4', 'sh4a'), + 'sparc': ('sparc', 'sparc64', 'sparc64v', 'sparcv8', 'sparcv9', + 'sparcv9v'), + 'x86_64': ('x86_64', 'amd64', 'ia32e'), +}) + + +def basearch(arch): + # :api + return _BASEARCH_MAP[arch] + + +def getheader(rpm_hdr, key): + ''' + Returns value of rpm_hdr[key] as a string. Rpm has switched from bytes to str + and we need to handle both properly. + ''' + value = rpm_hdr[key] + if is_py3bytes(value): + value = str(value, "utf-8") + return value diff --git a/dnf/rpm/connection.py b/dnf/rpm/connection.py new file mode 100644 index 0000000..dd8b787 --- /dev/null +++ b/dnf/rpm/connection.py @@ -0,0 +1,34 @@ +# connection.py +# Maintain RPMDB connections. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals + +from .transaction import initReadOnlyTransaction +import dnf.util + +class RpmConnection(object): + def __init__(self, root): + self.root = root + + @property + @dnf.util.lazyattr("_readonly_ts") + def readonly_ts(self): + return initReadOnlyTransaction(self.root) diff --git a/dnf/rpm/error.py b/dnf/rpm/error.py new file mode 100644 index 0000000..f4d2122 --- /dev/null +++ b/dnf/rpm/error.py @@ -0,0 +1,22 @@ +# error.py +# RpmUtilsError +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. 
def checkSig(ts, package):
    """Takes a transaction set and a package, check it's sigs,
    return 0 if they are all fine
    return 1 if the gpg key can't be found
    return 2 if the header is in someway damaged
    return 3 if the key is not trusted
    return 4 if the pkg is not gpg or pgp signed"""

    value = 0
    currentflags = ts.setVSFlags(0)
    fdno = os.open(package, os.O_RDONLY)
    try:
        try:
            hdr = ts.hdrFromFdno(fdno)
        except rpm.error as e:
            # map rpm's error strings onto our documented return codes
            msg = str(e)
            if msg == "public key not available":
                value = 1
            elif msg == "public key not trusted":
                value = 3
            elif msg == "error reading package header":
                value = 2
        else:
            # checks signature from an hdr
            string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:' \
                     '{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
            try:
                siginfo = ucd(hdr.sprintf(string))
                if siginfo == '(none)':
                    value = 4
            except UnicodeDecodeError:
                pass
            del hdr
    finally:
        # BUGFIX: always release the descriptor and restore the verify flags —
        # the original leaked both if an unexpected exception escaped
        try:
            os.close(fdno)
        except OSError:  # if we're not opened, don't scream about it
            pass
        ts.setVSFlags(currentflags)  # put things back like they were before

    return value
with + # profile/etc info + return getattr(self.ts, method) + + # push/pop methods so we dont lose the previous + # set value, and we can potentiall debug a bit + # easier + def pushVSFlags(self, flags): + self.tsflags.append(flags) + self.ts.setVSFlags(self.tsflags[-1]) + + def addTsFlag(self, flag): + curflags = self.ts.setFlags(0) + self.ts.setFlags(curflags | flag) + + def getTsFlags(self): + curflags = self.ts.setFlags(0) + self.ts.setFlags(curflags) + return curflags + + def isTsFlagSet(self, flag): + val = self.getTsFlags() + return bool(flag & val) + + def setScriptFd(self, fd): + self.ts.scriptFd = fd.fileno() + + def test(self, cb, conf={}): + """tests the ts we've setup, takes a callback function and a conf dict + for flags and what not""" + + origflags = self.getTsFlags() + self.addTsFlag(rpm.RPMTRANS_FLAG_TEST) + # FIXME GARBAGE - remove once this is reimplemented elsewhere + # KEEPING FOR API COMPLIANCE ONLY + if conf.get('diskspacecheck') == 0: + self.ts.setProbFilter(rpm.RPMPROB_FILTER_DISKSPACE) + tserrors = self.ts.run(cb.callback, '') + self.ts.setFlags(origflags) + + reserrors = [] + if tserrors is not None: + for (descr, (etype, mount, need)) in tserrors: + reserrors.append(descr) + if not reserrors: + reserrors.append(_('Errors occurred during test transaction.')) + + return reserrors + +def initReadOnlyTransaction(root='/'): + read_ts = TransactionWrapper(root=root) + read_ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)) + return read_ts diff --git a/dnf/sack.py b/dnf/sack.py new file mode 100644 index 0000000..fb8c707 --- /dev/null +++ b/dnf/sack.py @@ -0,0 +1,76 @@ +# sack.py +# The dnf.Sack class, derived from hawkey.Sack +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
class Sack(hawkey.Sack):
    # :api
    """DNF-specific hawkey.Sack: adds installonly configuration and a Query factory."""

    def __init__(self, *args, **kwargs):
        super(Sack, self).__init__(*args, **kwargs)

    def _configure(self, installonly=None, installonly_limit=0):
        # push the installonly package list and limit down to hawkey
        if installonly:
            self.installonly = installonly
        self.installonly_limit = installonly_limit

    def query(self, flags=0):
        # :api
        """Factory function returning a DNF Query."""
        return dnf.query.Query(self, flags)


def _build_sack(base):
    """Construct a Sack wired to `base`'s configuration (cachedir, arch, logging)."""
    cachedir = base.conf.cachedir
    # create the dir ourselves so we have the permissions under control:
    dnf.util.ensure_dir(cachedir)
    # NOTE(review): dnf.const is referenced below but not in this module's visible
    # imports; presumably reachable via the dnf package import — confirm.
    return Sack(pkgcls=dnf.package.Package, pkginitval=base,
                arch=base.conf.substitutions["arch"],
                cachedir=cachedir, rootdir=base.conf.installroot,
                logfile=os.path.join(base.conf.logdir, dnf.const.LOG_HAWKEY),
                logdebug=base.conf.debuglevel > 2)


def _rpmdb_sack(base):
    # used by subscription-manager (src/dnf-plugins/product-id.py)
    sack = _build_sack(base)
    try:
        # It can fail if rpmDB is not present
        sack.load_system_repo(build_cache=False)
    except IOError:
        pass
    return sack


def rpmdb_sack(base):
    # :api
    """
    Returns a new instance of sack containing only installed packages (@System repo)
    Useful to get list of the installed RPMs after transaction.
    """
    return _rpmdb_sack(base)
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +from hawkey import Subject # :api + diff --git a/dnf/transaction.py b/dnf/transaction.py new file mode 100644 index 0000000..9c90633 --- /dev/null +++ b/dnf/transaction.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- + +# transaction.py +# Managing the transaction to be passed to RPM. +# +# Copyright (C) 2013-2018 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import absolute_import +from __future__ import unicode_literals + +import libdnf.transaction + +from dnf.i18n import _, C_ + +# :api - all action constants are considered an API + +# per-package actions - from libdnf +PKG_DOWNGRADE = libdnf.transaction.TransactionItemAction_DOWNGRADE +PKG_DOWNGRADED = libdnf.transaction.TransactionItemAction_DOWNGRADED +PKG_INSTALL = libdnf.transaction.TransactionItemAction_INSTALL +PKG_OBSOLETE = libdnf.transaction.TransactionItemAction_OBSOLETE +PKG_OBSOLETED = libdnf.transaction.TransactionItemAction_OBSOLETED +PKG_REINSTALL = libdnf.transaction.TransactionItemAction_REINSTALL +PKG_REINSTALLED = libdnf.transaction.TransactionItemAction_REINSTALLED +PKG_REMOVE = libdnf.transaction.TransactionItemAction_REMOVE +PKG_UPGRADE = libdnf.transaction.TransactionItemAction_UPGRADE +PKG_UPGRADED = libdnf.transaction.TransactionItemAction_UPGRADED + +# compatibility +PKG_ERASE = PKG_REMOVE + +# per-package actions - additional +PKG_CLEANUP = 101 +PKG_VERIFY = 102 +PKG_SCRIPTLET = 103 + +# transaction-wide actions +TRANS_PREPARATION = 201 +TRANS_POST = 202 + + +# packages that appeared on the system +FORWARD_ACTIONS = [ + libdnf.transaction.TransactionItemAction_INSTALL, + libdnf.transaction.TransactionItemAction_DOWNGRADE, + libdnf.transaction.TransactionItemAction_OBSOLETE, + libdnf.transaction.TransactionItemAction_UPGRADE, + libdnf.transaction.TransactionItemAction_REINSTALL, +] + + +# packages that got removed from the system +BACKWARD_ACTIONS = [ + libdnf.transaction.TransactionItemAction_DOWNGRADED, + libdnf.transaction.TransactionItemAction_OBSOLETED, + libdnf.transaction.TransactionItemAction_UPGRADED, + libdnf.transaction.TransactionItemAction_REMOVE, +# TODO: REINSTALLED may and may not belong 
here; the same NEVRA is in FORWARD_ACTIONS already +# libdnf.transaction.TransactionItemAction_REINSTALLED, +] + + +ACTIONS = { + # TRANSLATORS: This is for a single package currently being downgraded. + PKG_DOWNGRADE: C_('currently', 'Downgrading'), + PKG_DOWNGRADED: _('Cleanup'), + # TRANSLATORS: This is for a single package currently being installed. + PKG_INSTALL: C_('currently', 'Installing'), + PKG_OBSOLETE: _('Obsoleting'), + PKG_OBSOLETED: _('Obsoleting'), + # TRANSLATORS: This is for a single package currently being reinstalled. + PKG_REINSTALL: C_('currently', 'Reinstalling'), + PKG_REINSTALLED: _('Cleanup'), + # TODO: 'Removing'? + PKG_REMOVE: _('Erasing'), + # TRANSLATORS: This is for a single package currently being upgraded. + PKG_UPGRADE: C_('currently', 'Upgrading'), + PKG_UPGRADED: _('Cleanup'), + + PKG_CLEANUP: _('Cleanup'), + PKG_VERIFY: _('Verifying'), + PKG_SCRIPTLET: _('Running scriptlet'), + + TRANS_PREPARATION: _('Preparing'), + # TODO: TRANS_POST +} + + +# untranslated strings, logging to /var/log/dnf/dnf.rpm.log +FILE_ACTIONS = { + PKG_DOWNGRADE: 'Downgrade', + PKG_DOWNGRADED: 'Downgraded', + PKG_INSTALL: 'Installed', + PKG_OBSOLETE: 'Obsolete', + PKG_OBSOLETED: 'Obsoleted', + PKG_REINSTALL: 'Reinstall', + PKG_REINSTALLED: 'Reinstalled', + # TODO: 'Removed'? + PKG_REMOVE: 'Erase', + PKG_UPGRADE: 'Upgrade', + PKG_UPGRADED: 'Upgraded', + + PKG_CLEANUP: 'Cleanup', + PKG_VERIFY: 'Verified', + PKG_SCRIPTLET: 'Running scriptlet', + + TRANS_PREPARATION: 'Preparing', + # TODO: TRANS_POST +} diff --git a/dnf/util.py b/dnf/util.py new file mode 100644 index 0000000..8cf3627 --- /dev/null +++ b/dnf/util.py @@ -0,0 +1,506 @@ +# util.py +# Basic dnf utils. +# +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +from __future__ import print_function +from __future__ import absolute_import +from __future__ import unicode_literals + +from .pycomp import PY3, basestring +from dnf.i18n import _, ucd +from functools import reduce +import argparse +import dnf +import dnf.callback +import dnf.const +import dnf.pycomp +import errno +import itertools +import locale +import logging +import os +import pwd +import shutil +import sys +import tempfile +import time +import libdnf.repo + +logger = logging.getLogger('dnf') + +MAIN_PROG = argparse.ArgumentParser().prog if argparse.ArgumentParser().prog == "yum" else "dnf" +MAIN_PROG_UPPER = MAIN_PROG.upper() + +"""DNF Utilities.""" + + +def _parse_specs(namespace, values): + """ + Categorize :param values list into packages, groups and filenames + + :param namespace: argparse.Namespace, where specs will be stored + :param values: list of specs, whether packages ('foo') or groups/modules ('@bar') + or filenames ('*.rmp', 'http://*', ...) 
+ + To access packages use: specs.pkg_specs, + to access groups use: specs.grp_specs, + to access filenames use: specs.filenames + """ + + setattr(namespace, "filenames", []) + setattr(namespace, "grp_specs", []) + setattr(namespace, "pkg_specs", []) + tmp_set = set() + for value in values: + if value in tmp_set: + continue + tmp_set.add(value) + schemes = dnf.pycomp.urlparse.urlparse(value)[0] + if value.endswith('.rpm'): + namespace.filenames.append(value) + elif schemes and schemes in ('http', 'ftp', 'file', 'https'): + namespace.filenames.append(value) + elif value.startswith('@'): + namespace.grp_specs.append(value[1:]) + else: + namespace.pkg_specs.append(value) + + +def _urlopen_progress(url, conf, progress=None): + if progress is None: + progress = dnf.callback.NullDownloadProgress() + pload = dnf.repo.RemoteRPMPayload(url, conf, progress) + if os.path.exists(pload.local_path): + return pload.local_path + est_remote_size = sum([pload.download_size]) + progress.start(1, est_remote_size) + targets = [pload._librepo_target()] + try: + libdnf.repo.PackageTarget.downloadPackages(libdnf.repo.VectorPPackageTarget(targets), True) + except RuntimeError as e: + if conf.strict: + raise IOError(str(e)) + logger.error(str(e)) + return pload.local_path + +def _urlopen(url, conf=None, repo=None, mode='w+b', **kwargs): + """ + Open the specified absolute url, return a file object + which respects proxy setting even for non-repo downloads + """ + if PY3 and 'b' not in mode: + kwargs.setdefault('encoding', 'utf-8') + fo = tempfile.NamedTemporaryFile(mode, **kwargs) + + try: + if repo: + repo._repo.downloadUrl(url, fo.fileno()) + else: + libdnf.repo.Downloader.downloadURL(conf._config if conf else None, url, fo.fileno()) + except RuntimeError as e: + raise IOError(str(e)) + + fo.seek(0) + return fo + +def rtrim(s, r): + if s.endswith(r): + s = s[:-len(r)] + return s + + +def am_i_root(): + # used by ansible (lib/ansible/modules/packaging/os/dnf.py) + return os.geteuid() == 0 
+ +def clear_dir(path): + """Remove all files and dirs under `path` + + Also see rm_rf() + + """ + for entry in os.listdir(path): + contained_path = os.path.join(path, entry) + rm_rf(contained_path) + +def ensure_dir(dname): + # used by ansible (lib/ansible/modules/packaging/os/dnf.py) + try: + os.makedirs(dname, mode=0o755) + except OSError as e: + if e.errno != errno.EEXIST or not os.path.isdir(dname): + raise e + +def empty(iterable): + try: + l = len(iterable) + except TypeError: + l = len(list(iterable)) + return l == 0 + +def first(iterable): + """Returns the first item from an iterable or None if it has no elements.""" + it = iter(iterable) + try: + return next(it) + except StopIteration: + return None + + +def first_not_none(iterable): + it = iter(iterable) + try: + return next(item for item in it if item is not None) + except StopIteration: + return None + + +def file_age(fn): + return time.time() - file_timestamp(fn) + +def file_timestamp(fn): + return os.stat(fn).st_mtime + +def get_effective_login(): + try: + return pwd.getpwuid(os.geteuid())[0] + except KeyError: + return "UID: %s" % os.geteuid() + +def get_in(dct, keys, not_found): + """Like dict.get() for nested dicts.""" + for k in keys: + dct = dct.get(k) + if dct is None: + return not_found + return dct + +def group_by_filter(fn, iterable): + def splitter(acc, item): + acc[not bool(fn(item))].append(item) + return acc + return reduce(splitter, iterable, ([], [])) + +def insert_if(item, iterable, condition): + """Insert an item into an iterable by a condition.""" + for original_item in iterable: + if condition(original_item): + yield item + yield original_item + +def is_exhausted(iterator): + """Test whether an iterator is exhausted.""" + try: + next(iterator) + except StopIteration: + return True + else: + return False + +def is_glob_pattern(pattern): + if is_string_type(pattern): + pattern = [pattern] + return (isinstance(pattern, list) and any(set(p) & set("*[?") for p in pattern)) + +def 
is_string_type(obj): + if PY3: + return isinstance(obj, str) + else: + return isinstance(obj, basestring) + +def lazyattr(attrname): + """Decorator to get lazy attribute initialization. + + Composes with @property. Force reinitialization by deleting the . + """ + def get_decorated(fn): + def cached_getter(obj): + try: + return getattr(obj, attrname) + except AttributeError: + val = fn(obj) + setattr(obj, attrname, val) + return val + return cached_getter + return get_decorated + + +def mapall(fn, *seq): + """Like functools.map(), but return a list instead of an iterator. + + This means all side effects of fn take place even without iterating the + result. + + """ + return list(map(fn, *seq)) + +def normalize_time(timestamp): + """Convert time into locale aware datetime string object.""" + t = time.strftime("%c", time.localtime(timestamp)) + if not dnf.pycomp.PY3: + current_locale_setting = locale.getlocale()[1] + if current_locale_setting: + t = t.decode(current_locale_setting) + return t + +def on_ac_power(): + """Decide whether we are on line power. + + Returns True if we are on line power, False if not, None if it can not be + decided. + + """ + try: + with open("/sys/class/power_supply/AC/online") as ac_status: + data = ac_status.read() + return int(data) == 1 + except (IOError, ValueError): + return None + + +def on_metered_connection(): + """Decide whether we are on metered connection. 
+ + Returns: + True: if on metered connection + False: if not + None: if it can not be decided + """ + try: + import dbus + except ImportError: + return None + try: + bus = dbus.SystemBus() + proxy = bus.get_object("org.freedesktop.NetworkManager", + "/org/freedesktop/NetworkManager") + iface = dbus.Interface(proxy, "org.freedesktop.DBus.Properties") + metered = iface.Get("org.freedesktop.NetworkManager", "Metered") + except dbus.DBusException: + return None + if metered == 0: # NM_METERED_UNKNOWN + return None + elif metered in (1, 3): # NM_METERED_YES, NM_METERED_GUESS_YES + return True + elif metered in (2, 4): # NM_METERED_NO, NM_METERED_GUESS_NO + return False + else: # Something undocumented (at least at this moment) + raise ValueError("Unknown value for metered property: %r", metered) + +def partition(pred, iterable): + """Use a predicate to partition entries into false entries and true entries. + + Credit: Python library itertools' documentation. + + """ + t1, t2 = itertools.tee(iterable) + return dnf.pycomp.filterfalse(pred, t1), filter(pred, t2) + +def rm_rf(path): + try: + shutil.rmtree(path) + except OSError: + pass + +def split_by(iterable, condition): + """Split an iterable into tuples by a condition. + + Inserts a separator before each item which meets the condition and then + cuts the iterable by these separators. + + """ + separator = object() # A unique object. + # Create a function returning tuple of objects before the separator. + def next_subsequence(it): + return tuple(itertools.takewhile(lambda e: e != separator, it)) + + # Mark each place where the condition is met by the separator. + marked = insert_if(separator, iterable, condition) + + # The 1st subsequence may be empty if the 1st item meets the condition. 
+ yield next_subsequence(marked) + + while True: + subsequence = next_subsequence(marked) + if not subsequence: + break + yield subsequence + +def strip_prefix(s, prefix): + if s.startswith(prefix): + return s[len(prefix):] + return None + + +def touch(path, no_create=False): + """Create an empty file if it doesn't exist or bump it's timestamps. + + If no_create is True only bumps the timestamps. + """ + if no_create or os.access(path, os.F_OK): + return os.utime(path, None) + with open(path, 'a'): + pass + + +def _terminal_messenger(tp='write', msg="", out=sys.stdout): + try: + if tp == 'write': + out.write(msg) + elif tp == 'flush': + out.flush() + elif tp == 'write_flush': + out.write(msg) + out.flush() + elif tp == 'print': + print(msg, file=out) + else: + raise ValueError('Unsupported type: ' + tp) + except IOError as e: + logger.critical('{}: {}'.format(type(e).__name__, ucd(e))) + pass + + +def _format_resolve_problems(resolve_problems): + """ + Format string about problems in resolve + + :param resolve_problems: list with list of strings (output of goal.problem_rules()) + :return: string + """ + msg = "" + count_problems = (len(resolve_problems) > 1) + for i, rs in enumerate(resolve_problems, start=1): + if count_problems: + msg += "\n " + _("Problem") + " %d: " % i + else: + msg += "\n " + _("Problem") + ": " + msg += "\n - ".join(rs) + return msg + + +def _te_nevra(te): + nevra = te.N() + '-' + if te.E() is not None and te.E() != '0': + nevra += te.E() + ':' + return nevra + te.V() + '-' + te.R() + '.' 
+ te.A() + + +def _log_rpm_trans_with_swdb(rpm_transaction, swdb_transaction): + logger.debug("Logging transaction elements") + for rpm_el in rpm_transaction: + tsi = rpm_el.Key() + tsi_state = None + if tsi is not None: + tsi_state = tsi.state + msg = "RPM element: '{}', Key(): '{}', Key state: '{}', Failed() '{}': ".format( + _te_nevra(rpm_el), tsi, tsi_state, rpm_el.Failed()) + logger.debug(msg) + for tsi in swdb_transaction: + msg = "SWDB element: '{}', State: '{}', Action: '{}', From repo: '{}', Reason: '{}', " \ + "Get reason: '{}'".format(str(tsi), tsi.state, tsi.action, tsi.from_repo, tsi.reason, + tsi.get_reason()) + logger.debug(msg) + + +def _sync_rpm_trans_with_swdb(rpm_transaction, swdb_transaction): + revert_actions = {libdnf.transaction.TransactionItemAction_DOWNGRADED, + libdnf.transaction.TransactionItemAction_OBSOLETED, + libdnf.transaction.TransactionItemAction_REMOVE, + libdnf.transaction.TransactionItemAction_UPGRADED, + libdnf.transaction.TransactionItemAction_REINSTALLED} + cached_tsi = [tsi for tsi in swdb_transaction] + el_not_found = False + error = False + for rpm_el in rpm_transaction: + te_nevra = _te_nevra(rpm_el) + tsi = rpm_el.Key() + if tsi is None or not hasattr(tsi, "pkg"): + for tsi_candidate in cached_tsi: + if tsi_candidate.state != libdnf.transaction.TransactionItemState_UNKNOWN: + continue + if tsi_candidate.action not in revert_actions: + continue + if str(tsi_candidate) == te_nevra: + tsi = tsi_candidate + break + if tsi is None or not hasattr(tsi, "pkg"): + logger.critical(_("TransactionItem not found for key: {}").format(te_nevra)) + el_not_found = True + continue + if rpm_el.Failed(): + tsi.state = libdnf.transaction.TransactionItemState_ERROR + error = True + else: + tsi.state = libdnf.transaction.TransactionItemState_DONE + for tsi in cached_tsi: + if tsi.state == libdnf.transaction.TransactionItemState_UNKNOWN: + logger.critical(_("TransactionSWDBItem not found for key: {}").format(str(tsi))) + el_not_found = True + 
if error: + logger.debug(_('Errors occurred during transaction.')) + if el_not_found: + _log_rpm_trans_with_swdb(rpm_transaction, cached_tsi) + + +class tmpdir(object): + # used by subscription-manager (src/dnf-plugins/product-id.py) + def __init__(self): + prefix = '%s-' % dnf.const.PREFIX + self.path = tempfile.mkdtemp(prefix=prefix) + + def __enter__(self): + return self.path + + def __exit__(self, exc_type, exc_value, traceback): + rm_rf(self.path) + +class Bunch(dict): + """Dictionary with attribute accessing syntax. + + In DNF, prefer using this over dnf.yum.misc.GenericHolder. + + Credit: Alex Martelli, Doug Hudgeon + + """ + def __init__(self, *args, **kwds): + super(Bunch, self).__init__(*args, **kwds) + self.__dict__ = self + + def __hash__(self): + return id(self) + + +class MultiCallList(list): + def __init__(self, iterable): + super(MultiCallList, self).__init__() + self.extend(iterable) + + def __getattr__(self, what): + def fn(*args, **kwargs): + def call_what(v): + method = getattr(v, what) + return method(*args, **kwargs) + return list(map(call_what, self)) + return fn + + def __setattr__(self, what, val): + def setter(item): + setattr(item, what, val) + return list(map(setter, self)) diff --git a/dnf/yum/CMakeLists.txt b/dnf/yum/CMakeLists.txt new file mode 100644 index 0000000..c0c5edd --- /dev/null +++ b/dnf/yum/CMakeLists.txt @@ -0,0 +1,2 @@ +FILE(GLOB yum_SRCS *.py) +INSTALL (FILES ${yum_SRCS} DESTINATION ${PYTHON_INSTALL_DIR}/dnf/yum) diff --git a/dnf/yum/__init__.py b/dnf/yum/__init__.py new file mode 100644 index 0000000..ef2acfe --- /dev/null +++ b/dnf/yum/__init__.py @@ -0,0 +1,19 @@ +# __init__.py +# The legacy YUM subpackage. +# +# Copyright (C) 2013 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. 
+# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# diff --git a/dnf/yum/misc.py b/dnf/yum/misc.py new file mode 100644 index 0000000..0f92235 --- /dev/null +++ b/dnf/yum/misc.py @@ -0,0 +1,457 @@ +# misc.py +# Copyright (C) 2012-2016 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# + +""" +Assorted utility functions for yum. 
+""" + +from __future__ import print_function, absolute_import +from __future__ import unicode_literals +from dnf.exceptions import MiscError +from dnf.pycomp import base64_decodebytes, basestring, unicode +from stat import * +import libdnf.utils +import dnf.const +import dnf.crypto +import dnf.exceptions +import dnf.i18n +import errno +import glob +import hashlib +import io +import os +import os.path +import pwd +import re +import shutil +import tempfile + +_available_checksums = set(['md5', 'sha1', 'sha256', 'sha384', 'sha512']) +_default_checksums = ['sha256'] + + +_re_compiled_glob_match = None +def re_glob(s): + """ Tests if a string is a shell wildcard. """ + global _re_compiled_glob_match + if _re_compiled_glob_match is None: + _re_compiled_glob_match = re.compile(r'[*?]|\[.+\]').search + return _re_compiled_glob_match(s) + +_re_compiled_full_match = None +def re_full_search_needed(s): + """ Tests if a string needs a full nevra match, instead of just name. """ + global _re_compiled_full_match + if _re_compiled_full_match is None: + # A glob, or a "." or "-" separator, followed by something (the ".") + one = re.compile(r'.*([-.*?]|\[.+\]).').match + # Any epoch, for envra + two = re.compile('[0-9]+:').match + _re_compiled_full_match = (one, two) + for rec in _re_compiled_full_match: + if rec(s): + return True + return False + + +class Checksums(object): + """ Generate checksum(s), on given pieces of data. Producing the + Length and the result(s) when complete. 
""" + + def __init__(self, checksums=None, ignore_missing=False, ignore_none=False): + if checksums is None: + checksums = _default_checksums + self._sumalgos = [] + self._sumtypes = [] + self._len = 0 + + done = set() + for sumtype in checksums: + if sumtype == 'sha': + sumtype = 'sha1' + if sumtype in done: + continue + + if sumtype in _available_checksums: + sumalgo = hashlib.new(sumtype) + elif ignore_missing: + continue + else: + raise MiscError('Error Checksumming, bad checksum type %s' % + sumtype) + done.add(sumtype) + self._sumtypes.append(sumtype) + self._sumalgos.append(sumalgo) + if not done and not ignore_none: + raise MiscError('Error Checksumming, no valid checksum type') + + def __len__(self): + return self._len + + # Note that len(x) is assert limited to INT_MAX, which is 2GB on i686. + length = property(fget=lambda self: self._len) + + def update(self, data): + self._len += len(data) + for sumalgo in self._sumalgos: + data = data.encode('utf-8') if isinstance(data, unicode) else data + sumalgo.update(data) + + def read(self, fo, size=2**16): + data = fo.read(size) + self.update(data) + return data + + def hexdigests(self): + ret = {} + for sumtype, sumdata in zip(self._sumtypes, self._sumalgos): + ret[sumtype] = sumdata.hexdigest() + return ret + + def hexdigest(self, checksum=None): + if checksum is None: + if not self._sumtypes: + return None + checksum = self._sumtypes[0] + if checksum == 'sha': + checksum = 'sha1' + return self.hexdigests()[checksum] + + def digests(self): + ret = {} + for sumtype, sumdata in zip(self._sumtypes, self._sumalgos): + ret[sumtype] = sumdata.digest() + return ret + + def digest(self, checksum=None): + if checksum is None: + if not self._sumtypes: + return None + checksum = self._sumtypes[0] + if checksum == 'sha': + checksum = 'sha1' + return self.digests()[checksum] + +def get_default_chksum_type(): + return _default_checksums[0] + +def checksum(sumtype, file, CHUNK=2**16, datasize=None): + """takes filename, hand 
back Checksum of it + sumtype = md5 or sha/sha1/sha256/sha512 (note sha == sha1) + filename = /path/to/file + CHUNK=65536 by default""" + + # chunking brazenly lifted from Ryan Tomayko + + if isinstance(file, basestring): + try: + with open(file, 'rb', CHUNK) as fo: + return checksum(sumtype, fo, CHUNK, datasize) + except (IOError, OSError): + raise MiscError('Error opening file for checksum: %s' % file) + + try: + # assumes file is a file-like-object + data = Checksums([sumtype]) + while data.read(file, CHUNK): + if datasize is not None and data.length > datasize: + break + + # This screws up the length, but that shouldn't matter. We only care + # if this checksum == what we expect. + if datasize is not None and datasize != data.length: + return '!%u!%s' % (datasize, data.hexdigest(sumtype)) + + return data.hexdigest(sumtype) + except (IOError, OSError) as e: + raise MiscError('Error reading file for checksum: %s' % file) + +class GenericHolder(object): + """Generic Holder class used to hold other objects of known types + It exists purely to be able to do object.somestuff, object.someotherstuff + or object[key] and pass object to another function that will + understand it""" + + def __init__(self, iter=None): + self.__iter = iter + + def __iter__(self): + if self.__iter is not None: + return iter(self[self.__iter]) + + def __getitem__(self, item): + if hasattr(self, item): + return getattr(self, item) + else: + raise KeyError(item) + + def all_lists(self): + """Return a dictionary of all lists.""" + return {key: list_ for key, list_ in vars(self).items() + if type(list_) is list} + + def merge_lists(self, other): + """ Concatenate the list attributes from 'other' to ours. 
""" + for (key, val) in other.all_lists().items(): + vars(self).setdefault(key, []).extend(val) + return self + +def procgpgkey(rawkey): + '''Convert ASCII-armored GPG key to binary + ''' + + # Normalise newlines + rawkey = re.sub(b'\r\n?', b'\n', rawkey) + + # Extract block + block = io.BytesIO() + inblock = 0 + pastheaders = 0 + for line in rawkey.split(b'\n'): + if line.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'): + inblock = 1 + elif inblock and line.strip() == b'': + pastheaders = 1 + elif inblock and line.startswith(b'-----END PGP PUBLIC KEY BLOCK-----'): + # Hit the end of the block, get out + break + elif pastheaders and line.startswith(b'='): + # Hit the CRC line, don't include this and stop + break + elif pastheaders: + block.write(line + b'\n') + + # Decode and return + return base64_decodebytes(block.getvalue()) + + +def keyInstalled(ts, keyid, timestamp): + ''' + Return if the GPG key described by the given keyid and timestamp are + installed in the rpmdb. + + The keyid and timestamp should both be passed as integers. + The ts is an rpm transaction set object + + Return values: + - -1 key is not installed + - 0 key with matching ID and timestamp is installed + - 1 key with matching ID is installed but has an older timestamp + - 2 key with matching ID is installed but has a newer timestamp + + No effort is made to handle duplicates. The first matching keyid is used to + calculate the return result. 
+ ''' + # Search + for hdr in ts.dbMatch('name', 'gpg-pubkey'): + if hdr['version'] == keyid: + installedts = int(hdr['release'], 16) + if installedts == timestamp: + return 0 + elif installedts < timestamp: + return 1 + else: + return 2 + + return -1 + + +def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True): + if not os.path.exists(gpgdir): + os.makedirs(gpgdir) + + with dnf.crypto.pubring_dir(gpgdir), dnf.crypto.Context() as ctx: + # import the key + with open(os.path.join(gpgdir, 'gpg.conf'), 'wb') as fp: + fp.write(b'') + ctx.op_import(rawkey) + + if make_ro_copy: + + rodir = gpgdir + '-ro' + if not os.path.exists(rodir): + os.makedirs(rodir, mode=0o755) + for f in glob.glob(gpgdir + '/*'): + basename = os.path.basename(f) + ro_f = rodir + '/' + basename + shutil.copy(f, ro_f) + os.chmod(ro_f, 0o755) + # yes it is this stupid, why do you ask? + opts = """lock-never + no-auto-check-trustdb + trust-model direct + no-expensive-trust-checks + no-permission-warning + preserve-permissions + """ + with open(os.path.join(rodir, 'gpg.conf'), 'w', 0o755) as fp: + fp.write(opts) + + + return True + + +def getCacheDir(): + """return a path to a valid and safe cachedir - only used when not running + as root or when --tempcache is set""" + + uid = os.geteuid() + try: + usertup = pwd.getpwuid(uid) + username = dnf.i18n.ucd(usertup[0]) + prefix = '%s-%s-' % (dnf.const.PREFIX, username) + except KeyError: + prefix = '%s-%s-' % (dnf.const.PREFIX, uid) + + # check for /var/tmp/prefix-* - + dirpath = '%s/%s*' % (dnf.const.TMPDIR, prefix) + cachedirs = sorted(glob.glob(dirpath)) + for thisdir in cachedirs: + stats = os.lstat(thisdir) + if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid: + return thisdir + + # make the dir (tempfile.mkdtemp()) + cachedir = tempfile.mkdtemp(prefix=prefix, dir=dnf.const.TMPDIR) + return cachedir + +def seq_max_split(seq, max_entries): + """ Given a seq, split into a list of lists of length max_entries each. 
""" + ret = [] + num = len(seq) + seq = list(seq) # Trying to use a set/etc. here is bad + beg = 0 + while num > max_entries: + end = beg + max_entries + ret.append(seq[beg:end]) + beg += max_entries + num -= max_entries + ret.append(seq[beg:]) + return ret + +def unlink_f(filename): + """ Call os.unlink, but don't die if the file isn't there. This is the main + difference between "rm -f" and plain "rm". """ + try: + os.unlink(filename) + except OSError as e: + if e.errno != errno.ENOENT: + raise + +def stat_f(filename, ignore_EACCES=False): + """ Call os.stat(), but don't die if the file isn't there. Returns None. """ + try: + return os.stat(filename) + except OSError as e: + if e.errno in (errno.ENOENT, errno.ENOTDIR): + return None + if ignore_EACCES and e.errno == errno.EACCES: + return None + raise + +def _getloginuid(): + """ Get the audit-uid/login-uid, if available. os.getuid() is returned + instead if there was a problem. Note that no caching is done here. """ + # We might normally call audit.audit_getloginuid(), except that requires + # importing all of the audit module. And it doesn't work anyway: BZ 518721 + try: + with open("/proc/self/loginuid") as fo: + data = fo.read() + return int(data) + except (IOError, ValueError): + return os.getuid() + +_cached_getloginuid = None +def getloginuid(): + """ Get the audit-uid/login-uid, if available. os.getuid() is returned + instead if there was a problem. The value is cached, so you don't + have to save it. """ + global _cached_getloginuid + if _cached_getloginuid is None: + _cached_getloginuid = _getloginuid() + return _cached_getloginuid + +def decompress(filename, dest=None, fn_only=False, check_timestamps=False): + """take a filename and decompress it into the same relative location. 
+ if the file is not compressed just return the file""" + + ztype = None + out = filename # If the file is not compressed, it returns the same file + + dot_pos = filename.rfind('.') + if dot_pos > 0: + ext = filename[dot_pos:] + if ext in ('.zck', '.xz', '.bz2', '.gz'): + ztype = ext + out = dest if dest else filename[:dot_pos] + + if ztype and not fn_only: + if check_timestamps: + fi = stat_f(filename) + fo = stat_f(out) + if fi and fo and fo.st_mtime == fi.st_mtime: + return out + + try: + libdnf.utils.decompress(filename, out, 0o644, ztype) + except RuntimeError as e: + raise dnf.exceptions.MiscError(str(e)) + + if check_timestamps and fi: + os.utime(out, (fi.st_mtime, fi.st_mtime)) + + return out + +def calculate_repo_gen_dest(filename, generated_name): + dest = os.path.dirname(filename) + dest += '/gen' + if not os.path.exists(dest): + os.makedirs(dest, mode=0o755) + return dest + '/' + generated_name + +def repo_gen_decompress(filename, generated_name, cached=False): + """ This is a wrapper around decompress, where we work out a cached + generated name, and use check_timestamps. filename _must_ be from + a repo. and generated_name is the type of the file. """ + + dest = calculate_repo_gen_dest(filename, generated_name) + return decompress(filename, dest=dest, check_timestamps=True, fn_only=cached) + +def read_in_items_from_dot_dir(thisglob, line_as_list=True): + """ Takes a glob of a dir (like /etc/foo.d/\\*.foo) returns a list of all + the lines in all the files matching that glob, ignores comments and blank + lines, optional paramater 'line_as_list tells whether to treat each line + as a space or comma-separated list, defaults to True. 
+ """ + results = [] + for fname in glob.glob(thisglob): + with open(fname) as f: + for line in f: + if re.match(r'\s*(#|$)', line): + continue + line = line.rstrip() # no more trailing \n's + line = line.lstrip() # be nice + if not line: + continue + if line_as_list: + line = line.replace('\n', ' ') + line = line.replace(',', ' ') + results.extend(line.split()) + continue + results.append(line) + return results diff --git a/dnf/yum/rpmtrans.py b/dnf/yum/rpmtrans.py new file mode 100644 index 0000000..57f71e6 --- /dev/null +++ b/dnf/yum/rpmtrans.py @@ -0,0 +1,420 @@ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# Copyright 2005 Duke University +# Parts Copyright 2007 Red Hat, Inc + +from __future__ import print_function, absolute_import +from __future__ import unicode_literals + +import libdnf.transaction + +from dnf.i18n import _, ucd +import dnf.callback +import dnf.transaction +import dnf.util +import rpm +import os +import logging +import sys +import tempfile +import traceback +import warnings + + +# TODO: merge w/ libdnf +# transaction set states +TS_UPDATE = 10 +TS_INSTALL = 20 +TS_ERASE = 40 +TS_OBSOLETED = 50 +TS_OBSOLETING = 60 +TS_AVAILABLE = 70 +TS_UPDATED = 90 +TS_FAILED = 100 + +TS_INSTALL_STATES = [TS_INSTALL, TS_UPDATE, TS_OBSOLETING] +TS_REMOVE_STATES = [TS_ERASE, TS_OBSOLETED, TS_UPDATED] + +logger = logging.getLogger('dnf') + + +def _add_deprecated_action(name): + """ + Wrapper to return a deprecated action constant + while printing a deprecation warning. + """ + @property + def _func(self): + msg = "%s.%s is deprecated. Use dnf.callback.%s instead." \ + % (self.__class__.__name__, name, name) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + value = getattr(dnf.callback, name) + return value + return _func + + +class TransactionDisplay(object): + # :api + + def __init__(self): + # :api + pass + + # use constants from dnf.callback which are the official API + PKG_CLEANUP = _add_deprecated_action("PKG_CLEANUP") + PKG_DOWNGRADE = _add_deprecated_action("PKG_DOWNGRADE") + PKG_REMOVE = _add_deprecated_action("PKG_REMOVE") + PKG_ERASE = PKG_REMOVE + PKG_INSTALL = _add_deprecated_action("PKG_INSTALL") + PKG_OBSOLETE = _add_deprecated_action("PKG_OBSOLETE") + PKG_REINSTALL = _add_deprecated_action("PKG_REINSTALL") + PKG_UPGRADE = _add_deprecated_action("PKG_UPGRADE") + PKG_VERIFY = _add_deprecated_action("PKG_VERIFY") + TRANS_PREPARATION = _add_deprecated_action("TRANS_PREPARATION") + PKG_SCRIPTLET = _add_deprecated_action("PKG_SCRIPTLET") + TRANS_POST = _add_deprecated_action("TRANS_POST") + + def progress(self, package, action, ti_done, ti_total, 
ts_done, ts_total): + """Report ongoing progress on a transaction item. :api + + :param package: a package being processed + :param action: the action being performed + :param ti_done: number of processed bytes of the transaction + item being processed + :param ti_total: total number of bytes of the transaction item + being processed + :param ts_done: number of actions processed in the whole + transaction + :param ts_total: total number of actions in the whole + transaction + + """ + pass + + def scriptout(self, msgs): + """msgs is the messages that were output (if any).""" + pass + + def error(self, message): + """Report an error that occurred during the transaction. :api""" + pass + + def filelog(self, package, action): + # check package object type - if it is a string - just output it + """package is the same as in progress() - a package object or simple + string action is also the same as in progress()""" + pass + + def verify_tsi_package(self, pkg, count, total): + # TODO: replace with verify_tsi? 
+ self.progress(pkg, dnf.transaction.PKG_VERIFY, 100, 100, count, total) + + +class ErrorTransactionDisplay(TransactionDisplay): + + """An RPMTransaction display that prints errors to standard output.""" + + def error(self, message): + super(ErrorTransactionDisplay, self).error(message) + dnf.util._terminal_messenger('print', message, sys.stderr) + + +class LoggingTransactionDisplay(ErrorTransactionDisplay): + ''' + Base class for a RPMTransaction display callback class + ''' + def __init__(self): + super(LoggingTransactionDisplay, self).__init__() + self.rpm_logger = logging.getLogger('dnf.rpm') + + def error(self, message): + self.rpm_logger.error(message) + + def filelog(self, package, action): + action_str = dnf.transaction.FILE_ACTIONS[action] + msg = '%s: %s' % (action_str, package) + self.rpm_logger.log(dnf.logging.SUBDEBUG, msg) + + +class RPMTransaction(object): + def __init__(self, base, test=False, displays=()): + if not displays: + displays = [ErrorTransactionDisplay()] + self.displays = displays + self.base = base + self.test = test # are we a test? + self.trans_running = False + self.fd = None + self.total_actions = 0 + self.total_installed = 0 + self.complete_actions = 0 + self.installed_pkg_names = set() + self.total_removed = 0 + + self._setupOutputLogging(base.conf.rpmverbosity) + self._te_list = [] + # Index in _te_list of the transaction element being processed (for use + # in callbacks) + self._te_index = 0 + self._tsi_cache = None + + def _setupOutputLogging(self, rpmverbosity="info"): + # UGLY... 
set up the transaction to record output from scriptlets + io_r = tempfile.NamedTemporaryFile() + self._readpipe = io_r + self._writepipe = open(io_r.name, 'w+b') + self.base._ts.setScriptFd(self._writepipe) + rpmverbosity = {'critical' : 'crit', + 'emergency' : 'emerg', + 'error' : 'err', + 'information' : 'info', + 'warn' : 'warning'}.get(rpmverbosity, rpmverbosity) + rpmverbosity = 'RPMLOG_' + rpmverbosity.upper() + if not hasattr(rpm, rpmverbosity): + rpmverbosity = 'RPMLOG_INFO' + rpm.setVerbosity(getattr(rpm, rpmverbosity)) + rpm.setLogFile(self._writepipe) + + def _shutdownOutputLogging(self): + # reset rpm bits from recording output + rpm.setVerbosity(rpm.RPMLOG_NOTICE) + rpm.setLogFile(sys.stderr) + try: + self._writepipe.close() + except: + pass + + def _scriptOutput(self): + try: + # XXX ugly workaround of problem which started after upgrading glibc + # from glibc-2.27-32.fc28.x86_64 to glibc-2.28-9.fc29.x86_64 + # After this upgrade nothing is read from _readpipe, so every + # posttrans and postun scriptlet output is lost. The problem + # only occurs when using dnf-2, dnf-3 is OK. + # I did not find the root cause of this error yet. 
+ self._readpipe.seek(self._readpipe.tell()) + out = self._readpipe.read() + if not out: + return None + return out + except IOError: + pass + + def messages(self): + messages = self._scriptOutput() + if messages: + for line in messages.splitlines(): + yield ucd(line) + + def _scriptout(self): + msgs = self._scriptOutput() + for display in self.displays: + display.scriptout(msgs) + self.base.history.log_scriptlet_output(msgs) + + def __del__(self): + self._shutdownOutputLogging() + + def _extract_cbkey(self, cbkey): + """Obtain the package related to the calling callback.""" + + if hasattr(cbkey, "pkg"): + tsi = cbkey + return [tsi] + + te = self._te_list[self._te_index] + te_nevra = dnf.util._te_nevra(te) + if self._tsi_cache: + if str(self._tsi_cache[0]) == te_nevra: + return self._tsi_cache + items = [] + for tsi in self.base.transaction: + if tsi.action == libdnf.transaction.TransactionItemAction_REINSTALL: + # skip REINSTALL in order to return REINSTALLED + continue + if str(tsi) == te_nevra: + items.append(tsi) + if items: + self._tsi_cache = items + return items + raise RuntimeError("TransactionItem not found for key: %s" % cbkey) + + def callback(self, what, amount, total, key, client_data): + try: + if isinstance(key, str): + key = ucd(key) + if what == rpm.RPMCALLBACK_TRANS_START: + self._transStart(total) + elif what == rpm.RPMCALLBACK_TRANS_STOP: + pass + elif what == rpm.RPMCALLBACK_TRANS_PROGRESS: + self._trans_progress(amount, total) + elif what == rpm.RPMCALLBACK_ELEM_PROGRESS: + # This callback type is issued every time the next transaction + # element is about to be processed by RPM, before any other + # callbacks are issued. "amount" carries the index of the element. 
+ self._elemProgress(key, amount) + elif what == rpm.RPMCALLBACK_INST_OPEN_FILE: + return self._instOpenFile(key) + elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE: + self._instCloseFile(key) + elif what == rpm.RPMCALLBACK_INST_START: + self._inst_start(key) + elif what == rpm.RPMCALLBACK_INST_STOP: + self._inst_stop(key) + elif what == rpm.RPMCALLBACK_INST_PROGRESS: + self._instProgress(amount, total, key) + elif what == rpm.RPMCALLBACK_UNINST_START: + self._uninst_start(key) + elif what == rpm.RPMCALLBACK_UNINST_STOP: + self._unInstStop(key) + elif what == rpm.RPMCALLBACK_UNINST_PROGRESS: + self._uninst_progress(amount, total, key) + elif what == rpm.RPMCALLBACK_CPIO_ERROR: + self._cpioError(key) + elif what == rpm.RPMCALLBACK_UNPACK_ERROR: + self._unpackError(key) + elif what == rpm.RPMCALLBACK_SCRIPT_ERROR: + self._scriptError(amount, total, key) + elif what == rpm.RPMCALLBACK_SCRIPT_START: + self._script_start(key) + elif what == rpm.RPMCALLBACK_SCRIPT_STOP: + self._scriptStop() + except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() + except_list = traceback.format_exception(exc_type, exc_value, exc_traceback) + logger.critical(''.join(except_list)) + + def _transStart(self, total): + self.total_actions = total + if self.test: return + self.trans_running = True + self._te_list = list(self.base._ts) + + def _trans_progress(self, amount, total): + action = dnf.transaction.TRANS_PREPARATION + for display in self.displays: + display.progress('', action, amount + 1, total, 1, 1) + + def _elemProgress(self, key, index): + self._te_index = index + self.complete_actions += 1 + if not self.test: + transaction_list = self._extract_cbkey(key) + for display in self.displays: + display.filelog(transaction_list[0].pkg, transaction_list[0].action) + + def _instOpenFile(self, key): + self.lastmsg = None + transaction_list = self._extract_cbkey(key) + pkg = transaction_list[0].pkg + rpmloc = pkg.localPkg() + try: + self.fd = open(rpmloc) + except IOError as e: 
+ for display in self.displays: + display.error("Error: Cannot open file %s: %s" % (rpmloc, e)) + else: + if self.trans_running: + self.total_installed += 1 + self.installed_pkg_names.add(pkg.name) + return self.fd.fileno() + + def _instCloseFile(self, key): + self.fd.close() + self.fd = None + + def _inst_start(self, key): + pass + + def _inst_stop(self, key): + if self.test or not self.trans_running: + return + + self._scriptout() + + if self.complete_actions == self.total_actions: + # RPM doesn't explicitly report when post-trans phase starts + action = dnf.transaction.TRANS_POST + for display in self.displays: + display.progress(None, action, None, None, None, None) + + def _instProgress(self, amount, total, key): + transaction_list = self._extract_cbkey(key) + pkg = transaction_list[0].pkg + action = transaction_list[0].action + for display in self.displays: + display.progress(pkg, action, amount, total, self.complete_actions, self.total_actions) + + def _uninst_start(self, key): + self.total_removed += 1 + + def _uninst_progress(self, amount, total, key): + transaction_list = self._extract_cbkey(key) + pkg = transaction_list[0].pkg + action = transaction_list[0].action + for display in self.displays: + display.progress(pkg, action, amount, total, self.complete_actions, self.total_actions) + + def _unInstStop(self, key): + if self.test: + return + + self._scriptout() + + def _cpioError(self, key): + transaction_list = self._extract_cbkey(key) + msg = "Error in cpio payload of rpm package %s" % transaction_list[0].pkg + for display in self.displays: + display.error(msg) + + def _unpackError(self, key): + transaction_list = self._extract_cbkey(key) + msg = "Error unpacking rpm package %s" % transaction_list[0].pkg + for display in self.displays: + display.error(msg) + + def _scriptError(self, amount, total, key): + # "amount" carries the failed scriptlet tag, + # "total" carries fatal/non-fatal status + scriptlet_name = rpm.tagnames.get(amount, "") + + 
transaction_list = self._extract_cbkey(key) + name = transaction_list[0].pkg.name + + msg = ("Error in %s scriptlet in rpm package %s" % (scriptlet_name, name)) + + for display in self.displays: + display.error(msg) + + def _script_start(self, key): + # TODO: this doesn't fit into libdnf TransactionItem use cases + action = dnf.transaction.PKG_SCRIPTLET + if key is None and self._te_list == []: + pkg = 'None' + else: + transaction_list = self._extract_cbkey(key) + pkg = transaction_list[0].pkg + complete = self.complete_actions if self.total_actions != 0 and self.complete_actions != 0 \ + else 1 + total = self.total_actions if self.total_actions != 0 and self.complete_actions != 0 else 1 + for display in self.displays: + display.progress(pkg, action, 100, 100, complete, total) + + def _scriptStop(self): + self._scriptout() + + def verify_tsi_package(self, pkg, count, total): + for display in self.displays: + display.verify_tsi_package(pkg, count, total) diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt new file mode 100644 index 0000000..19107b9 --- /dev/null +++ b/doc/CMakeLists.txt @@ -0,0 +1,36 @@ +# html and man documentation are separate targets, apparently there's no way to +# tell sphinx-build to do them both in one go: + +if (${PYTHON_VERSION_MAJOR} STREQUAL "2") + SET(SPHINX_BUILD_NAME "sphinx-build") +else() + SET(SPHINX_BUILD_NAME "sphinx-build-3") +endif() + + +ADD_CUSTOM_TARGET (doc-html + PYTHONPATH=${CMAKE_SOURCE_DIR} ${SPHINX_BUILD_NAME} -b html + ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Building html documentation") +ADD_CUSTOM_TARGET (doc-man + PYTHONPATH=${CMAKE_SOURCE_DIR} ${SPHINX_BUILD_NAME} -b man + ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Building manpage documentation") +ADD_CUSTOM_TARGET (doc) +ADD_DEPENDENCIES (doc doc-html doc-man) + +if (NOT WITH_MAN EQUAL 0) + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/dnf.8 + ${CMAKE_CURRENT_BINARY_DIR}/dnf-automatic.8 + 
${CMAKE_CURRENT_BINARY_DIR}/yum2dnf.8 + ${CMAKE_CURRENT_BINARY_DIR}/yum.8 + ${CMAKE_CURRENT_BINARY_DIR}/yum-shell.8 + DESTINATION share/man/man8) + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/dnf.conf.5 + ${CMAKE_CURRENT_BINARY_DIR}/yum.conf.5 + DESTINATION share/man/man5) + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/yum-aliases.1 + DESTINATION share/man/man1) + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/dnf.modularity.7 + DESTINATION share/man/man7) +endif() diff --git a/doc/__init__.py b/doc/__init__.py new file mode 100644 index 0000000..5f7701b --- /dev/null +++ b/doc/__init__.py @@ -0,0 +1,19 @@ +# __init__.py +# DNF documentation package. +# +# Copyright (C) 2012-2013 Red Hat, Inc. +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# the GNU General Public License v.2, or (at your option) any later version. +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY expressed or implied, including the implied warranties of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. You should have received a copy of the +# GNU General Public License along with this program; if not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the +# source code or documentation are not subject to the GNU General Public +# License and may only be used or replicated with the express permission of +# Red Hat, Inc. +# diff --git a/doc/api.rst b/doc/api.rst new file mode 100644 index 0000000..77cd3ad --- /dev/null +++ b/doc/api.rst @@ -0,0 +1,79 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. 
+ + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +################### + DNF API Reference +################### + +.. contents:: + +============== + Introduction +============== + +The provided Python API to DNF is supposed to mainly allow writing the following two categories of programs: + +1. :doc:`plugins ` to DNF which extend functionality of the system's DNF installation. +2. extension applications that embed DNF (by importing its Python modules) to perform specific package management tasks. + +Please refer to the :doc:`use_cases` where you can find examples of API usage. + +.. NOTE:: + + The API consists of exactly those elements described in this document, items not documented here can change release to release. Opening a `bugzilla`_ if certain needed functionality is not exposed is the right thing to do. + +============ + Versioning +============ + +DNF follows the Semantic Versioning as defined at http://semver.org/. + +This basically means that if your piece of software depends on e.g. DNF 1.1, the requirement can be specified as ``1.1 <= dnf < 2``. 
In other words, you can be sure that your software will be API-compatible with any later release of DNF until the next major version is issued. The same applies for the CLI compatibility. + +.. _deprecating-label: + +Incompatible API changes are subject to our deprecation policy. Deprecated API items (classes, methods, etc.) are designated as such in the :doc:`release_notes`. The first release where support for such items can be dropped entirely must have, relative to the deprecating release, a higher major version number. DNF will log a warning when a deprecated item is used. + +=========== + Contents +=========== + +API Documentation Contents + +.. toctree:: + :maxdepth: 2 + + api_common + api_base + api_exceptions + api_conf + api_repos + api_sack + api_queries + api_selector + api_package + api_transaction + api_comps + api_plugins + api_callback + api_rpm + api_cli + api_module + +Indices: + +* :ref:`genindex` diff --git a/doc/api_base.rst b/doc/api_base.rst new file mode 100644 index 0000000..5fd5b4c --- /dev/null +++ b/doc/api_base.rst @@ -0,0 +1,249 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +=================================== + ``Base``---The centerpiece of DNF +=================================== + +.. class:: dnf.Base + + Instances of :class:`dnf.Base` are the central point of functionality supplied by DNF. An application will typically create a single instance of this class which it will keep for the runtime needed to accomplish its packaging tasks. Plugins are managed by DNF and get a reference to :class:`dnf.Base` object when they run. + + :class:`.Base` instances are stateful objects holding references to various data sources and data sinks. To properly finalize and close off any handles the object may hold, client code should either call :meth:`.Base.close` when it has finished operations with the instance, or use the instance as a context manager. After the object has left the context, or its :meth:`.Base.close` has been called explicitly, it must not be used. :meth:`.Base.close` will delete all downloaded packages upon successful transaction. + + .. attribute:: comps + + Is ``None`` by default. Explicit load via :meth:`read_comps` initializes this attribute to a :class:`dnf.comps.Comps` instance. + + .. attribute:: conf + + An instance of :class:`dnf.conf.Conf`, concentrates all the different configuration options. :meth:`__init__` initializes this to usable defaults. + + .. attribute:: repos + + A :class:`dnf.repodict.RepoDict` instance, this member object contains all the repositories available. + + .. attribute:: sack + + The :class:`Sack` that this :class:`Base` object is using. It needs to be explicitly initialized by :meth:`fill_sack`. + + .. attribute:: transaction + + A resolved transaction object, a :class:`dnf.transaction.Transaction` instance, or ``None`` if no transaction has been prepared yet. + + .. 
method:: __init__() + + Init an instance with a reasonable default configuration. The constructor takes no arguments. + + .. method:: add_remote_rpms(path_list, strict=True, progress=None) + + Add RPM files at list `path_list` to the :attr:`sack` and return the list of respective + :class:`dnf.package.Package` instances. Does the download to a temporary files for each path if + `path` is a remote URL. Raises :exc:`IOError` if there are problems obtaining during reading + files and `strict=True`. `progress`, if given, should be a :class:`.DownloadProgress` and can be + used by the caller to monitor the progress of the download. + + .. method:: close() + + Close all external handles the object holds. This is called automatically via context manager mechanism if the instance is handled using the ``with`` statement. + + .. method:: init_plugins([disabled_glob=None, cli=None]) + + Initialize plugins. If you want to disable some plugins pass the list of their name patterns to + `disabled_glob`. When run from interactive script then also pass your :class:`dnf.cli.Cli` instance. + + .. method:: pre_configure_plugins() + + Configure plugins by running their pre_configure() method. It makes possible to change + variables before repo files and rpmDB are loaded. It also makes possible to create internal + repositories that will be affected by ``--disablerepo`` and ``--enablerepo``. + + .. method:: configure_plugins() + + Configure plugins by running their configure() method. + + .. method:: fill_sack([load_system_repo=True, load_available_repos=True]) + + Setup the package sack. If `load_system_repo` is ``True``, load information about packages in the local RPMDB into the sack. Else no package is considered installed during dependency solving. If `load_available_repos` is ``True``, load information about packages from the available repositories into the sack. + + This operation will call :meth:`load() ` for repos as necessary and can take a long time. 
Adding repositories or changing repositories' configuration does not affect the information within the sack until :meth:`fill_sack` has been called. + + Before this method is invoked, the client application should setup any explicit configuration relevant to the operation. This will often be at least :attr:`conf.cachedir <.Conf.cachedir>` and the substitutions used in repository URLs. See :attr:`.Conf.substitutions`. + + Throws `IOError` exception in case cached metadata could not be opened. + + Example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + conf = base.conf + conf.cachedir = '/tmp/my_cache_dir' + conf.substitutions['releasever'] = '30' + conf.substitutions['basearch'] = 'x86_64' + + base.repos.add_new_repo('my-repo', conf, + baseurl=["http://download.fedoraproject.org/pub/fedora/linux/releases/$releasever/Everything/$basearch/os/"]) + base.fill_sack() + + print("Enabled repositories:") + for repo in base.repos.iter_enabled(): + print("id: {}".format(repo.id)) + print("baseurl: {}".format(repo.baseurl)) + + + .. method:: do_transaction([display]) + + Perform the resolved transaction. Use the optional `display` object(s) to report the progress. `display` can be either an instance of a subclass of :class:`dnf.callback.TransactionProgress` or a sequence of such instances. Raise :exc:`dnf.exceptions.Error` or dnf.exceptions.TransactionCheckError. + + .. method:: download_packages(pkglist, progress=None, callback_total=None) + + Download packages in `pkglist` from remote repositories. Packages from local repositories or from the command line are not downloaded. `progress`, if given, should be a :class:`.DownloadProgress` and can be used by the caller to monitor the progress of the download. `callback_total` is a function accepting two parameters: total size of the downloaded content in bytes and time when the download process started, in seconds since the epoch. Raises :exc:`.DownloadError` if some packages failed to download. + + .. 
method:: group_install(group_id, pkg_types, exclude=None, strict=True) + + Mark group with corresponding `group_id` installed and mark the packages in the group for installation. Return the number of packages that the operation has marked for installation. `pkg_types` is a sequence of strings determining the kinds of packages to be installed, where the respective groups can be selected by including ``"mandatory"``, ``"default"`` or ``"optional"`` in it. If `exclude` is given, it has to be an iterable of package name glob patterns: :meth:`.group_install` will then not mark the respective packages for installation whenever possible. Parameter `strict` is a boolean indicating whether group packages that exist but are non-installable due to e.g. dependency issues should be skipped (False) or cause transaction to fail to resolve (True). + + .. method:: group_remove(group_id) + + Mark group with corresponding `group_id` not installed. All the packages marked as belonging to this group will be marked for removal. Return the number of packages marked for removal in this call. + + .. method:: group_upgrade(group_id) + + Upgrade group with corresponding `group_id`. If there has been packages added to the group's comps information since installing on the system, they will be marked for installation. Similarly, removed packages get marked for removal. The remaining packages in the group are marked for an upgrade. The operation respects the package types from the original installation of the group. + + .. method:: environment_install(env_id, types, exclude=None, strict=True, exclude_groups=None) + + Similar to :meth:`.group_install` but operates on environmental groups. `exclude_groups` is an iterable of group IDs that will not be marked as installed. + + .. method:: environment_remove(env_id) + + Similar to :meth:`.group_remove` but operates on environmental groups. + + .. 
method:: environment_upgrade(env_id) + + Similar to :meth:`.group_upgrade` but operates on environmental groups. + + .. method:: read_all_repos() + + Read repository configuration from the main configuration file specified by :attr:`dnf.conf.Conf.config_file_path` and any ``.repo`` files under :attr:`dnf.conf.Conf.reposdir`. All the repositories found this way are added to :attr:`~.Base.repos`. + + .. method:: read_comps(arch_filter=False) + + Read comps data from all the enabled repositories and initialize the :attr:`comps` object. If `arch_filter` is set to ``True``, the result is limited to system basearch. + + .. method:: reset(\*\*kwargs) + + Reset the state of different :class:`.Base` attributes. Selecting attributes to reset is controlled by passing the method keyword arguments set to ``True``. When called with no arguments the method has no effect. + + =============== ================================================= + argument passed effect + =============== ================================================= + `goal=True` drop all the current :ref:`packaging requests ` + `repos=True` drop the current repositories (see :attr:`.repos`). This won't + affect the package data already loaded into the :attr:`.sack`. + `sack=True` drop the current sack (see :attr:`.sack`) + =============== ================================================= + + .. method:: resolve(allow_erasing=False) + + Resolve the marked requirements and store the resulting :class:`dnf.transaction.Transaction` into :attr:`transaction`. Raise :exc:`dnf.exceptions.DepsolveError` on a depsolving error. Return ``True`` if the resolved transaction is non-empty. + + Enabling `allow_erasing` lets the solver remove other packages while looking to fulfill the current packaging requests. For instance, this is used to allow the solver to remove dependants of a package being removed. + + The exact operation of the solver further depends on the :attr:`dnf.conf.Conf.best` setting. + + .. 
method:: update_cache(timer=False) + + Downloads and caches in binary format metadata for all known repos. Tries to avoid downloading + whenever possible (e.g. when the local metadata hasn’t expired yet or when the metadata + timestamp hasn’t changed). + + If 'timer' equals 'True', DNF becomes more resource-aware, meaning DNF will not do anything if + running on battery power and will terminate immediately if it’s too soon after the last + successful update_cache operation. + + When the method is used after :meth:`fill_sack`, information about packages will not be updated. + + .. _package_marking-label: + + The :class:`.Base` class provides a number of methods to make packaging requests that can later be resolved and turned into a transaction. The `pkg_spec` argument some of them take must be a package specification recognized by :class:`dnf.subject.Subject`. If these methods fail to find suitable packages for the operation they raise a :exc:`~dnf.exceptions.MarkingError`. Note that successful completion of these methods does not necessarily imply that the desired transaction can be carried out (e.g. for dependency reasons). + + .. method:: downgrade(pkg_spec) + + Mark packages matching `pkg_spec` for downgrade. + + .. method:: install(pkg_spec, reponame=None, strict=True, forms=None) + + Mark packages matching `pkg_spec` for installation. + `reponame` can be a name of a repository or a list of repository names. If given, the selection of available packages is limited to packages from these repositories. If strict is set to False, the installation ignores packages with dependency solving problems. Parameter `forms` has the same meaning as in :meth:`dnf.subject.Subject.get_best_query`. + + .. method:: package_downgrade(pkg, strict=False) + + If `pkg` is a :class:`dnf.package.Package` in an available repository, mark the matching installed package for downgrade to `pkg`. If strict=False it ignores problems with dep-solving. + + .. 
method:: package_install(pkg, strict=True) + + Mark `pkg` (a :class:`dnf.package.Package` instance) for installation. Ignores package that is already installed. `strict` has the same meaning as in :meth:`install`. + + .. method:: package_upgrade(pkg) + + If `pkg` is a :class:`dnf.package.Package` in an available repository, mark the matching installed package for upgrade to `pkg`. + + .. method:: autoremove() + + Removes all 'leaf' packages from the system that were originally installed as dependencies of user-installed packages but which are no longer required by any such package. + + .. method:: remove(pkg_spec, reponame=None, forms=None) + + Mark packages matching `pkg_spec` for removal. `reponame` and `forms` have the same meaning as in :meth:`install`. + + .. method:: upgrade(pkg_spec, reponame=None) + + Mark packages matching `pkg_spec` for upgrade. `reponame` has the same meaning as in :meth:`install`. + + .. method:: upgrade_all(reponame=None) + + Mark all installed packages for an upgrade. `reponame` has the same meaning as in :meth:`install`. + + .. method:: urlopen(url, repo=None, mode='w+b', \*\*kwargs): + + Open the specified absolute `url` and return a file object which respects proxy setting even for non-repo downloads + + .. method:: install_specs(install, exclude=None, reponame=None, strict=True, forms=None) + + Provides unified way to mark packages, groups or modules for installation. The `install` and `exclude` arguments have to be iterables containing specifications of packages (e.g. 'dnf') or groups/modules (e.g. '\@core'). Specifications from the `exclude` list will not be marked for installation. The `reponame`, `strict` and `forms` parameters have the same meaning as in :meth:`install`. In case of errors the method raises :exc:`dnf.exceptions.MarkingErrors`. 
+ + Example to install two groups and a package:: + + #!/usr/bin/python3 + import dnf + import dnf.cli.progress + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + base.install_specs(['acpi', '@Web Server', '@core']) + print("Resolving transaction...",) + base.resolve() + print("Downloading packages...") + progress = dnf.cli.progress.MultiFileProgressMeter() + base.download_packages(base.transaction.install_set, progress) + print("Installing...") + base.do_transaction() diff --git a/doc/api_callback.rst b/doc/api_callback.rst new file mode 100644 index 0000000..5828d00 --- /dev/null +++ b/doc/api_callback.rst @@ -0,0 +1,102 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +=================================== + Progress Reporting with Callbacks +=================================== + +.. module:: dnf.callback + +.. class:: Payload + + Represents one item (file) from the download batch. + + .. method:: __str__ + + Provide concise, human-readable representation of this Payload. + + .. 
attribute:: download_size + + Total size of this Payload when transferred (e.g. over network). + +.. class:: DownloadProgress + + Base class providing callbacks to receive information about an ongoing download. + + .. method:: start(total_files, total_size, total_drpms=0) + + Report start of a download batch. `total_files` is the total number of payloads in the batch. + `total_size` is the total number of bytes to be downloaded. `total_drpms` is the total number + of drpms payloads in the batch. + + .. method:: progress(payload, done) + + Report ongoing progress on the given `payload`. `done` is the number of bytes already downloaded from `payload`. + + .. method:: end(payload, status, msg) + + Report finished download of a `payload`, :class:`.Payload` instance. `status` is a constant with the following meaning: + + ====================== ======================================================= + `status` value meaning + ====================== ======================================================= + STATUS_OK Download finished successfully. + STATUS_DRPM DRPM rebuilt successfully. + STATUS_ALREADY_EXISTS Download skipped because the local file already exists. + STATUS_MIRROR Download failed on the current mirror, will try to use + next mirror in the list. + STATUS_FAILED Download failed because of another error. + ====================== ======================================================= + + `msg` is an optional string error message further explaining the `status`. + +.. class:: TransactionProgress + + Base class providing callbacks to receive information about an ongoing transaction. + + .. method:: error(message) + + Report an error that occurred during the transaction. `message` is a string which describes the error. + + .. method:: progress(package, action, ti_done, ti_total, ts_done, ts_total) + + Report ongoing progress on the given transaction item. 
`package` is the :class:`dnf.package.Package` being processed and `action` is a constant with the following meaning: + + ================== ================================================================================= =========== + `action` value meaning Appearance* + ================== ================================================================================= =========== + PKG_CLEANUP `package` cleanup is being performed. 3 + PKG_DOWNGRADE `package` is being installed as a downgrade. 2 + PKG_DOWNGRADED installed `package` is being downgraded. 2 + PKG_INSTALL `package` is being installed. 2 + PKG_OBSOLETE `package` is obsoleting another package. 2 + PKG_OBSOLETED installed `package` is being obsoleted. 2 + PKG_REINSTALL `package` is installed as a reinstall. 2 + PKG_REINSTALLED installed `package` is being reinstalled. 2 + PKG_REMOVE `package` is being removed. 2 + PKG_UPGRADE `package` is installed as an upgrade. 2 + PKG_UPGRADED installed `package` is being upgraded. 2 + PKG_VERIFY `package` is being verified. 5 + PKG_SCRIPTLET `package` scriptlet is being performed. Anytime + TRANS_PREPARATION `transaction` is being prepared. 1 + TRANS_POST The post-trans phase started. In this case, all the other arguments are ``None``. 4 + ================== ================================================================================= =========== + + \*\ This is order in which state of transaction which callback action can appear. Only PKG_SCRIPTLET + can appear anytime during transaction even before transaction starts. + + `ti_done` is the number of processed bytes of the transaction item, `ti_total` is the total number of bytes of the transaction item, `ts_done` is the number of actions processed in the whole transaction and `ts_total` is the total number of actions in the whole transaction. diff --git a/doc/api_cli.rst b/doc/api_cli.rst new file mode 100644 index 0000000..3b875d1 --- /dev/null +++ b/doc/api_cli.rst @@ -0,0 +1,143 @@ +.. 
+ Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + + +============================== + Command Line Interface Hooks +============================== + + +.. module:: dnf.cli + +:mod:`dnf.cli` is a part of DNF that contains code handling the command line tasks for DNF, like for instance ``dnf install emacs``, and outputs the results to the terminal. It is usually of no interest for DNF extension applications, but some parts of it described here can be used by the :doc:`api_plugins` to hook up custom commands. + +When packaging your custom command, we recommend you to define a virtual provide in the form of ``Provides: dnf-command()`` in the spec file. See :ref:`the virtual provides usage ` for the details. + +.. exception:: CliError + + Signals a CLI-specific problem (reading configuration, parsing user input, etc.). Derives from :exc:`dnf.exceptions.Error`. + +.. 
class:: dnf.cli.demand.DemandSheet + + Instances are used to track requests of commands and plugins about how CLI should set up/handle other parts of CLI processing that are not under the command's/plugin's direct control. The boolean attributes of the sheet can not be reset once explicitly set, doing so raises an :exc:`AttributeError`. + + .. attribute:: allow_erasing + + If ``True``, the dependency solver is allowed to look for solutions that include removing other packages while looking to fulfill the current packaging requests. Defaults to ``False``. Also see :meth:`dnf.Base.resolve`. + + .. attribute:: available_repos + + If ``True``, during sack creation (:attr:`.sack_activation`), download and load into the sack the available repositories. Defaults to ``False``. + + .. attribute:: resolving + + If ``True``, at a place where the CLI would otherwise successfully exit, resolve the transaction for any outstanding packaging requests before exiting. Defaults to ``False``. + + .. attribute:: root_user + + ``True`` informs the CLI that the command can only succeed if the process's effective user id is ``0``, i.e. root. Defaults to ``False``. + + .. attribute:: sack_activation + + If ``True``, demand that the CLI sets up the :class:`~.Sack` before the command's :meth:`~.Command.run` method is executed. Defaults to ``False``. + + Depending on other demands and the user's configuration, this might or might not correctly trigger metadata download for the available repositories. + + .. attribute:: load_system_repo + + If ``True``, DNF will load information about installed packages from the local RPMDB into the sack during :meth:`dnf.Base.fill_sack`. Defaults to ``True``. + + .. attribute:: cacheonly + + When ``True``, DNF will run entirely from the system cache (equivalent of ``-C`` command line option). Defaults to ``False``. + + .. attribute:: fresh_metadata + + ``False`` means that (even expired) cached repository metadata will be used. 
When ``True``, the expired repository metadata caches are synchronized with the server. Defaults to ``True``. + + .. attribute:: freshest_metadata + + If ``True``, metadata caches for all enabled repositories are forcibly expired before the sack is activated. Defaults to ``False``. + + .. attribute:: changelogs + + If ``True``, also the repository metadata containing changelogs for packages will be downloaded. Defaults to ``False``. + + .. attribute:: success_exit_status + + The return status of the DNF command on success. Defaults to ``0``. + + .. attribute:: transaction_display + + An additional instance of a subclass of :class:`dnf.callback.TransactionProgress` used to report information about an ongoing transaction. Defaults to ``None``. + +.. class:: Command + + Base class of every DNF command. + + .. attribute:: aliases + + Sequence of strings naming the command from the command line. Must be a class variable. The list has to contain at least one string, the first string in the list is considered the canonical name. A command name can contain only letters and dashes providing the name doesn't start with a dash. + + .. attribute:: base + + The :class:`dnf.Base` instance to use with this command. + + .. attribute:: cli + + The :class:`dnf.cli.Cli` instance to use with this command. + + .. attribute:: summary + + One line summary for the command as displayed by the CLI help. + + .. method:: __init__(cli) + + Command constructor which can be overridden. The constructor is called during + CLI configure phase when one of the command's aliases is parsed from `dnf` + commandline. `cli` is an instance of :class:`dnf.cli.Cli`. + + .. method:: pre_configure() + + Perform any pre-configuration on the command itself and on the CLI. Typically, the command + implements this call to set up releasever or enable/disable repository. This method is called + before configuration of repos. + + ..
method:: configure() + + Perform any configuration on the command itself and on the CLI. Typically, the command implements this call to set up any :class:`demands <.DemandSheet>`, tweak the global configuration or the repository configuration. This method is called immediately after the CLI/extension is finished configuring DNF. + + .. method:: run() + + Run the command. This method is invoked by the CLI when this command is executed. Should raise :exc:`dnf.exceptions.Error` with a proper message if the command fails. Otherwise should return ``None``. Custom commands typically override this method and put their main work code here. + +.. class:: Cli + + Manages the CLI, including reading configuration, parsing the command line and running commands. + + .. attribute:: demands + + An instance of :class:`~dnf.cli.demand.DemandSheet`, exposed to allow custom commands and plugins influence how the CLI will operate. + + .. method:: register_command(command_cls): + + Register new command. `command_cls` is a subclass of :class:`.Command`. + + .. method:: redirect_logger(self, stdout=None, stderr=None): + + Change minimal logger level for terminal output to stdout and stderr according to specific + command requirements. For stdout and stderr use logging.INFO, logging.WARNING, etc. diff --git a/doc/api_common.rst b/doc/api_common.rst new file mode 100644 index 0000000..c365fe1 --- /dev/null +++ b/doc/api_common.rst @@ -0,0 +1,34 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. 
You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +================================== + Common Provisions of the DNF API +================================== + +.. _logging_label: + +--------- + Logging +--------- + +DNF uses the standard `Python logging module `_ to do its logging. Three standard loggers are provided: + +* ``dnf``, used by the core and CLI components of DNF. Messages logged via this logger can end up written to the stdout (console) the DNF process is attached to. For this reason messages logged on the ``INFO`` level or above should be marked for localization (if the extension uses it). +* ``dnf.plugin`` should be used by plugins for debugging and similar messages that are generally not written to the standard output streams but logged into the DNF logfile. +* ``dnf.rpm`` is a logger used by RPM transaction callbacks. Plugins and extensions should not manipulate this logger. + +Extensions and plugins can add or remove logging handlers of these loggers at their own discretion. \ No newline at end of file diff --git a/doc/api_comps.rst b/doc/api_comps.rst new file mode 100644 index 0000000..1989e59 --- /dev/null +++ b/doc/api_comps.rst @@ -0,0 +1,132 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version.
+ This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +============================================= + Comps, or the Distribution Compose Metadata +============================================= + +.. module:: dnf.comps + +.. class:: Comps + + An object of this class can merge comps information from arbitrary repositories. It typically is instantiated from :class:`dnf.Base` and covers all the available repositories. + + The ``*_by_pattern`` methods all take a `pattern` and an optional `case_sensitive` parameter. The pattern is matched against names and IDs of objects in the domain (groups, categories, environments), the globbing characters in `pattern` retain their usual expanding meaning. If `case_sensitive` is ``True``, matching is done in a case-sensitive manner. + + .. attribute:: categories + + List of all contained :class:`dnf.comps.Category` objects. + + .. attribute:: environments + + List of all contained :class:`dnf.comps.Environment` objects ordered by `display_order` tag defined in comps.xml file. + + .. attribute:: groups + + List of all contained :class:`dnf.comps.Group` objects ordered by `display_order` tag defined in comps.xml file. + + .. method:: category_by_pattern(pattern, case_sensitive=False) + + Returns a :class:`dnf.comps.Category` object matching `pattern`, or ``None``. + + .. 
method:: categories_by_pattern(pattern, case_sensitive=False) + + Return an iterable of :class:`dnf.comps.Category` objects matching `pattern`. + + .. method:: categories_iter + + Return iterator over all contained :class:`dnf.comps.Category` objects. + + .. method:: environment_by_pattern(pattern, case_sensitive=False) + + Return a :class:`dnf.comps.Environment` object matching `pattern`, or ``None``. + + .. method:: environments_by_pattern(pattern, case_sensitive=False) + + Return an iterable of :class:`dnf.comps.Environment` objects matching `pattern` ordered by `display_order` tag defined in comps.xml file. + + .. attribute:: environments_iter + + Return iterator over all contained :class:`dnf.comps.Environment` objects in order they appear in comps.xml file. + + .. method:: group_by_pattern(pattern, case_sensitive=False) + + Return a :class:`dnf.comps.Group` object matching `pattern`, or ``None``. + + .. method:: groups_by_pattern(pattern, case_sensitive=False) + + Return an iterable of :class:`dnf.comps.Group` objects matching `pattern` ordered by `display_order` tag defined in comps.xml file. + + .. attribute:: groups_iter + + Return iterator over all contained :class:`dnf.comps.Group` objects in order they appear in comps.xml file. + +.. class:: Package + + Represents comps package data. + + .. NOTE:: + + Should not be confused with :class:`dnf.package.Package` which represents a package contained in a :class:`~.Sack`. There is no guarantee whether the comps package has a corresponding real sack package, i.e. there can be no package of given name in the sack, one such package, or more than one. For this reason two separate types are introduced. + + .. attribute:: name + + Name of the package. + + .. attribute:: option_type + + The type of inclusion of this particular package in its group. Must be one of the :data:`inclusion types `. + +.. class:: Category + + .. attribute:: id + + Unique identifier of the category. + + .. 
attribute:: name + + Name of the category. + + .. attribute:: ui_name + + The name of the category translated to the language given by the current locale. + + .. attribute:: ui_description + + The description of the category translated to the language given by the current locale. + +.. class:: Environment + + Has the same set of attributes as :class:`dnf.comps.Category`. + +.. class:: Group + + Has the same set of attributes as :class:`dnf.comps.Category`. + + .. method:: packages_iter() + + Return iterator over all :class:`packages <.Package>` belonging in this group. + +Following types of inclusions of objects in their parent objects are defined: + +.. data:: CONDITIONAL + +.. data:: DEFAULT + +.. data:: MANDATORY + +.. data:: OPTIONAL diff --git a/doc/api_conf.rst b/doc/api_conf.rst new file mode 100644 index 0000000..e9e9851 --- /dev/null +++ b/doc/api_conf.rst @@ -0,0 +1,90 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. 
+ +=============== + Configuration +=============== + +Configurable settings of the :class:`dnf.Base` object are stored into a :class:`dnf.conf.Conf` instance. The various options are described here. + +.. class:: dnf.conf.Conf + + This object has attributes corresponding to all configuration options from both :ref:`"[main] Options" ` and :ref:`"Options for both [main] and Repo" ` sections. For example setting a proxy to access all repositories:: + + import dnf + + base = dnf.Base() + conf = base.conf + conf.proxy = "http://the.proxy.url:3128" + conf.proxy_username = "username" + conf.proxy_password = "secret" + base.read_all_repos() + base.fill_sack() + + + .. attribute:: get_reposdir + + Returns the value of the first valid reposdir or if unavailable the value of created reposdir (string) + + .. attribute:: substitutions + + A mapping of substitutions used in repositories' remote URL configuration. The commonly used ones are: + + ========== ============================================== ============ + key meaning default + ========== ============================================== ============ + arch architecture of the machine autodetected + basearch the architecture family of the current "arch" autodetected + releasever release name of the system distribution ``None`` + ========== ============================================== ============ + + :func:`dnf.rpm.detect_releasever` can be used to detect the ``releasever`` value. + + Following example shows recommended method how to override autodetected architectures:: + + import dnf + import hawkey + + arch = hawkey.detect_arch() + base = dnf.Base() + base.conf.substitutions['arch'] = arch + base.conf.substitutions['basearch'] = dnf.rpm.basearch(arch) + base.fill_sack() + ... + + + .. method:: exclude_pkgs(pkgs) + + Exclude all packages in the `pkgs` list from all operations. + + .. method:: prepend_installroot(option) + + Prefix config option named `option` with :attr:`installroot`. + + .. 
method:: read(filename=None) + + Read configuration options from the ``main`` section in `filename`. Option values not present there are left at their current values. If `filename` is ``None``, :attr:`config_file_path` is used. Conversely, the configuration path used to load the configuration file that was used is stored into :attr:`config_file_path` before the function returns. + + .. method:: dump() + + Print configuration values, including inherited values. + + .. method:: write_raw_configfile(filename, section_id, substitutions, modify) + + Update or create config file. Where `filename` represents name of config file (.conf or .repo); `section_id` + represents id of modified section (e.g. main, fedora, updates); `substitutions` represents an instance of + base.conf.substitutions; `modify` represents dict of modified options. diff --git a/doc/api_exceptions.rst b/doc/api_exceptions.rst new file mode 100644 index 0000000..a588427 --- /dev/null +++ b/doc/api_exceptions.rst @@ -0,0 +1,52 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. 
+ +============ + Exceptions +============ + +.. exception:: dnf.exceptions.Error + + Base class for all DNF Errors. + +.. exception:: dnf.exceptions.CompsError + + Used for errors of comps groups like trying to work with a group which is not available. + +.. exception:: dnf.exceptions.DeprecationWarning + + Used to emit deprecation warnings using Python's :func:`warnings.warn` function. + +.. exception:: dnf.exceptions.DepsolveError + + Error during transaction dependency resolving. + +.. exception:: dnf.exceptions.DownloadError + + Error during downloading packages from the repositories. + +.. exception:: dnf.exceptions.MarkingError + + Error when DNF was unable to find a match for a given package / group / module specification. + +.. exception:: dnf.exceptions.MarkingErrors + + Categorized errors during processing of the request. The available error categories are ``no_match_pkg_specs`` for missing packages, ``error_pkg_specs`` for broken packages, ``no_match_group_specs`` for missing groups or modules, ``error_group_specs`` for broken groups or modules and ``module_depsolv_errors`` for modular dependency problems. + +.. exception:: dnf.exceptions.RepoError + + Error when loading repositories. diff --git a/doc/api_module.rst b/doc/api_module.rst new file mode 100644 index 0000000..3e4ab9b --- /dev/null +++ b/doc/api_module.rst @@ -0,0 +1,236 @@ +.. + Copyright (C) 2019 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. 
You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +===================== + Modularity Interface +===================== + +.. module:: dnf.module.module_base + + +.. class:: dnf.module.module_base.ModuleBase + +Basic class for handling modules. + + .. method:: __init__(base) + + Initialize :class:`dnf.module.module_base.ModuleBase` object. `base` is an instance of the :class:`dnf.Base` class. + + .. method:: enable(module_specs) + + Mark module streams matching the `module_specs` list and also all required modular dependencies for enabling. + For specs that do not specify the stream, the default stream is used. In case that the module has only one stream available, this stream is used regardless of whether it is the default or not. + Note that only one stream of any given module can be enabled on a system. + The method raises :exc:`dnf.exceptions.MarkingErrors` in case of errors. + + Example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + module_base = dnf.module.module_base.ModuleBase(base) + module_base.enable(['nodejs:11']) + + base.do_transaction() + + .. method:: disable(module_specs) + + Mark modules matching the `module_specs` list for disabling. Only the name part of the module specification is relevant. Stream, version, context, arch and profile parts are ignored (if given). All streams of the module will be disabled and all installed profiles will be removed. Packages previously installed from these modules will remain installed on the system. + The method raises :exc:`dnf.exceptions.MarkingErrors` in case of errors. 
+ + Example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + module_base = dnf.module.module_base.ModuleBase(base) + module_base.disable(['nodejs']) + + base.do_transaction() + + .. method:: reset(module_specs) + + Mark module for resetting so that it will no longer be enabled or disabled. All installed profiles of streams that have been reset will be removed. + The method raises :exc:`dnf.exceptions.MarkingErrors` in case of errors. + + .. method:: install(module_specs, strict=True) + + Mark module profiles matching `module_specs` for installation and enable all required streams. If the stream or profile part of specification is not specified, the defaults are chosen. All packages of installed profiles are also marked for installation. + If `strict` is set to ``False``, the installation skips modules with dependency solving problems. + The method raises :exc:`dnf.exceptions.MarkingErrors` in case of errors. + + Example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + module_base = dnf.module.module_base.ModuleBase(base) + module_base.install(['nodejs:11/minimal']) + + base.resolve() + base.download_packages(base.transaction.install_set) + base.do_transaction() + + .. method:: remove(module_specs) + + Mark module profiles matching `module_spec` for removal. All packages installed from removed profiles (unless they are required by other profiles or user-installed packages) are also marked for removal. + + .. method:: upgrade(module_specs) + + Mark packages of module streams (or profiles) matching `module_spec` for upgrade. + + .. method:: get_modules(module_spec) + + Get information about modules matching `module_spec`. Returns tuple (module_packages, nsvcap), where `nsvcap` is a hawkey.NSVCAP object parsed from `module_spec` and `module_packages` is a tuple of :class:`libdnf.module.ModulePackage` objects matching this `nsvcap`. 
+ + Example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + module_base = dnf.module.module_base.ModuleBase(base) + module_packages, nsvcap = module_base.get_modules('nodejs:11/minimal') + + print("Parsed NSVCAP:") + print("name:", nsvcap.name) + print("stream:", nsvcap.stream) + print("version:", nsvcap.version) + print("context:", nsvcap.context) + print("arch:", nsvcap.arch) + print("profile:", nsvcap.profile) + + print("Matching modules:") + for mpkg in module_packages: + print(mpkg.getFullIdentifier()) + + + + +.. class:: libdnf.module.ModulePackage + +This class represents a record identified by NSVCA from the repository modular metadata. See also https://github.com/fedora-modularity/libmodulemd/blob/master/spec.v2.yaml. + + .. method:: getName() + + Return the name of the module. + + .. method:: getStream() + + Return the stream of the module. + + .. method:: getVersion() + + Return the version of the module as a string. + + .. method:: getVersionNum() + + Return the version of the module as a number. + + .. method:: getContext() + + Return the context of the module. + + .. method:: getArch() + + Return the architecture of the module. + + .. method:: getNameStream() + + Return string in the form of 'name:stream' for the module. + + .. method:: getNameStreamVersion() + + Return string in the form of 'name:stream:version' for the module. + + .. method:: getFullIdentifier() + + Return string in the form of 'name:stream:version:context:architecture' for the module. + + .. method:: getProfiles(name=None) + + Return tuple of :class:`libdnf.module.ModuleProfile` instances representing each of the individual profiles of the module. If the `name` is given, only profiles matching the `name` pattern are returned. + + .. method:: getSummary() + + Return the summary of the module. + + .. method:: getDescription() + + Return the description of the module. + + ..
method:: getRepoID() + + Return the identifier of source repository of the module. + + .. method:: getArtifacts() + + Return tuple of the artifacts of the module. + + .. method:: getModuleDependencies() + + Return tuple of :class:`libdnf.module.ModuleDependencies` objects representing modular dependencies of the module. + + .. method:: getYaml() + + Return repomd yaml representing the module. + + + +.. class:: libdnf.module.ModuleProfile + + .. method:: getName() + + Return the name of the profile. + + .. method:: getDescription() + + Return the description of the profile. + + .. method:: getContent() + + Return tuple of package names to be installed with this profile. + + + +.. class:: libdnf.module.ModuleDependencies + + .. method:: getRequires() + + Return tuple of MapStringVectorString objects. These objects behave like standard python dictionaries and represent individual dependencies of the given module. Keys are names of required modules, values are tuples of required streams specifications. + + + +.. class:: libdnf.module.ModulePackageContainer + + This class is under development and should be considered unstable at the moment. + diff --git a/doc/api_package.rst b/doc/api_package.rst new file mode 100644 index 0000000..e5e90c7 --- /dev/null +++ b/doc/api_package.rst @@ -0,0 +1,184 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. 
You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +========= + Package +========= + +.. class:: dnf.package.Package + + Represents a unit of software management, typically corresponds to an RPM file. + + .. attribute:: arch + + Architecture of the package (string). + + .. attribute:: baseurl + + Baseurl of the package (string). + + .. attribute:: buildtime + + Seconds since the epoch when the package was built (integer). + + .. attribute:: chksum + + Tuple with package checksum and checksum type or ``None``. The checksum is returned only for + packages from repository. The checksum is not returned for installed package or packages from + commandline repository. The checksum represents @pkgid value which links primary metadata with + other repository metadata files. + + .. attribute:: conflicts + + Packages that the package conflicts with (list of Hawkey.Reldep). + + .. attribute:: debug_name + + The name of the debug-info package (string). + + .. attribute:: description + + The description of the package (string). + + .. attribute:: downloadsize + + The size of rpm package in bytes (integer). + + .. attribute:: epoch + + Epoch of the package (integer). + + .. attribute:: enhances + + Packages that the package enhances (list of Hawkey.Reldep). + + .. attribute:: evr + + EVR (epoch:version-revision) of the package (string). + + .. attribute:: files + + Files the package provides (list of strings). + + .. attribute:: group + + Group of the package (string). + + .. attribute:: hdr_chksum + + Tuple with package header checksum and checksum type or ``None``. 
The checksum is returned only for installed packages. + + .. attribute:: hdr_end + + Header end index for the package. Returns 0 for not known (integer). + + .. attribute:: changelogs + + Changelogs for the package (list of dictionaries with "timestamp", "author" and "text" keys). + + .. attribute:: installed + + Returns ``True`` if the package is installed (boolean). + + .. attribute:: installtime + + Seconds since the epoch when the package was installed (integer). + + .. attribute:: installsize + + Space in bytes the package takes on the system after installation (integer). + + .. attribute:: license + + License of the package (string). + + .. attribute:: medianr + + Media number for the package (integer). + + .. attribute:: name + + The name of the package (string). + + .. attribute:: obsoletes + + Packages that are obsoleted by the package (list of Hawkey.Reldep). + + .. attribute:: provides + + Package's provides (list of Hawkey.Reldep). + + .. attribute:: recommends + + Packages that are recommended by the package (list of Hawkey.Reldep). + + .. attribute:: release + + Release of the package (string). + + .. attribute:: reponame + + Id of repository the package was installed from (string). + + .. attribute:: requires + + Package's requirements (list of Hawkey.Reldep). + + .. attribute:: requires_pre + + Package's install-time requirements (list of Hawkey.Reldep). + + .. attribute:: rpmdbid + + The rpmdb ID for the package (integer). + + .. attribute:: source_debug_name + + The name of the source debug-info package (string). + + .. attribute:: source_name + + The name of the source package (string). + + .. attribute:: sourcerpm + + Full name of the SRPM used to build this package (string). + + .. attribute:: suggests + + Packages that are suggested by the package (list of Hawkey.Reldep). + + .. attribute:: summary + + Summary for the package (string). + + .. attribute:: supplements + + Packages that the package supplements (list of Hawkey.Reldep). + + .. 
attribute:: url + + URL for the package (string). + + .. attribute:: version + + Version of the package (string). + + .. method:: remote_location(schemes=('http', 'ftp', 'file', 'https')) + + The location from where the package can be downloaded from (string). If the information is unavailable + it returns ``None``. ``schemes`` limits result to list of protocols. diff --git a/doc/api_plugins.rst b/doc/api_plugins.rst new file mode 100644 index 0000000..9203308 --- /dev/null +++ b/doc/api_plugins.rst @@ -0,0 +1,95 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +================== + Plugin Interface +================== + +DNF plugin can be any Python class fulfilling the following criteria: + +1. it derives from :class:`dnf.Plugin`, +2. it is made available in a Python module stored in one of the :attr:`.Conf.pluginpath`, +3. provides its own :attr:`~.Plugin.name` and :meth:`~.Plugin.__init__`. + +When DNF CLI runs it loads the plugins found in the paths during the CLI's initialization. + +.. class:: dnf.Plugin + + The base class all DNF plugins must derive from. 
+ + .. attribute:: name + + The plugin must set this class variable to a string identifying the plugin. The string can only contain alphanumeric characters and underscores. + + .. staticmethod:: read_config(conf) + + Read plugin's configuration into a `ConfigParser `_ compatible instance. `conf` is a :class:`.Conf` instance used to look up the plugin configuration directory. + + .. method:: __init__(base, cli) + + The plugin must override this. Called immediately after all the plugins are loaded. `base` is an instance of :class:`dnf.Base`. `cli` is an instance of :class:`dnf.cli.Cli` but can also be ``None`` in case DNF is running without a CLI (e.g. from an extension). + + .. method:: pre_config() + + This hook is called before configuring the repos. + + .. method:: config() + + This hook is called immediately after the CLI/extension is finished configuring DNF. The plugin can use this to tweak the global configuration or the repository configuration. + + .. method:: resolved() + + This hook is called immediately after the CLI has finished resolving a transaction. The plugin can use this to inspect the resolved but not yet executed :attr:`Base.transaction`. + + .. method:: sack() + + This hook is called immediately after :attr:`.Base.sack` is initialized with data from all the enabled repos. + + .. method:: pre_transaction() + + This hook is called just before transaction execution. This means after a successful transaction test. RPMDB is locked during that time. + + .. method:: transaction() + + This hook is called immediately after a successful transaction. + Plugins that were removed or obsoleted by the transaction will not run the transaction hook. + +.. method:: register_command(command_class) + + A class decorator for automatic command registration. 
+ + Example of a plugin that provides a hello-world dnf command (the file must be placed in one of the :ref:`pluginpath ` directories:: + + import dnf + + @dnf.plugin.register_command + class HelloWorldCommand(dnf.cli.Command): + aliases = ('hello-world',) + summary = 'The example command' + + def run(self): + print('Hello world!') + + To run the command:: + + $ dnf hello-world + Hello world! + + +You may want to see the comparison with `yum plugin hook API`_. + +.. _yum plugin hook API: http://dnf.readthedocs.org/en/latest/api_vs_yum.html diff --git a/doc/api_queries.rst b/doc/api_queries.rst new file mode 100644 index 0000000..5c8804e --- /dev/null +++ b/doc/api_queries.rst @@ -0,0 +1,219 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +====================== + Queries and Subjects +====================== + +.. module:: dnf.query + +.. class:: Query + + Facilitates lookup of packages in a :class:`~dnf.sack.Sack` based on given criteria. Query actually does not consult the information in the :class:`~!dnf.sack.Sack` until it is evaluated. 
The evaluation happens either explicitly using :meth:`~dnf.query.Query.run` or by iterating the query, for example:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.fill_sack() + + q = base.sack.query() + i = q.installed() + i = i.filter(name='dnf') + + packages = list(i) # i only gets evaluated here + print("Installed dnf package:") + for pkg in packages: + print(pkg, pkg.reponame) + + or:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + q = base.sack.query() + a = q.available() + a = a.filter(name='dnf') + + print("Available dnf packages:") + for pkg in a: # a only gets evaluated here + print('{} in repo {}'.format(pkg, pkg.reponame)) + + + Notice that none of the filtering methods mutates the state of the :class:`~dnf.query.Query` but produces a new object instead. + + .. method:: available() + + Returns a new query limiting the original query to the packages available from the repositories. + + .. method:: difference(other) + + Returns a new query that contains only those results of original query that are not in the results of the ``other`` query. + + .. method:: downgrades() + + Returns a new query that limits the result only to packages that can be downgrade candidates to other packages in the current set. Downgrade candidate has the same name, lower EVR and the architecture of the original and the downgrade candidate are suitable for a downgrade. Specifically, the filtering does not take any steps to establish that the downgrade candidate can actually be installed. + + .. method:: duplicated() + + Returns a new query that limits the result only to installed packages of same name and different version. Optional argument exclude accepts a list of package names that will be excluded from result. + + .. method:: extras() + + Returns a new query that limits the result to installed packages that are not present in any repo + + .. 
method:: filter(\*\*kwargs) + + Returns a new query limiting the original query to the key/value pairs from `kwargs`. Multiple `kwargs` can be passed, the filter then works by applying all of them together (logical AND). Values inside of list or query are cumulative (logical OR). + + Allowed keys are: + + =============== ============== ====================================================== + key value type value meaning + =============== ============== ====================================================== + arch string match against packages' architecture + downgrades boolean see :meth:`downgrades`. Defaults to ``False``. + empty boolean ``True`` limits to empty result set. + Defaults to ``False``. + epoch integer match against packages' epoch. + file string match against packages' files + latest integer limit to all packages of number of versions + latest_per_arch integer see :meth:`latest`. + name string match against packages' names + release string match against packages' releases + reponame string match against packages repositories' names + version string match against packages' versions + obsoletes Query match packages that obsolete any package from query + pkg Query match against packages in query + pkg* list match against hawkey.Packages in list + provides string match against packages' provides + provides* Hawkey.Reldep match against packages' provides + requires string match against packages' requirements + requires* Hawkey.Reldep match against packages' requirements + sourcerpm string match against packages' source rpm + upgrades boolean see :meth:`upgrades`. Defaults to ``False``. + =============== ============== ====================================================== + + \* The key can also accept a list of values with specified type. 
+ + The key name can be supplemented with a relation-specifying suffix, separated by ``__``: + + ========== =========== ========================================================== + key suffix value type semantics + ========== =========== ========================================================== + eq any exact match; This is the default if no suffix is specified. + glob string shell-style wildcard match + gt integer the actual value is greater than specified + gte integer the actual value is greater than or equal to specified + lt integer the actual value is less than specified + lte integer the actual value is less than or equal to specified + neq any does not equal + substr string the specified value is contained in the actual value + ========== =========== ========================================================== + + For example, the following creates a query that matches all packages containing the string "club" in its name:: + + q = base.sack.query().filter(name__substr="club") + + .. method:: filterm(\*\*kwargs) + + Similar to :meth:`dnf.query.Query.filter` but it modifies the query in place. + + .. method:: installed() + + Returns a new query that limits the result to the installed packages only. + + .. method:: intersection(other) + + Returns a new query where the result contains only packages that are found in both original and ``other`` queries. + + .. method:: latest(limit=1) + + Returns a new query that limits the result to ``limit`` highest version of packages per package + name and per architecture. In case the limit is negative number, it excludes the number of + latest versions according to limit. + + .. method:: run() + + Evaluate the query. Returns a list of matching :class:`dnf.package.Package` instances. + + .. method:: union(other) + + Returns a new query where the results of the ``other`` query are added to the results of the original query. + + .. 
method:: upgrades() + + Returns a new query that limits the result only to packages that can be upgrade candidates to at least one package in the current set. Upgrade candidate has the same name, higher EVR and the architectures of the original and the upgrade candidate package are suitable for an upgrade. Specifically, the filtering does not take any steps to establish that the upgrade candidate can actually be installed. + +.. module:: dnf.subject + +.. class:: Subject + + As :ref:`explained on the DNF man page `, users of the CLI are able to select packages for an operation in different formats, leaving seemingly arbitrary parts out of the spec and even using globbing characters. This class implements a common approach to parsing such input and produce a :class:`~dnf.query.Query` listing all packages matching the input or a :class:`~dnf.selector.Selector` selecting a single package that best matches the input given a transaction operation. + + .. method:: __init__(pkg_spec, ignore_case=False) + + Initialize the :class:`Subject` with `pkg_spec` input string with following :ref:`semantic `. If `ignore_case` is ``True`` ignore the case of characters in `pkg_spec`. + + .. method:: get_best_query(sack, with_nevra=True, with_provides=True, with_filenames=True, forms=None) + + Returns a :class:`~Query` yielding packages matching the given input. The result of the returned + query can be an empty set if no package matches. `sack` is the :class:`~dnf.sack.Sack` that the + returned query will search. `with_nevra` enable search by nevra, `with_provides` indicates + whether besides package names also packages' provides are searched for a match, and + `with_filenames` indicates whether besides package provides also packages' file provides are + searched for a match. `forms` is a list of pattern forms from `hawkey`_. Leaving the parameter + to ``None`` results in using a reasonable default list of forms. + + .. 
method:: get_best_selector(sack, forms=None, obsoletes=True, reponame=None, reports=False) + + Returns a :class:`~dnf.selector.Selector` that will select a single best-matching package when + used in a transaction operation. `sack` and `forms` have the same meaning as in + :meth:`get_best_query`. If ``obsoletes``, selector will also contain packages that obsolete + requested packages (default is True). If ``reponame``, the selection of available packages is + limited to packages from that repo (default is None). Attribute ``reports`` is deprecated and + not used any more. Will be removed on 2018-01-01. + + .. method:: get_nevra_possibilities(self, forms=None) + + Returns generator for every possible nevra. Each possible nevra is represented by NEVRA class + (libdnf) that has attributes name, epoch, version, release, arch. `forms` has the same + meaning as in :meth:`get_best_query`. + + Example how to use it when it is known that string could be full NEVRA or NEVR:: + + #!/usr/bin/python3 + import dnf + import hawkey + + nevra_string = "dnf-0:4.2.2-2.fc30.noarch" + subject = dnf.subject.Subject(nevra_string) + possible_nevra = subject.get_nevra_possibilities( + forms=[hawkey.FORM_NEVRA, hawkey.FORM_NEVR]) + + for i,nevra in enumerate(possible_nevra): + print("Possibility {} for \"{}\":".format(i+1, nevra_string)) + print("name: {}".format(nevra.name)) + print("epoch: {}".format(nevra.epoch)) + print("version: {}".format(nevra.version)) + print("release: {}".format(nevra.release)) + print("architecture: {}".format(nevra.arch)) + print() diff --git a/doc/api_repos.rst b/doc/api_repos.rst new file mode 100644 index 0000000..b1c941f --- /dev/null +++ b/doc/api_repos.rst @@ -0,0 +1,159 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. 
+ This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +======================== +Repository Configuration +======================== + + +.. class:: dnf.repodict.RepoDict + + Dictionary mapping repository IDs to the respective :class:`dnf.repo.Repo` objects. Derived from the standard :class:`dict`. + + .. method:: add(repo) + + Add a :class:`.Repo` to the repodict. + + .. method:: add_new_repo(repoid, conf, baseurl=(), \*\*kwargs) + + Initialize new :class:`.Repo` object and add it to the repodict. It requires ``repoid`` + (string), and :class:`dnf.conf.Conf` object. Optionally it can be specified baseurl (list), and + additionally key/value pairs from `kwargs` to set additional attribute of the :class:`.Repo` + object. Variables in provided values (``baseurl`` or ``kwargs``) will be automatically + substituted using conf.substitutions (like ``$releasever``, ...). It returns the :class:`.Repo` + object. + + .. method:: all() + + Return a list of all contained repositories. + + See the note at :meth:`get_matching` for special semantics of the returned object. + + .. method:: enable_debug_repos() + + Enable debug repos corresponding to already enabled binary repos. + + .. method:: enable_source_repos() + + Enable source repos corresponding to already enabled binary repos. + + .. 
method:: get_matching(key) + + Return a list of repositories whose ID matches (possibly globbed) `key` or an empty list if no matching repository is found. + + The returned list acts as a `composite `_, transparently forwarding all method calls on itself to the contained repositories. The following thus disables all matching repos:: + + #!/usr/bin/python3 + import dnf + + base = dnf.Base() + base.read_all_repos() + base.fill_sack() + + repos = base.repos.get_matching('*-debuginfo') + repos.disable() + + .. method:: iter_enabled() + + Return an iterator over all enabled repos from the dict. + +.. module:: dnf.repo + +.. function:: repo_id_invalid(repo_id) + + Return index of the first invalid character in the `repo_id` or ``None`` if all characters are valid. This function is used to validate the section names in ``.repo`` files. + +.. class:: Metadata + + Represents the metadata files. + + .. attribute:: fresh + + Boolean. ``True`` if the metadata was loaded from the origin, ``False`` if it was loaded from the cache. + +.. class:: Repo + + Repository object used for metadata download. To configure it properly one has to give it either :attr:`metalink`, :attr:`mirrorlist` or :attr:`baseurl` parameter. + This object has attributes corresponding to all configuration options from both :ref:`"Repo Options" ` and :ref:`"Options for both [main] and Repo" ` sections. + + .. IMPORTANT:: + Some :class:`.Repo` attributes have non-native Python types. + Duck typing works (objects have identical behavior), but ``isinstance()`` + and ``type()`` doesn't work as expected because of different types. + For example :ref:`excludepkgs ` and :ref:`includepkgs ` return a ``VectorString``, which + is a SWIG wrapper on top of underlying libdnf C++ code. + + .. attribute:: id + + ID of this repo. This attribute is read-only. + + .. attribute:: metadata + + If :meth:`~load` has been called and succeeded, this contains the relevant :class:`Metadata` instance. + + .. 
attribute:: pkgdir + + Directory where packages of a remote repo will be downloaded to. By default it is derived from `cachedir` in :meth:`.__init__` but can be overridden by assigning to this attribute. + + .. attribute:: repofile + + The path to configuration file of the class. + + .. method:: __init__(name=None, parent_conf=None) + + Init repository with ID `name` and the `parent_conf` which an instance of :class:`dnf.conf.Conf` + holding main dnf configuration. Repository ID must be a string that can contain ASCII letters, digits, and `-_.:` characters. + + .. method:: add_metadata_type_to_download(metadata_type) + + Ask for additional repository metadata type to download. Given `metadata_type` is appended to the default metadata set when repository is downloaded. + + .. method:: disable() + + Disable the repository. Repositories are enabled by default. + + .. method:: dump() + + Print repository configuration, including inherited values. + + .. method:: enable() + + Enable the repository (the default). + + .. method:: get_http_headers() + + Return user defined http headers. Return tuple of strings. + + .. method:: get_metadata_content(metadata_type) + + Return contents of the repository's metadata file of the given metadata type. Contents of compressed files are returned uncompressed. + + .. method:: get_metadata_path(metadata_type) + + Return path to the file with downloaded repository metadata of given type. + + .. method:: load() + + Load the metadata of this repository. Will try to use local cache if possible and initiate and finish download if not. Returns ``True`` if fresh metadata has been downloaded and ``False`` if cache was used. Raises :exc:`dnf.exceptions.RepoError` if the repo metadata could not be obtained. + + .. method:: set_http_headers(headers) + + Set new user headers and rewrite existing ones. `headers` must be an instance of tuple of strings or list of strings. + + .. 
method:: set_progress_bar(progress) + + Set the download progress reporting object for this repo during :meth:`load`. `progress` must be an instance of :class:`dnf.callback.DownloadProgress`. diff --git a/doc/api_rpm.rst b/doc/api_rpm.rst new file mode 100644 index 0000000..c59ed67 --- /dev/null +++ b/doc/api_rpm.rst @@ -0,0 +1,32 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +=============== + RPM Interface +=============== + +.. module:: dnf.rpm + +.. function:: detect_releasever(installroot) + + Return the release name of the distribution of the tree rooted at `installroot`. The function uses information from RPMDB found under the tree. + + Returns ``None`` if the information can not be determined (perhaps because the tree has no RPMDB). + +.. function:: basearch(arch) + + Return base architecture of the processor based on `arch` type given. E.g. when `arch` i686 is given then the returned value will be i386. diff --git a/doc/api_sack.rst b/doc/api_sack.rst new file mode 100644 index 0000000..7971987 --- /dev/null +++ b/doc/api_sack.rst @@ -0,0 +1,34 @@ +.. 
+ Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +====== + Sack +====== + +.. module:: dnf.sack + +.. class:: Sack + + The package sack. Contains metadata information about all known packages, installed and available. + + .. method:: query() + + Return a :class:`Query` for querying packages contained in this sack. + +.. function:: rpmdb_sack(base) + + Returns a new instance of sack containing only installed packages (@System repo). Useful to get list of the installed RPMs after transaction. diff --git a/doc/api_selector.rst b/doc/api_selector.rst new file mode 100644 index 0000000..45b639c --- /dev/null +++ b/doc/api_selector.rst @@ -0,0 +1,32 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. 
+ This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +========== + Selector +========== + +.. class:: dnf.selector.Selector + + Specify a target of a transaction operation. + + .. method:: set + + Set content of Selector similarly to :meth:`dnf.query.Query.filter` + + .. method:: matches + + Returns packages that represent the content of Selector diff --git a/doc/api_transaction.rst b/doc/api_transaction.rst new file mode 100644 index 0000000..fb99745 --- /dev/null +++ b/doc/api_transaction.rst @@ -0,0 +1,37 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +=========== +Transaction +=========== + +.. module:: dnf.db.group + + +.. class:: RPMTransaction + + Instances of this class describe a resolved transaction set. The transaction object can be iterated for the contained :class:`items <.TransactionItem>`. + + The packaging requests from the contained items are later passed to the core package manager (RPM) as they are without further dependency resolving. If the set is not fit for an actual transaction (e.g. introduces conflicts, has inconsistent dependencies) RPM then by default refuses to proceed. + + .. attribute:: install_set + + Read-only property which contains set of :class:`Packages <.package.Package>` to be installed. + + .. attribute:: remove_set + + Read-only property which contains set of :class:`Packages <.package.Package>` to be removed. diff --git a/doc/api_vs_yum.rst b/doc/api_vs_yum.rst new file mode 100644 index 0000000..452b306 --- /dev/null +++ b/doc/api_vs_yum.rst @@ -0,0 +1,51 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
``12`` ``posttrans`` ``transaction``
``dnf-automatic [<config file>]``
The third will download and install them. + +=================== + Run dnf-automatic +=================== + +You can select one that most closely fits your needs, customize ``/etc/dnf/automatic.conf`` for any specific behaviors, and enable the timer unit. + +For example: ``systemctl enable dnf-automatic-notifyonly.timer && systemctl start dnf-automatic-notifyonly.timer`` + +=========================== + Configuration File Format +=========================== + +The configuration file is separated into topical sections. + +---------------------- +``[commands]`` section +---------------------- + +Setting the mode of operation of the program. + +``apply_updates`` + boolean, default: False + + Whether packages comprising the available updates should be applied by ``dnf-automatic.timer``, i.e. installed via RPM. Implies ``download_updates``. Note that if this is set to ``False``, downloaded packages will be left in the cache till the next successful DNF transaction. Note that the other timer units override this setting. + +``download_updates`` + boolean, default: False + + Whether packages comprising the available updates should be downloaded by ``dnf-automatic.timer``. Note that the other timer units override this setting. + +.. _upgrade_type_automatic-label: + +``upgrade_type`` + either one of ``default``, ``security``, default: ``default`` + + What kind of upgrades to look at. ``default`` signals looking for all available updates, ``security`` only those with an issued security advisory. + +``random_sleep`` + time in seconds, default: 0 + + Maximal random delay before downloading. Note that, by default, the ``systemd`` timers also apply a random delay of up to 5 minutes. + +---------------------- +``[emitters]`` section +---------------------- + +Choosing how the results should be reported. + +.. _emit_via_automatic-label: + +``emit_via`` + list, default: ``email, stdio, motd`` + + List of emitters to report the results through. 
Available emitters are ``stdio`` to print the result to standard output, ``command`` to send the result to a custom command, ``command_email`` to send an email using a command, ``email`` to send the report via email, and ``motd`` to write the result to the */etc/motd* file.
See :doc:`conf_ref`. + diff --git a/doc/cli_vs_yum.rst b/doc/cli_vs_yum.rst new file mode 100644 index 0000000..5694586 --- /dev/null +++ b/doc/cli_vs_yum.rst @@ -0,0 +1,446 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +#################################### + Changes in DNF CLI compared to YUM +#################################### + +.. only :: html + + .. contents:: + +====================== + ``--skip-broken`` +====================== + +For install command: + +The ``--skip-broken`` option is an alias for ``--setopt=strict=0``. Both options could be used +with DNF to skip all unavailable packages or packages with broken dependencies given to DNF +without raising an error causing the whole operation to fail. This behavior can be set as default +in dnf.conf file. See :ref:`strict conf option `. + +For upgrade command: + +The semantics that were supposed to trigger in YUM with ``--skip-broken`` are now set for plain +``dnf update`` as a default. There is no need to use ``--skip-broken`` with the ``dnf upgrade`` +command. 
To use only the latest versions of packages in transactions, there is the ``--best`` +command line switch. + +======================================== +Update and Upgrade Commands are the Same +======================================== + +Invoking ``dnf update`` or ``dnf upgrade``, in all their forms, has the same +effect in DNF, with the latter being preferred. In YUM ``yum upgrade`` was +exactly like ``yum --obsoletes update``. + +================================================ + ``clean_requirements_on_remove`` on by default +================================================ + +The :ref:`clean_requirements_on_remove ` +switch is on by default in DNF. It can thus be confusing to compare the "remove" +operation results between DNF and YUM as by default DNF is often going to remove +more packages. + +=========================== + No ``resolvedep`` command +=========================== + +The YUM version of this command is maintained for legacy reasons only. The user +can just use ``dnf provides`` to find out what package provides a particular file. + +=========================== + No ``deplist`` command +=========================== + +An alternative to the YUM ``deplist`` command to find out dependencies of a package +is ``dnf repoquery --deplist`` using :ref:`repoquery command +`. + +.. note:: Alternatively there is a YUM compatibility support where + ``yum deplist`` is alias for ``dnf repoquery --deplist`` command + +==================================================== + Excludes and repo excludes apply to all operations +==================================================== + +YUM only respects excludes during installs and upgrades. DNF extends this to all +operations, among others erasing and listing. If you e.g. 
want to see a list of +all installed ``python-f*`` packages but not any of the Flask packages, the +following will work:: + + dnf -x '*flask*' list installed 'python-f*' + +========================================================== + YUM's conf directive ``includepkgs`` is just ``include`` +========================================================== + +``include`` directive name of [main] and Repo configuration is a more logical and better named counterpart of ``exclude`` in DNF. + +==================================================== +``dnf provides /bin/`` is not fully supported +==================================================== + +After `UsrMove `_ there's no +directory ``/bin`` on Fedora systems and no files get installed there, +``/bin`` is only a symlink created by the ``filesystem`` package to point to +``/usr/bin``. Resolving the symlinks to their real path would only give the +user a false sense that this works, while in fact provides requests using globs +such as:: + + dnf provides /b*/ + +will fail still (as they do in YUM now). To find what provides a particular +binary, use the actual path for binaries on Fedora:: + + dnf provides /usr/bin/ + +Also see related Fedora bugzillas `982947 +`_ and `982664 +`_. + +.. _skip_if_unavailable_default: + +==================================================== + ``skip_if_unavailable`` could be enabled by default +==================================================== + +In some distributions DNF is shipped with ``skip_if_unavailable=True`` in +the :ref:`DNF configuration file `. The reason for the change +is that third-party repositories can often be unavailable. Without this setting +in the relevant repository configuration file YUM immediately stops on a +repository synchronization error, confusing and bothering the user. + +See the related `Fedora bug 984483 `_. 
+ +============================================================================ + ``overwrite_groups`` dropped, comps functions acting as if always disabled +============================================================================ + +This config option has been dropped. When DNF sees several groups with the same +group ID it merges the groups' contents together. + +=============================== + ``mirrorlist_expire`` dropped +=============================== + +To simplify things for the user, DNF uses ``metadata_expire`` for both expiring +metadata and the mirrorlist file (which is a kind of metadata itself). + +=========================================================== + metalink not recognized in the ``mirrorlist`` repo option +=========================================================== + +The following part of ``yum.conf(5)`` no longer applies for the ``mirrorlist`` +option: + + As a special hack if the mirrorlist URL contains the word "metalink" then + the value of mirrorlist is copied to metalink (if metalink is not set). + +The relevant repository configuration files have been fixed to respect this, see +the related `Fedora bug 948788 +`_. + +================================= + ``alwaysprompt`` dropped +================================= + +Unsupported to simplify the configuration. + +.. _upgrade_requirements_on_install_dropped: + +============================================= + ``upgrade_requirements_on_install`` dropped +============================================= + +Dropping this config option with blurry semantics simplifies the +configuration. DNF behaves as if this was disabled. If the user wanted to +upgrade everything to the latest version she'd simply use ``dnf upgrade``. 
DNF provides a ``swap`` command, but only the ``dnf swap A B`` syntax is supported.
Secondly, even in YUM this output gets confusing very quickly especially for large transactions and so does more harm than good. + +See the related `Fedora bug 1044999 +`_. + +=================================================================== +``dnf provides`` complies with the YUM documentation of the command +=================================================================== + +When one executes:: + + yum provides sandbox + +YUM applies extra heuristics to determine what the user meant by ``sandbox``, for instance it sequentially prepends entries from the ``PATH`` environment variable to it to see if it matches a file provided by some package. This is an undocumented behavior that DNF does not emulate. Just typically use:: + + dnf provides /usr/bin/sandbox + +or even:: + + dnf provides '*/sandbox' + +to obtain similar results. + +================== +Bandwidth limiting +================== + +DNF supports the ``throttle`` and ``bandwidth`` options familiar from YUM. +Contrary to YUM, when multiple downloads run simultaneously the total +downloading speed is throttled. This was not possible in YUM since +downloaders ran in different processes. + +=================================== + ``installonlypkgs`` config option +=================================== + +Compared to YUM, DNF appends list values from the ``installonlypkgs`` config option to DNF defaults, where YUM overwrites the defaults by option values. + +============================== + The usage of Delta RPM files +============================== + +The boolean ``deltarpm`` option controls whether delta RPM files are used. Compared to YUM, DNF does not support ``deltarpm_percentage`` and instead chooses some optimal value of DRPM/RPM ratio to decide whether using deltarpm makes sense in the given case. 
(YUM tests each path from reposdir separately and uses the installroot path if it exists).
DNF lists all packages from all repos, which means there can be duplicate package names (with different repo names). This is due to providing users the possibility to choose a preferred repo.
+``yum-plugin-local`` ``dnf-plugins-core`` +``yum-plugin-merge-conf`` ``dnf-plugins-extras-rpmconf`` +``yum-plugin-priorities`` ``priority`` option in `dnf.conf`_ ``dnf`` +``yum-plugin-remove-with-leaves`` :ref:`dnf autoremove ` ``dnf`` +``yum-plugin-show-leaves`` ``dnf-plugins-core`` +``yum-plugin-tmprepo`` ``--repofrompath`` option ``dnf`` +``yum-plugin-tsflags`` ``tsflags`` option in `dnf.conf`_ ``dnf`` +``yum-plugin-versionlock`` ``python3-dnf-plugin-versionlock`` +``yum-rhn-plugin`` ``dnf-plugin-spacewalk`` +====================================== ================================================================ =================================== + +Plugins that have not been ported yet: + +``yum-plugin-filter-data``, +``yum-plugin-keys``, +``yum-plugin-list-data``, +``yum-plugin-post-transaction-actions``, +``yum-plugin-protectbase``, +``yum-plugin-ps``, +``yum-plugin-puppetverify``, +``yum-plugin-refresh-updatesd``, +``yum-plugin-rpm-warm-cache``, +``yum-plugin-upgrade-helper``, +``yum-plugin-verify`` + +Feel free to file an RFE_ for missing functionality if you need it. + +################################################# + Changes in DNF plugins compared to YUM utilities +################################################# + +All ported YUM tools are now implemented as DNF plugins. 
+ +========================= ================================================ ================================= +Original YUM tool New DNF command Package +------------------------- ------------------------------------------------ --------------------------------- +``debuginfo-install`` `dnf debuginfo-install`_ ``dnf-plugins-core`` +``find-repos-of-install`` `dnf list installed`_ ``dnf`` +``needs-restarting`` `dnf tracer`_ ``dnf-plugins-extras-tracer`` +``package-cleanup`` :ref:`dnf list `, + :ref:`dnf repoquery ` ``dnf``, ``dnf-plugins-core`` +``repoclosure`` `dnf repoclosure`_ ``dnf-plugins-extras-repoclosure`` +``repodiff`` `dnf repodiff`_ ``dnf-plugins-core`` +``repo-graph`` `dnf repograph`_ ``dnf-plugins-extras-repograph`` +``repomanage`` `dnf repomanage`_ ``dnf-plugins-extras-repomanage`` +``repoquery`` :ref:`dnf repoquery ` ``dnf`` +``reposync`` `dnf reposync`_ ``dnf-plugins-core`` +``repotrack`` `dnf download`_ --resolve --alldeps ``dnf-plugins-core`` +``yum-builddep`` `dnf builddep`_ ``dnf-plugins-core`` +``yum-config-manager`` `dnf config-manager`_ ``dnf-plugins-core`` +``yum-debug-dump`` `dnf debug-dump`_ ``dnf-plugins-extras-debug`` +``yum-debug-restore`` `dnf debug-restore`_ ``dnf-plugins-extras-debug`` +``yumdownloader`` `dnf download`_ ``dnf-plugins-core`` +========================= ================================================ ================================= + +Detailed table for ``package-cleanup`` replacement: + +================================== ===================================== +``package-cleanup --dupes`` ``dnf repoquery --duplicates`` +``package-cleanup --leaves`` ``dnf repoquery --unneeded`` +``package-cleanup --orphans`` ``dnf repoquery --extras`` +``package-cleanup --oldkernels`` ``dnf repoquery --installonly`` +``package-cleanup --problems`` ``dnf repoquery --unsatisfied`` +``package-cleanup --cleandupes`` ``dnf remove --duplicates`` +``package-cleanup --oldkernels`` ``dnf remove --oldinstallonly`` 
+================================== ===================================== + +============================= +yum-updateonboot and yum-cron +============================= + +DNF does not have a direct replacement of yum-updateonboot and yum-cron commands. +However, the similar result can be achieved by ``dnf automatic`` command (see :doc:`automatic`). + +You can either use the shortcut:: + + $ systemctl enable dnf-automatic-install.timer && systemctl start dnf-automatic-install.timer + +Or set ``apply_updates`` option of ``/etc/dnf/automatic.conf`` to True and use generic timer unit:: + + $ systemctl enable dnf-automatic.timer && systemctl start dnf-automatic.timer + +The timer in both cases is activated 1 hour after the system was booted up and then repetitively once every 24 hours. There is also a random delay on these timers set to 5 minutes. These values can be tweaked via ``dnf-automatic*.timer`` config files located in the ``/usr/lib/systemd/system/`` directory. + + +======================================= +Utilities that have not been ported yet +======================================= + +``repo-rss``, +``show-changed-rco``, +``show-installed``, +``verifytree``, +``yum-groups-manager`` + +Take a look at the FAQ_ about YUM to DNF migration. Feel free to file an RFE_ for missing functionality if you need it. + +.. _dnf debuginfo-install: http://dnf-plugins-core.readthedocs.org/en/latest/debuginfo-install.html +.. _dnf list installed: http://dnf.readthedocs.org/en/latest/command_ref.html +.. _dnf tracer: http://dnf-plugins-extras.readthedocs.org/en/latest/tracer.html +.. _dnf repoclosure: http://dnf-plugins-extras.readthedocs.org/en/latest/repoclosure.html +.. _dnf repodiff: http://dnf-plugins-core.readthedocs.org/en/latest/repodiff.html +.. _dnf repograph: http://dnf-plugins-extras.readthedocs.org/en/latest/repograph.html +.. _dnf repomanage: http://dnf-plugins-extras.readthedocs.org/en/latest/repomanage.html +.. 
_dnf reposync: http://dnf-plugins-core.readthedocs.org/en/latest/reposync.html +.. _dnf download: http://dnf-plugins-core.readthedocs.org/en/latest/download.html +.. _dnf builddep: http://dnf-plugins-core.readthedocs.org/en/latest/builddep.html +.. _dnf config-manager: http://dnf-plugins-core.readthedocs.org/en/latest/config_manager.html +.. _dnf debug-dump: http://dnf-plugins-extras.readthedocs.org/en/latest/debug.html +.. _dnf debug-restore: http://dnf-plugins-extras.readthedocs.org/en/latest/debug.html +.. _dnf copr: http://rpm-software-management.github.io/dnf-plugins-core/copr.html +.. _dnf.conf: http://dnf.readthedocs.org/en/latest/conf_ref.html +.. _RFE: https://github.com/rpm-software-management/dnf/wiki/Bug-Reporting#new-feature-request +.. _FAQ: http://dnf.readthedocs.io/en/latest/user_faq.html diff --git a/doc/command_ref.rst b/doc/command_ref.rst new file mode 100644 index 0000000..7141fc2 --- /dev/null +++ b/doc/command_ref.rst @@ -0,0 +1,1882 @@ +.. + Copyright (C) 2014-2018 Red Hat, Inc. + + This copyrighted material is made available to anyone wishing to use, + modify, copy, or redistribute it subject to the terms and conditions of + the GNU General Public License v.2, or (at your option) any later version. + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY expressed or implied, including the implied warranties of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + Public License for more details. You should have received a copy of the + GNU General Public License along with this program; if not, write to the + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. Any Red Hat trademarks that are incorporated in the + source code or documentation are not subject to the GNU General Public + License and may only be used or replicated with the express permission of + Red Hat, Inc. + +.. 
``dnf-command(<alias>)``, where ``<alias>`` is the name of the command; e.g. ``dnf install 'dnf-command(versionlock)'`` installs
``--advisory=<advisory>, --advisories=<advisory>``
+
+``--bugfix``
+    Include packages that fix a bugfix issue. Applicable for the install, repoquery, updateinfo and
+    upgrade commands.
+
+``--bz=<bugzilla>, --bzs=<bugzilla>``
+    Include packages that fix a Bugzilla ID, e.g. 123123. Applicable for the install, repoquery,
+    updateinfo and upgrade commands.
+
+``-C, --cacheonly``
+    Run entirely from system cache, don't update the cache and use it even in case it is expired.
+
+    DNF uses a separate cache for each user under which it executes. The cache for the root user is called the system cache. This switch allows a regular user read-only access to the system cache, which is usually fresher than the user's, so the user does not have to wait for a metadata sync.
+
+``--color=<color>``
+    Control whether color is used in terminal output. Valid values are ``always``, ``never`` and ``auto`` (default).
+
+``--comment=<comment>``
+    Add a comment to the transaction history.
+
+``-c <config file>, --config=<config file>``
+    Configuration file location.
+
+``--cve=<cves>, --cves=<cves>``
+    Include packages that fix a CVE (Common Vulnerabilities and Exposures) ID
+    (http://cve.mitre.org/about/), e.g. CVE-2201-0123. Applicable for the install, repoquery, updateinfo,
+    and upgrade commands.
+
+``-d <debug level>, --debuglevel=<debug level>``
+    Debugging output level. This is an integer value between 0 (no additional information strings) and 10 (shows all debugging information, even that not understandable to the user), default is 2. Deprecated, use ``-v`` instead.
+
+``--debugsolver``
+    Dump data aiding in dependency solver debugging into ``./debugdata``.
+
+.. _disableexcludes-label:
+
+``--disableexcludes=[all|main|<repoid>], --disableexcludepkgs=[all|main|<repoid>]``
+
+    Disable the configuration file excludes. Takes one of the following three options:
+
+    * ``all``, disables all configuration file excludes
+    * ``main``, disables excludes defined in the ``[main]`` section
+    * ``repoid``, disables excludes defined for the given repository
+
+``--disable, --set-disabled``
+    Disable specified repositories (automatically saves). 
The option has to be used together with the
+    ``config-manager`` command (dnf-plugins-core).
+
+.. _disableplugin-label:
+
+``--disableplugin=<plugin names>``
+    Disable the listed plugins specified by names or globs.
+
+``--disablerepo=<repoid>``
+    Disable specific repositories by an id or a glob. This option is mutually exclusive with ``--repo``.
+
+``--downloaddir=<path>, --destdir=<path>``
+    Redirect downloaded packages to provided directory. The option has to be used together with the \-\
+    :ref:`-downloadonly <downloadonly-label>` command line option, with the
+    ``download`` command (dnf-plugins-core) or with the ``system-upgrade`` command
+    (dnf-plugins-extras).
+
+.. _downloadonly-label:
+
+``--downloadonly``
+    Download the resolved package set without performing any rpm transaction (install/upgrade/erase).
+
+``-e <error level>, --errorlevel=<error level>``
+    Error output level. This is an integer value between 0 (no error output) and
+    10 (shows all error messages), default is 3. Deprecated, use ``-v`` instead.
+
+``--enable, --set-enabled``
+    Enable specified repositories (automatically saves). The option has to be used together with the
+    ``config-manager`` command (dnf-plugins-core).
+
+``--enableplugin=<plugin names>``
+    Enable the listed plugins specified by names or globs.
+
+``--enablerepo=<repoid>``
+    Enable additional repositories by an id or a glob.
+
+``--enhancement``
+    Include enhancement relevant packages. Applicable for the install, repoquery, updateinfo and
+    upgrade commands.
+
+.. _exclude_option-label:
+
+``-x <package-file-spec>, --exclude=<package-file-spec>``
+    Exclude packages specified by ``<package-file-spec>`` from the operation.
+
+``--excludepkgs=<package-file-spec>``
+    Deprecated option. It was replaced by the \-\ :ref:`-exclude <exclude_option-label>` option.
+
+``--forcearch=<arch>``
+    Force the use of an architecture. Any architecture can be specified.
+    However, use of an architecture not supported natively by your CPU will
+    require emulation of some kind. This is usually through QEMU. 
The behavior of ``--forcearch`` + can be configured by using the :ref:`arch ` and :ref:`ignorearch ` + configuration options with values ```` and ``True`` respectively. + +``-h, --help, --help-cmd`` + Show the help. + +.. _installroot-label: + +``--installroot=`` + Specifies an alternative installroot, relative to where all packages will be + installed. Think of this like doing ``chroot dnf``, except using + ``--installroot`` allows dnf to work before the chroot is created. It requires absolute path. + +- *cachedir*, *log files*, *releasever*, and *gpgkey* are taken from or + stored in the installroot. *Gpgkeys* are imported into the installroot from + a path relative to the host which can be specified in the repository section + of configuration files. + +- *configuration file* and :ref:`reposdir ` are searched inside the installroot first. If + they are not present, they are taken from the host system. + Note: When a path is specified within a command line argument + (``--config=`` in case of *configuration file* and + ``--setopt=reposdir=`` for *reposdir*) then this path is always + relative to the host with no exceptions. + +- *vars* are taken from the host system or installroot according to :ref:`reposdir ` + . When *reposdir* path is specified within a command line argument, vars are taken from the + installroot. When :ref:`varsdir ` paths are specified within a command line + argument (``--setopt=varsdir=``) then those path are always relative to the host with no + exceptions. + +- The *pluginpath* and *pluginconfpath* are relative to the host. + + Note: You may also want to use the command-line option + ``--releasever=`` when creating the installroot, otherwise the + *$releasever* value is taken from the rpmdb within the installroot (and thus + it is empty at the time of creation and the transaction will fail). If ``--releasever=/`` is used, the + releasever will be detected from the host (``/``) system. 
The new installroot path at the time of creation + does not contain the *repository*, *releasever* and *dnf.conf* files. + + On a modular system you may also want to use the + ``--setopt=module_platform_id=`` command-line option when creating the installroot, + otherwise the :ref:`module_platform_id ` value will be taken from the + ``/etc/os-release`` file within the installroot (and thus it will be empty at the time of creation, the modular + dependency could be unsatisfied and modules content could be excluded). + + Installroot examples: + + ``dnf --installroot= --releasever= install system-release`` + Permanently sets the ``releasever`` of the system in the + ```` directory to ````. + + ``dnf --installroot= --setopt=reposdir= --config /path/dnf.conf upgrade`` + Upgrades packages inside the installroot from a repository described by + ``--setopt`` using configuration from ``/path/dnf.conf``. + +``--newpackage`` + Include newpackage relevant packages. Applicable for the install, repoquery, updateinfo and + upgrade commands. + +``--noautoremove`` + Disable removal of dependencies that are no longer used. It sets + :ref:`clean_requirements_on_remove ` configuration option to ``False``. + +``--nobest`` + Set best option to ``False``, so that transactions are not limited to best candidates only. + +``--nodocs`` + Do not install documentation. Sets the rpm flag 'RPMTRANS_FLAG_NODOCS'. + +``--nogpgcheck`` + Skip checking GPG signatures on packages (if RPM policy allows). + +``--noplugins`` + Disable all plugins. + +.. _obsoletes_option-label: + +``--obsoletes`` + This option has an effect on an install/update, it enables + dnf's obsoletes processing logic. For more information see the + :ref:`obsoletes ` option. + + This option also displays capabilities that the package obsoletes when used together with the :ref:`repoquery ` command. 
+ + Configuration Option: :ref:`obsoletes ` + +``-q, --quiet`` + In combination with a non-interactive command, shows just the relevant content. Suppresses messages notifying about the current state or actions of DNF. + +``-R , --randomwait=`` + Maximum command wait time. + +.. _refresh_command-label: + +``--refresh`` + Set metadata as expired before running the command. + +``--releasever=`` + Configure DNF as if the distribution release was ````. This can + affect cache paths, values in configuration files and mirrorlist URLs. + +.. _repofrompath_options-label: + + +``--repofrompath ,`` + Specify a repository to add to the repositories for this query. + This option can be used multiple times. + +- The repository label is specified by ````. +- The path or url to the repository is specified by ````. + It is the same path as a baseurl and can be also enriched by the + :ref:`repo variables `. +- The configuration for the repository can be adjusted using \-\ + :ref:`-setopt `\=.