././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/0000755000175000017500000000000015001473074012142 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/COPYRIGHT0000644000175000017500000000150615001472663013442 0ustar00caseycaseyCopyright 2012-2014 Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. Copyright 2014-2016 VeriSign, Inc. Copyright 2016-2025 Casey Deccio. DNSViz is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. DNSViz is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with DNSViz. If not, see . ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/LICENSE0000644000175000017500000004325415001472663013162 0ustar00caseycasey GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. 
This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. 
We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/MANIFEST.in0000644000175000017500000000106015001472663013700 0ustar00caseycaseyinclude COPYRIGHT LICENSE include requirements.txt include dnsviz/config.py.in exclude dnsviz/config.py include doc/COPYRIGHT include doc/Makefile include doc/src/*dot include doc/src/*svg recursive-include contrib * recursive-exclude external * # The following are for versions of setuptools that don't seem to pull in the # files that are listed in setup.py include README.md include doc/images/warning.png doc/images/error.png doc/images/logo*png include doc/dnsviz-graph.html recursive-include doc/man * recursive-include share * recursive-include tests * ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/PKG-INFO0000644000175000017500000000342515001473074013243 0ustar00caseycaseyMetadata-Version: 2.4 Name: dnsviz Version: 0.11.1 Summary: DNS analysis and visualization tool suite Home-page: https://github.com/dnsviz/dnsviz/ Author: Casey Deccio Author-email: casey@deccio.net License: LICENSE Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) Classifier: Natural Language :: English Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: 
Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: Internet :: Name Service (DNS) Classifier: Topic :: Scientific/Engineering :: Visualization Classifier: Topic :: System :: Networking :: Monitoring Requires: pygraphviz (>=1.3) Requires: cryptography (>=36.0.0) Requires: dnspython (>=1.13) License-File: LICENSE License-File: COPYRIGHT Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: license Dynamic: license-file Dynamic: requires Dynamic: summary DNSViz is a tool suite for analysis and visualization of Domain Name System (DNS) behavior, including its security extensions (DNSSEC). This tool suite powers the Web-based analysis available at http://dnsviz.net/ . ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/README.md0000644000175000017500000004114015001472663013424 0ustar00caseycasey# ![DNSViz](doc/images/logo-220x100.png) ## Table of Contents * [Installation](#installation) * [Usage](#usage) * [Pre-Deployment DNS Testing](#pre-deployment-dns-testing) * [Docker Container](#docker-container) ## Description DNSViz is a tool suite for analysis and visualization of Domain Name System (DNS) behavior, including its security extensions (DNSSEC). This tool suite powers the Web-based analysis available at https://dnsviz.net/ ## Installation DNSViz is available in package repositories for popular operating systems, such as Debian, Ubuntu, Fedora, Gentoo, and FreeBSD. It is also available in the Extra Packages for Linux (EPEL) repository for Red Hat Enterprise Linux (RHEL) 8 and 9 and CentOS 8 and 9. (See [notes](#rhel-89-or-centos-stream-89-notes) for installation on RHEL and Centos.) 
In each case, it can be installed using the package installation commands typical for that operating system. DNSViz can also be installed on Mac OS X using Homebrew or MacPorts. The remainer of this section covers other methods of installation, including a list of [dependencies](#dependencies), installation to a [virtual environment](#installation-in-a-virtual-environment), and [notes for installing on RHEL 8 or 9 or CentOS Stream 8 or 9,](#rhel-89-or-centos-stream-89-notes)). Instructions for running in a Docker container are also available [later in this document](#docker-container). ### Dependencies * Python (2.7, 3.5 - 3.12) - https://www.python.org/ (Note that python 2.7 support will be removed in a future release.) * dnspython (1.13.0 or later) - https://www.dnspython.org/ * pygraphviz (1.3 or later) - https://pygraphviz.github.io/ * cryptography (36.0.0 or later) - https://cryptography.io/ Note that earlier versions of the software listed above might also work with DNSViz, but are not supported. For example, versions of cryptography as early as 2.6 seem to work. Also note that while DNSViz itself still works with Python 2.7, some versions of its software dependencies have moved on: pygraphviz 1.6 and dnspython 2.0.0 dropped support for Python 2.7. ### Optional Software * OpenSSL GOST Engine - https://github.com/gost-engine/engine With OpenSSL version 1.1.0 and later, the OpenSSL GOST Engine is necessary to validate DNSSEC signatures with algorithm 12 (GOST R 34.10-2001) and create digests of type 3 (GOST R 34.11-94). M2Crypto is also needed for GOST support. * M2Crypto - https://gitlab.com/m2crypto/m2crypto While almost all of the cryptgraphic support for DNSViz is handled with the cryptography Python module, support for algorithm 12 (GOST R 34.10-2001) digest type 3 (GOST R 34.11-94) require the OpenSSL GOST Engine. That engine must be loaded dynamically, and there is no support for that with cryptography. 
Thus, if you need to support algorithm 12 or digest type 3, you must also install M2Crypto. * ISC BIND - https://www.isc.org/bind/ When using DNSViz for [pre-deployment testing](#pre-deployment-dns-testing) by specifying zone files and/or alternate delegation information on the command line (i.e., with `-N`, `-x`, or `-D`), `named(8)` is invoked to serve one or more zones. ISC BIND is only needed in this case, and `named(8)` does not need to be running (i.e., as a server). Note that default AppArmor policies for Debian are known to cause issues when invoking `named(8)` from DNSViz for pre-deployment testing. AppArmor can be temporarily disabled for `named(8)` with the following: ``` $ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.named ``` After pre-deployment testing is finished, AppArmor for `named(8)` can be re-enabled with the following: ``` $ sudo apparmor_parser /etc/apparmor.d/usr.sbin.named ``` ### Installation in a Virtual Environment To install DNSViz to a virtual environment, first create and activate a virtual environment, and install the dependencies: ``` $ virtualenv ~/myenv $ source ~/myenv/bin/activate (myenv) $ pip install -r requirements.txt ``` Note that this installs the dependencies that are Python packages, but some of these packages have non-Python dependencies, such as Graphviz (required for pygraphviz) and OpenSSL (required for cryptography), that are not installed automatically. Next download and install DNSViz from the Python Package Index (PyPI): ``` (myenv) $ pip install dnsviz ``` or locally, from a downloaded or cloned copy of DNSViz: ``` (myenv) $ pip install . ``` ### RHEL 8/9 or CentOS Stream 8/9 Notes DNSViz can be installed on RHEL 8 or 9 or CentOS Stream 8 or 9 from the EPEL repository. Follow the instructions in this section to enable EPEL. 
*RHEL 8 and 9 only*: Enable CodeReady Linux Builder and Extra Packages for Enterprise Linux (EPEL) with following: ``` $ sudo subscription-manager repos --enable codeready-builder-for-rhel-$(vers)-$(arch)-rpms $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$(vers).noarch.rpm ``` (where `$(vers)` refers to version, either `8` or `9`, and `$(arch)` refers to the architecture, e.g., `x86_64` or `aarch64`. If you are unsure, run `sudo subscription-manager repos --list` to show available options.) *CentOS Stream 8 or 9 only*: Enable PowerTools or CodeReady Linux Builder and EPEL with the following: ``` $ sudo dnf config-manager --set-enabled $(tool) $ sudo dnf install epel-release ``` (where `$(tool)` refers to the tool, either `powertools` for CentOS Stream 8 or `crb` for CentOS Stream 9.) For both RHEL 8 or 9 *and* CentOS Stream 8 or 9, once EPEL is enabled, install DNSViz using `dnf`: ``` $ sudo dnf install dnsviz ``` ## Usage DNSViz is invoked using the `dnsviz` command-line utility. `dnsviz` itself uses several subcommands: `probe`, `grok`, `graph`, `print`, and `query`. See the man pages associated with each subcommand, in the form of "dnsviz- (1)" (e.g., "man dnsviz-probe") for more detailed documentation and usage. ### dnsviz probe `dnsviz probe` takes one or more domain names as input and performs a series of queries to either recursive (default) or authoritative DNS servers, the results of which are serialized into JSON format. 
#### Examples Analyze the domain name example.com using your configured DNS resolvers (i.e., in `/etc/resolv.conf`) and store the queries and responses in the file named "example.com.json": ``` $ dnsviz probe example.com > example.com.json ``` Same thing: ``` $ dnsviz probe -o example.com.json example.com ``` Analyze the domain name example.com by querying its authoritative servers directly: ``` $ dnsviz probe -A -o example.com.json example.com ``` Analyze the domain name example.com by querying explicitly-defined authoritative servers, rather than learning the servers through referrals from the IANA root servers: ``` $ dnsviz probe -A \ -x example.com:a.iana-servers.org=199.43.132.53,a.iana-servers.org=[2001:500:8c::53] \ -x example.com:b.iana-servers.org=199.43.133.53,b.iana-servers.org=[2001:500:8d::53] \ -o example.com.json example.com ``` Same, but have `dnsviz probe` resolve the names: ``` $ dnsviz probe -A \ -x example.com:a.iana-servers.org,b.iana-servers.org \ -o example.com.json example.com ``` Analyze the domain name example.com and its entire ancestry by querying authoritative servers and following delegations, starting at the root: ``` $ dnsviz probe -A -a . -o example.com.json example.com ``` Analyze multiple names in parallel (four threads) using explicit recursive resolvers (replace *192.0.1.2* and *2001:db8::1* with legitimate resolver addresses): ``` $ dnsviz probe -s 192.0.2.1,[2001:db8::1] -t 4 -o multiple.json \ example.com sandia.gov verisignlabs.com dnsviz.net ``` ### dnsviz grok `dnsviz grok` takes serialized query results in JSON format (i.e., output from `dnsviz probe`) as input and assesses specified domain names based on their corresponding content in the input. The output is also serialized into JSON format. 
#### Examples Process the query/response output produced by `dnsviz probe`, and store the serialized results in a file named "example.com-chk.json": ``` $ dnsviz grok < example.com.json > example.com-chk.json ``` Same thing: ``` $ dnsviz grok -r example.com.json -o example.com-chk.json example.com ``` Show only info-level information: descriptions, statuses, warnings, and errors: ``` $ dnsviz grok -l info -r example.com.json -o example.com-chk.json ``` Show descriptions only if there are related warnings or errors: ``` $ dnsviz grok -l warning -r example.com.json -o example.com-chk.json ``` Show descriptions only if there are related errors: ``` $ dnsviz grok -l error -r example.com.json -o example.com-chk.json ``` Use root key as DNSSEC trust anchor, to additionally indicate authentication status of responses: ``` $ dig +noall +answer . dnskey | awk '$5 % 2 { print $0 }' > tk.txt $ dnsviz grok -l info -t tk.txt -r example.com.json -o example.com-chk.json ``` Pipe `dnsviz probe` output directly to `dnsviz grok`: ``` $ dnsviz probe example.com | \ dnsviz grok -l info -o example.com-chk.json ``` Same thing, but save the raw output (for re-use) along the way: ``` $ dnsviz probe example.com | tee example.com.json | \ dnsviz grok -l info -o example.com-chk.json ``` Assess multiple names at once with error level: ``` $ dnsviz grok -l error -r multiple.json -o example.com-chk.json ``` ### dnsviz graph `dnsviz graph` takes serialized query results in JSON format (i.e., output from `dnsviz probe`) as input and assesses specified domain names based on their corresponding content in the input. The output is an image file, a `dot` (directed graph) file, or an HTML file, depending on the options passed. #### Examples Process the query/response output produced by `dnsviz probe`, and produce a graph visually representing the results in a png file named "example.com.png". 
``` $ dnsviz graph -Tpng < example.com.json > example.com.png ``` Same thing: ``` $ dnsviz graph -Tpng -o example.com.png example.com < example.com.json ``` Same thing, but produce interactive HTML format: interactive HTML output in a file named "example.com.html": ``` $ dnsviz graph -Thtml < example.com.json > example.com.html ``` Same thing (filename is derived from domain name and output format): ``` $ dnsviz graph -Thtml -O -r example.com.json ``` Use alternate DNSSEC trust anchor: ``` $ dig +noall +answer example.com dnskey | awk '$5 % 2 { print $0 }' > tk.txt $ dnsviz graph -Thtml -O -r example.com.json -t tk.txt ``` Pipe `dnsviz probe` output directly to `dnsviz graph`: ``` $ dnsviz probe example.com | \ dnsviz graph -Thtml -O ``` Same thing, but save the raw output (for re-use) along the way: ``` $ dnsviz probe example.com | tee example.com.json | \ dnsviz graph -Thtml -O ``` Process analysis of multiple domain names, creating an image for each name processed: ``` $ dnsviz graph -Thtml -O -r multiple.json ``` Process analysis of multiple domain names, creating a single image for all names. ``` $ dnsviz graph -Thtml -r multiple.json > multiple.html ``` ### dnsviz print `dnsviz print` takes serialized query results in JSON format (i.e., output from `dnsviz probe`) as input and assesses specified domain names based on their corresponding content in the input. The output is textual output suitable for file or terminal display. 
#### Examples Process the query/response output produced by `dnsviz probe`, and output the results to the terminal: ``` $ dnsviz print < example.com.json ``` Use alternate DNSSEC trust anchor: ``` $ dig +noall +answer example.com dnskey | awk '$5 % 2 { print $0 }' > tk.txt $ dnsviz print -r example.com.json -t tk.txt ``` Pipe `dnsviz probe` output directly to `dnsviz print`: ``` $ dnsviz probe example.com | \ dnsviz print ``` Same thing, but save the raw output (for re-use) along the way: ``` $ dnsviz probe example.com | tee example.com.json | \ dnsviz print ``` ### dnsviz query `dnsviz query` is a wrapper that couples the functionality of `dnsviz probe` and `dnsviz print` into a tool with minimal dig-like usage, used to make analysis queries and return the textual output to terminal or file output in one go. #### Examples Analyze the domain name example.com using the first of your configured DNS resolvers (i.e., in `/etc/resolv.conf`): ``` $ dnsviz query example.com ``` Same, but specify an alternate trust anchor: ``` $ dnsviz query +trusted-key=tk.txt example.com ``` Analyze example.com through the recursive resolver at 192.0.2.1: ``` $ dnsviz query @192.0.2.1 +trusted-key=tk.txt example.com ``` ## Pre-Deployment DNS Testing The examples in this section demonstrate usage of DNSViz for pre-deployment testing. ### Pre-Delegation Testing The following examples involve issuing diagnostic queries for a zone before it is ever delegated. Issue queries against a zone file on the local system (`example.com.zone`). `named(8)` is invoked to serve the file locally: ``` $ dnsviz probe -A -x example.com+:example.com.zone example.com ``` (Note the use of "+", which designates that the parent servers should not be queried for DS records.) Issue queries to a server that is serving the zone: ``` $ dnsviz probe -A -x example.com+:192.0.2.1 example.com ``` (Note that this server doesn't need to be a server in the NS RRset for example.com.) 
Issue queries to the servers in the authoritative NS RRset, specified by name and/or address: ``` $ dnsviz probe -A \ -x example.com+:ns1.example.com=192.0.2.1 \ -x example.com+:ns2.example.com=192.0.2.1,ns2.example.com=[2001:db8::1] \ example.com ``` Specify the names and addresses corresponding to the future delegation NS records and (as appropriate) A/AAAA glue records in the parent zone (com): ``` $ dnsviz probe -A \ -N example.com:ns1.example.com=192.0.2.1 \ -N example.com:ns2.example.com=192.0.2.1,ns2.example.com=[2001:db8::1] \ example.com ``` Also supply future DS records: ``` $ dnsviz probe -A \ -N example.com:ns1.example.com=192.0.2.1 \ -N example.com:ns2.example.com=192.0.2.1,ns2.example.com=[2001:db8::1] \ -D example.com:dsset-example.com. \ example.com ``` ### Pre-Deployment Testing of Authoritative Zone Changes The following examples involve issuing diagnostic queries for a delegated zone before changes are deployed. Issue diagnostic queries for a new zone file that has been created but not yet been deployed (i.e., with changes to DNSKEY or other records): ``` $ dnsviz probe -A -x example.com:example.com.zone example.com ``` (Note the absence of "+", which designates that the parent servers will be queried for DS records.) Issue queries to a server that is serving the new version of the zone: ``` $ dnsviz probe -A -x example.com:192.0.2.1 example.com ``` (Note that this server doesn't need to be a server in the NS RRset for example.com.) ### Pre-Deployment Testing of Delegation Changes The following examples involve issuing diagnostic queries for a delegated zone before changes are deployed to the delegation, glue, or DS records for that zone. 
Specify the names and addresses corresponding to the new delegation NS records and (as appropriate) A/AAAA glue records in the parent zone (com): ``` $ dnsviz probe -A \ -N example.com:ns1.example.com=192.0.2.1 \ -N example.com:ns2.example.com=192.0.2.1,ns2.example.com=[2001:db8::1] \ example.com ``` Also supply the replacement DS records: ``` $ dnsviz probe -A \ -N example.com:ns1.example.com=192.0.2.1 \ -N example.com:ns2.example.com=192.0.2.1,ns2.example.com=[2001:db8::1] \ -D example.com:dsset-example.com. \ example.com ``` ## Docker Container A ready-to-use docker container is available for use. ``` docker pull dnsviz/dnsviz ``` This section only covers Docker-related examples, for more information see the [Usage](#usage) section. ### Simple Usage ``` $ docker run dnsviz/dnsviz help $ docker run dnsviz/dnsviz query example.com ``` ### Working with Files It might be useful to mount a local working directory into the container, especially when combining multiple commands or working with zone files. ``` $ docker run -v "$PWD:/data:rw" dnsviz/dnsviz probe dnsviz.net > probe.json $ docker run -v "$PWD:/data:rw" dnsviz/dnsviz graph -r probe.json -T png -O ``` ### Using a Host Network When running authoritative queries, a host network is recommended. 
``` $ docker run --network host dnsviz/dnsviz probe -4 -A example.com > example.json ``` Otherwise, you're likely to encounter the following error: `dnsviz.query.SourceAddressBindError: Unable to bind to local address (EADDRNOTAVAIL)` ### Interactive Mode When performing complex analyses, where you need to combine multiple DNSViz commands, use bash redirection, etc., it might be useful to run the container interactively: ``` $ docker run --network host -v "$PWD:/data:rw" --entrypoint /bin/sh -ti dnsviz/dnsviz /data # dnsviz --help ``` ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/bin/0000755000175000017500000000000015001473074012712 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/bin/dnsviz0000755000175000017500000001145615001472663014167 0ustar00caseycasey#!/usr/bin/env python3 # # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2015-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . 
# from __future__ import unicode_literals import getopt import importlib import logging import os import sys CMD_DIR1 = 'dnsviz' CMD_DIR2 = 'commands' logging.basicConfig(level=logging.WARNING, format='%(message)s') logger = logging.getLogger() def check_deps(): # check dnspython dependency try: import dns.name except ImportError: sys.stderr.write('Error: dnspython does not appear to be installed\n') sys.exit(1) def usage(err=None): if err is not None: err += '\n\n' else: err = '' sys.stderr.write('''%sUsage: dnsviz [options] [args] Options: -v - Show version information and exit. -p - Add path to the python path. Commands: probe - Issue diagnostic DNS queries. grok - Assess diagnostic DNS queries. graph - Graph the assessment of diagnostic DNS queries. print - Process diagnostic DNS queries to textual output. query - Assess a DNS query. help [] - Show usage for a command. ''' % (err)) def main(): check_deps() try: opts, args = getopt.getopt(sys.argv[1:], 'vp:') except getopt.GetoptError as e: usage(str(e) + '\n') sys.exit(1) opts = dict(opts) if '-p' in opts: sys.path.insert(0, opts['-p']) # try importing the main module, to make sure it is reachable with the # current path. 
try: dnsviz = importlib.import_module('dnsviz') except ImportError: sys.stderr.write('Could not find the dnsviz module.\n') sys.exit(1) if '-v' in opts: try: vers = dnsviz.dist.version except AttributeError: sys.stderr.write('Version information not available.\n') sys.exit(1) sys.stdout.write(dnsviz.dist.version + '\n') sys.exit(0) if len(args) < 1: usage() sys.exit(0) if args[0] == 'help': if len(args) < 2: usage() sys.exit(0) command = args[1] else: command = args[0] # now try importing just the commands module to make sure # dnsviz is properly reachable with the current path importlib.import_module('%s.%s' % (CMD_DIR1, CMD_DIR2)) # now try importing the module for the actual command try: mod = importlib.import_module('%s.%s.%s' % (CMD_DIR1, CMD_DIR2, command)) except ImportError: # See if the filename corresponding to the module we were trying to # import was found in the stack. If so, then the error is with # importing something from inside that file. Otherwise, it was that we # couldn't find the file corresponding to the command, so it was thus # an illegitimate command. 
exc_frame = sys.exc_info()[2] frame1 = exc_frame.tb_next.tb_next found_file = False while frame1 is not None: filename = frame1.tb_frame.f_code.co_filename cmd_dir2, filename = os.path.split(filename) if filename.endswith('.py'): filename = filename[:-3] cmd_dir1, cmd_dir2 = os.path.split(cmd_dir2) cmd_dir0, cmd_dir1 = os.path.split(cmd_dir1) if cmd_dir1 == CMD_DIR1 and \ cmd_dir2 == CMD_DIR2 and \ filename == command: found_file = True break frame1 = frame1.tb_next if found_file: raise else: sys.stderr.write('Invalid command: %s\n' % command) sys.exit(1) if args[0] == 'help': if hasattr(mod, 'build_helper'): helper = mod.build_helper(logger, sys.argv[0], command) helper.parser.print_help() elif hasattr(mod, 'usage'): mod.usage() else: sys.stderr.write('No help available for command: %s\n' % command) else: mod.main(args) if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/contrib/0000755000175000017500000000000015001473074013602 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/digviz0000755000175000017500000007242115001472663015035 0ustar00caseycasey#!/usr/bin/env python # # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import datetime import errno import socket import sys # python3/python2 dual compatibility try: import urllib.parse except ImportError: import urlparse else: urlparse = urllib.parse import dns.flags, dns.exception, dns.name, dns.opcode, dns.rdataclass, dns.rdatatype from dnsviz.ipaddr import IPAddr from dnsviz import query as Q from dnsviz import resolver as Resolver from dnsviz import transport from dnsviz.util import get_trusted_keys tm = transport.DNSQueryTransportManager() class CommandLineException(Exception): pass class SemanticException(Exception): pass def _get_nameservers_for_name(addr): nameservers = [] try: addrinfo = socket.getaddrinfo(addr, 53, 0, 0, socket.IPPROTO_TCP) except socket.gaierror: raise SemanticException('Unable to resolve "%s"' % addr) else: for item in addrinfo: nameservers.append(IPAddr(item[4][0])) return nameservers class DigCommandLineQuery: def __init__(self, qname, rdtype, rdclass): self.qname = qname self.rdtype = rdtype self.rdclass = rdclass self.nameservers = [] self.query_options = [] # default query options self.handlers = [Q.UseTCPOnTCFlagHandler()] self.flags = dns.flags.RD | dns.flags.AD self.edns = 0 self.edns_max_udp_payload = 4096 self.edns_flags = 0 self.edns_options = [] self.tcp = False self.query_timeout = 5.0 self.max_attempts = 3 self.lifetime = None self.show_additional = True self.show_answer = True self.show_authority = True self.show_cmd = True self.show_comments = True self.show_question = True self.show_stats = True self.show_class = True self.show_ip_port = False self.multiline = False self.show_rr_comments = False self.short = False self.trusted_keys = () self.show_ttl = True self.lg_url = None self.lg_factory = None def process_query_options(self, global_options): for arg in global_options + self.query_options: if arg in ('+aaflag', '+aaonly', '+aa',): 
self.flags |= dns.flags.AA elif arg in ('+noaaflag', '+noaaonly', '+noaa'): self.flags &= ~dns.flags.AA elif arg == '+additional': self.show_additional = True elif arg == '+noadditional': self.show_additional = False elif arg in ('+adflag', '+ad'): self.flags |= dns.flags.AD elif arg in ('+noadflag', '+noad'): self.flags &= ~dns.flags.AD elif arg == '+all': self.show_additional = True self.show_answer = True self.show_authority = True self.show_cmd = True self.show_comments = True self.show_question = True self.show_stats = True elif arg == '+noall': self.show_additional = False self.show_answer = False self.show_authority = False self.show_cmd = False self.show_comments = False self.show_question = False self.show_stats = False elif arg == '+answer': self.show_answer = True elif arg == '+noanswer': self.show_answer = False elif arg == '+authority': self.show_authority = True elif arg == '+noauthority': self.show_authority = False #TODO +[no]besteffort elif arg.startswith('+bufsize') and \ (len(arg) <= 8 or arg[8] == '='): if self.edns < 0: self.edns = 0 try: opt, arg = arg.split('=') self.edns_max_udp_payload = int(arg) if self.edns_max_udp_payload < 0 or self.edns_max_udp_payload > 65535: raise ValueError() except ValueError: raise CommandLineException('+bufsize requires an integer argument between 0 and 65535') elif arg in ('+cdflag', '+cd'): self.flags |= dns.flags.CD elif arg in ('+nocdflag', '+nocd'): self.flags &= ~dns.flags.CD elif arg == '+cl': self.show_class = True elif arg == '+nocl': self.show_class = False elif arg == '+cmd': self.show_cmd = True elif arg == '+nocmd': self.show_cmd = False elif arg == '+comments': self.show_comments = True elif arg == '+nocomments': self.show_comments = False #TODO +[no]crypto #TODO +[no]defname elif arg == '+dnssec': if self.edns < 0: self.edns = 0 self.edns_flags |= dns.flags.DO elif arg == '+nodnssec': self.edns_flags &= ~dns.flags.DO #TODO +domain=somename elif arg.startswith('+edns') and \ (len(arg) <= 5 or 
arg[5] == '='): try: opt, arg = arg.split('=') self.edns = int(arg) except ValueError: raise CommandLineException('+edns requires an integer argument greater than or equal to 0') elif arg == '+noedns': self.edns = -1 #TODO +[no]expire #TODO +[no]fail elif arg == '+identify': self.show_ip_port = True elif arg == '+noidentify': self.show_ip_port = False elif arg == '+ignore': self.handlers = [] elif arg == '+noignore': self.handlers = [Q.UseTCPOnTCFlagHandler()] #TODO +[no]keepopen elif arg == '+multiline': self.multiline = True elif arg == '+nomultiline': self.multiline = False #TODO +ndots=D elif arg == '+nsid': if self.edns < 0: self.edns = 0 if not [x for x in filter(lambda x: x.otype == dns.edns.NSID, self.edns_options)]: self.edns_options.append(dns.edns.GenericOption(dns.edns.NSID, b'')) elif arg == '+nonsid': l = [x for x in filter(lambda x: x.otype == dns.edns.NSID, self.edns_options)] self.edns_options.remove(dns.edns.GenericOption(dns.edns.NSID, b'')) #TODO +[no]nssearch #TODO +[no]onesoa #TODO +[no]qr elif arg == '+question': self.show_question = True elif arg == '+noquestion': self.show_question = False elif arg in ('+recurse', '+rec'): self.flags |= dns.flags.RD elif arg in ('+norecurse', '+norec'): self.flags &= ~dns.flags.RD elif arg.startswith('+retry') and \ (len(arg) <= 6 or arg[6] == '='): try: opt, arg = arg.split('=') self.max_attempts = int(arg) + 1 if self.max_attempts < 1: self.max_attempts = 1 except ValueError: raise CommandLineException('+retry requires an integer argument') elif arg == '+rrcomments': self.show_rr_comments = True elif arg == '+norrcomments': self.show_rr_comments = False #TODO +[no]search elif arg == '+short': self.short = True elif arg == '+noshort': self.short = False #TODO +[no]showsearch #TODO +[no]sigchase #TODO +[no]sit[=####] #TODO +split=W elif arg == '+stats': self.show_stats = True elif arg == '+nostats': self.show_stats = False #TODO +[no]subnet=addr/prefix elif arg in ('+tcp', '+vc'): self.tcp = True elif arg 
in ('+notcp', '+novc'): self.tcp = False elif arg.startswith('+timeout') and \ (len(arg) <= 8 or arg[8] == '='): try: opt, arg = arg.split('=') self.query_timeout = float(arg) if self.query_timeout < 1.0: self.query_timeout = 1.0 except ValueError: raise CommandLineException('+timeout requires a numerical argument') #TODO +[no]topdown #TODO +[no]trace #TODO +[no]tries elif arg.startswith('+tries') and \ (len(arg) <= 6 or arg[6] == '='): try: opt, arg = arg.split('=') self.max_attempts = int(arg) if self.max_attempts < 1: self.max_attempts = 1 except ValueError: raise CommandLineException('+tries requires an integer argument') elif arg.startswith('+trusted-key') and \ (len(arg) <= 12 or arg[12] == '='): try: opt, arg = arg.split('=') if not arg: raise ValueError() except ValueError: raise CommandLineException('+trusted-key requires a filename argument.') else: try: tk_str = open(arg).read() except IOError as e: raise CommandLineException('%s: "%s"' % (e.strerror, arg)) try: self.trusted_keys = get_trusted_keys(tk_str) except dns.exception.DNSException: raise SemanticException('There was an error parsing the trusted keys file: "%s"' % arg) elif arg in ('+ttlid', '+ttl'): self.show_ttl = True elif arg in ('+nottlid', '+nottl'): self.show_ttl = False elif arg.startswith('+lg') and \ (len(arg) <= 3 or arg[3] == '='): try: opt, arg = arg.split('=') if not arg: raise ValueError() except ValueError: raise CommandLineException('+lg requires a URL argument.') else: self.lg_url = arg else: raise CommandLineException('Option "%s" not recognized.' 
% arg) def process_nameservers(self, nameservers, use_ipv4, use_ipv6): processed_nameservers = [] for addr in self.nameservers: processed_nameservers.extend(_get_nameservers_for_name(addr)) if not use_ipv4: processed_nameservers = [x for x in processed_nameservers if x.version != 4] if not use_ipv6: processed_nameservers = [x for x in processed_nameservers if x.version != 6] self.nameservers = nameservers + processed_nameservers def process_looking_glass(self, looking_glass_cache, insecure): if self.lg_url is None: return if self.lg_url not in looking_glass_cache: # check that version is >= 2.7.9 if HTTPS is requested if self.lg_url.startswith('https'): vers0, vers1, vers2 = sys.version_info[:3] if (2, 7, 9) > (vers0, vers1, vers2): sys.stderr.write('python version >= 2.7.9 is required to use a DNS looking glass with HTTPS.\n') sys.exit(1) url = urlparse.urlparse(self.lg_url) if url.scheme in ('http', 'https'): fact = transport.DNSQueryTransportHandlerHTTPFactory(self.lg_url, insecure=insecure) elif url.scheme == 'ws': if url.hostname is not None: usage('WebSocket URL must designate a local UNIX domain socket.') sys.exit(1) fact = transport.DNSQueryTransportHandlerWebSocketServerFactory(url.path) elif url.scheme == 'ssh': fact = transport.DNSQueryTransportHandlerRemoteCmdFactory(self.lg_url) else: usage('Unsupported URL scheme: "%s"' % self.lg_url) sys.exit(1) looking_glass_cache[self.lg_url] = fact self.lg_factory = looking_glass_cache[self.lg_url] def _get_resolver(self, options): class CustomQuery(Q.DNSQueryFactory): flags = self.flags edns = self.edns edns_max_udp_payload = self.edns_max_udp_payload edns_flags = self.edns_flags edns_options = self.edns_options tcp = self.tcp response_handlers = self.handlers if self.lg_factory is not None: th_factories = (self.lg_factory,) else: th_factories = None return Resolver.Resolver(self.nameservers, CustomQuery, timeout=self.query_timeout, max_attempts=self.max_attempts, lifetime=self.lifetime, shuffle=False, 
client_ipv4=options['client_ipv4'], client_ipv6=options['client_ipv6'], port=options['port'], transport_manager=tm, th_factories=th_factories) def _get_name(self): #TODO qualify name, if necessary #TODO check name syntax, etc. return dns.name.from_text(self.qname) def _get_rdtype(self, options): if self.rdtype is None: return options['rdtype'] else: return self.rdtype def _get_rdclass(self, options): if self.rdclass is None: return options['rdclass'] else: return self.rdclass def query(self, options): res = self._get_resolver(options) qname = self._get_name() rdtype = self._get_rdtype(options) rdclass = self._get_rdclass(options) return res.query(qname, rdtype, rdclass) def display(self, response, server, options): if response is None: return ';; no servers were queried\n' elif response.message is not None: if self.short: s = '' if self.show_ip_port: identity = ' from server %s in %d ms.' % (server, int(response.response_time*1000)) else: identity = '' for rrset in response.message.answer: for rr in rrset: s += '%s%s\n' % (rr.to_text(), identity) return s # get counts if response.message.question: question_ct = 1 else: question_ct = 0 answer_ct = 0 for i in response.message.answer: answer_ct += len(i) authority_ct = 0 for i in response.message.authority: authority_ct += len(i) additional_ct = 0 for i in response.message.additional: additional_ct += len(i) if response.message.edns >= 0: additional_ct += 1 #TODO show_cmd, multiline, show_rr_comments s = '' if self.show_comments: s += ';; Got answer:\n' s += ';; ->>HEADER<<- opcode: %s, status: %s, id: %d\n' % (dns.opcode.to_text(response.message.opcode()), dns.rcode.to_text(response.message.rcode()), response.message.id) s += ';; flags: %s; QUERY: %d, ANSWER: %d, AUTHORITY: %d, ADDITIONAL: %d\n' % (dns.flags.to_text(response.message.flags).lower(), question_ct, answer_ct, authority_ct, additional_ct) if (self.flags & dns.flags.RD) and not (response.message.flags & dns.flags.RA): s += ';; WARNING: recursion requested 
but not available\n' s += '\n' if response.message.edns >= 0: s += ';; OPT PSEUDOSECTION:\n' s += '; EDNS: version: %d, flags: %s; udp: %d\n' % (response.message.edns, dns.flags.edns_to_text(response.message.ednsflags).lower(), response.message.payload) for opt in response.message.options: chars = [] if opt.otype == dns.edns.NSID: s += '; NSID:' for b in opt.data: s += ' %02x' % b chars.append(chr(b)) for c in chars: s += ' (%s)' % c s += '\n' if response.message.question and self.show_question: if self.show_comments: s += ';; QUESTION SECTION:\n' s += ';%s %s %s\n' % (response.message.question[0].name, dns.rdataclass.to_text(response.message.question[0].rdclass), dns.rdatatype.to_text(response.message.question[0].rdtype)) if self.show_comments: s += '\n' for section, title in ((response.message.answer, 'ANSWER'), (response.message.authority, 'AUTHORITY'), (response.message.additional, 'ADDITIONAL')): if section and getattr(self, 'show_%s' % title.lower()): if self.show_comments: s += ';; %s SECTION:\n' % title for rrset in section: for rr in rrset: if self.show_ttl: ttl = '\t%d' % rrset.ttl else: ttl = '' if self.show_class: cls = '\t%s' % dns.rdataclass.to_text(rrset.rdclass) else: cls = '' s += '%s\t%s%s\t%s\t%s\n' % (rrset.name, ttl, cls, dns.rdatatype.to_text(rrset.rdtype), rr.to_text()) if self.show_comments: s += '\n' if self.show_stats: s += ';; Query time: %d msec\n' % int(response.response_time*1000) s += ';; SERVER: %s#%d\n' % (server, options['port']) s += ';; WHEN: %s\n' % datetime.datetime.now().strftime('%a %b %d %H:%M:%S %Y UTC') s += ';; MSG SIZE rcvd: %d\n' % response.msg_size return s elif response.error in (Q.RESPONSE_ERROR_TIMEOUT, Q.RESPONSE_ERROR_NETWORK_ERROR): return ';; connection timed out; no servers could be reached\n' else: return ';; the response from %s was malformed\n' % server def query_and_display(self, options, filehandle): try: server, response = self.query(options) except transport.RemoteQueryTransportError as e: 
sys.stderr.write('%s\n' % e) else: output = self.display(response, server, options) filehandle.write(output) filehandle.flush() class DigCommandLine: def __init__(self, args): self.args = args self.arg_index = 0 self.options = { 'rdtype': None, 'rdclass': None, 'use_ipv4': None, 'use_ipv6': None, 'client_ipv4': None, 'client_ipv6': None, 'insecure': None, 'port': 53, } self.nameservers = [] self.global_query_options = ['+cmd'] self.queries = [] self._process_args() self._process_network() self._process_nameservers() if not self.queries: self.queries.append(DigCommandLineQuery('.', dns.rdatatype.NS, dns.rdataclass.IN)) looking_glass_cache = {} for q in self.queries: q.process_nameservers(self.nameservers, self.options['use_ipv4'], self.options['use_ipv6']) q.process_query_options(self.global_query_options) q.process_looking_glass(looking_glass_cache, self.options['insecure']) if not q.nameservers: raise SemanticException('No nameservers to query') if self.options['rdtype'] is None: self.options['rdtype'] = dns.rdatatype.A if self.options['rdclass'] is None: self.options['rdclass'] = dns.rdataclass.IN def query_and_display(self): for q in self.queries: q.query_and_display(self.options, sys.stdout) def _get_arg(self, has_arg): try: if len(self.args[self.arg_index]) > 2: if not has_arg: raise CommandLineException('"%s" option does not take arguments' % self.args[self.arg_index][:2]) return self.args[self.arg_index][2:] else: if not has_arg: return None else: self.arg_index += 1 if self.arg_index >= len(self.args): raise CommandLineException('"%s" option requires an argument' % self.args[self.arg_index - 1]) return self.args[self.arg_index] finally: self.arg_index += 1 def _add_server_to_options(self, query): addr = self.args[self.arg_index][1:] self.arg_index += 1 if query is None: self.nameservers.append(addr) else: query.nameservers.append(addr) def _add_reverse_query(self): arg = self._get_arg(True) try: addr = IPAddr(arg) except ValueError: raise 
SemanticException('Invalid IP address: "%s"' % arg) else: qname = addr.arpa_name() return DigCommandLineQuery(qname, dns.rdatatype.PTR, dns.rdataclass.IN) def _add_qname_from_opt(self): qname = self._get_arg(True) return DigCommandLineQuery(qname, None, None) def _add_default_option(self): if self.options['rdclass'] is None: try: self.options['rdclass'] = dns.rdataclass.from_text(self.args[self.arg_index]) except dns.rdataclass.UnknownRdataclass: pass else: self.arg_index += 1 return True if self.options['rdtype'] is None: try: self.options['rdtype'] = dns.rdatatype.from_text(self.args[self.arg_index]) except dns.rdatatype.UnknownRdatatype: pass else: self.arg_index += 1 return True return False def _add_qname(self): qname = self.args[self.arg_index] self.arg_index += 1 # check for optional type try: rdtype = dns.rdatatype.from_text(self.args[self.arg_index]) except (IndexError, dns.rdatatype.UnknownRdatatype): # no type detected; use default rdtype/rdclass rdtype = None rdclass = None else: self.arg_index += 1 # now check for optional class try: rdclass = dns.rdataclass.from_text(self.args[self.arg_index]) except (IndexError, dns.rdataclass.UnknownRdataclass): # no class detected; use default rdclass rdclass = None else: self.arg_index += 1 return DigCommandLineQuery(qname, rdtype, rdclass) def _add_option(self): if self.args[self.arg_index].startswith('-b'): arg = self._get_arg(True) try: addr = IPAddr(arg) except ValueError: raise SemanticException('Invalid IP address: "%s"' % arg) if addr.version == 6: family = socket.AF_INET6 else: family = socket.AF_INET try: s = socket.socket(family) s.bind((addr, 0)) except socket.error as e: if e.errno == errno.EADDRNOTAVAIL: raise SemanticException('Cannot bind to specified IP address: "%s"' % addr) else: del s if addr.version == 6: self.options['client_ipv6'] = addr else: self.options['client_ipv4'] = addr elif self.args[self.arg_index].startswith('-c'): arg = self._get_arg(True) try: self.options['rdclass'] = 
dns.rdataclass.from_text(arg) except dns.rdataclass.UnknownRdataclass: raise SemanticException('Unknown class: "%s".' % arg) #TODO -f #TODO -k #TODO -m elif self.args[self.arg_index].startswith('-p'): arg = self._get_arg(True) try: self.options['port'] = int(arg) if self.options['port'] < 0 or self.options['port'] > 65535: raise ValueError() except ValueError: raise CommandLineException('-p requires an integer argument between 0 and 65535') #TODO -v elif self.args[self.arg_index].startswith('-t'): arg = self._get_arg(True) try: self.options['rdtype'] = dns.rdatatype.from_text(arg) except dns.rdatatype.UnknownRdatatype: raise SemanticException('Unknown type: "%s".' % arg) #TODO -y elif self.args[self.arg_index].startswith('-6'): self._get_arg(False) self.options['use_ipv6'] = True elif self.args[self.arg_index].startswith('-4'): self._get_arg(False) self.options['use_ipv4'] = True elif self.args[self.arg_index].startswith('-k'): self._get_arg(False) self.options['insecure'] = True else: raise CommandLineException('Option "%s" not recognized.' 
% self.args[self.arg_index][:2]) def _add_query_option(self, query): if query is None: self.global_query_options.append(self.args[self.arg_index]) else: query.query_options.append(self.args[self.arg_index]) self.arg_index += 1 def _process_args(self): query = None while self.arg_index < len(self.args): # server address if self.args[self.arg_index][0] == '@': self._add_server_to_options(query) # reverse lookup elif self.args[self.arg_index].startswith('-x'): query = self._add_reverse_query() self.queries.append(query) # forward lookup (with -q) elif self.args[self.arg_index].startswith('-q'): query = self._add_qname_from_opt() self.queries.append(query) # options elif self.args[self.arg_index][0] == '-': self._add_option() # query options elif self.args[self.arg_index][0] == '+': self._add_query_option(query) # global query class/type elif query is None and self._add_default_option(): pass # name to be queried else: query = self._add_qname() self.queries.append(query) def _process_network(self): if self.options['use_ipv4'] is None and self.options['use_ipv6'] is None: self.options['use_ipv4'] = True self.options['use_ipv6'] = True if not self.options['use_ipv4']: self.options['use_ipv4'] = False if not self.options['use_ipv6']: self.options['use_ipv6'] = False def _process_nameservers(self): if not self.nameservers: processed_nameservers = Resolver.get_standard_resolver()._servers else: processed_nameservers = [] for addr in self.nameservers: processed_nameservers.extend(_get_nameservers_for_name(addr)) if not self.options['use_ipv4']: processed_nameservers = [x for x in processed_nameservers if x.version != 4] if not self.options['use_ipv6']: processed_nameservers = [x for x in processed_nameservers if x.version != 6] self.nameservers = processed_nameservers def main(): try: q = DigCommandLine(sys.argv[1:]) q.query_and_display() except (CommandLineException, SemanticException) as e: sys.stderr.write('%s\n' % e) sys.exit(1) except KeyboardInterrupt: pass # 
explicitly close tm here finally: tm.close() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3825305 dnsviz-0.11.1/contrib/dnsviz-lg-java/0000755000175000017500000000000015001473074016436 5ustar00caseycasey././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3825305 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/0000755000175000017500000000000015001473074017224 5ustar00caseycasey././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3825305 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/0000755000175000017500000000000015001473074020541 5ustar00caseycasey././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/applet/0000755000175000017500000000000015001473074022026 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/applet/DNSLookingGlassApplet.java0000644000175000017500000000436015001472663027006 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . 
 */
package net.dnsviz.applet;

import java.applet.Applet;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.UnknownHostException;

import net.dnsviz.transport.DNSQueryTransportHandler;
import net.dnsviz.util.DNSSettings;
import net.dnsviz.lookingglass.DNSLookingGlass;

/**
 * Applet wrapper around {@link DNSLookingGlass}.  Each public entry point
 * delegates to the looking glass and, instead of propagating exceptions to
 * the (JavaScript) caller, records them in {@code err} for later retrieval
 * via {@link #hasError()}, {@link #getError()}, and {@link #getErrorTrace()}.
 */
public class DNSLookingGlassApplet extends Applet {
  static final long serialVersionUID = 0;

  /* the most recent exception raised by a delegated call, or null */
  private Exception err = null;
  /* the looking glass instance all calls are delegated to */
  private DNSLookingGlass lg = null;

  public DNSLookingGlassApplet() {
    lg = new DNSLookingGlass();
  }

  /**
   * Build a transport handler for one DNS query; see
   * {@link DNSLookingGlass#getDNSQueryTransportHandler}.
   * Returns null (and records the exception) on failure.
   */
  public DNSQueryTransportHandler getDNSQueryTransportHandler(String req, String dst, int dport, String src, int sport, long timeout, boolean tcp) {
    err = null;
    try {
      return lg.getDNSQueryTransportHandler(req, dst, dport, src, sport, timeout, tcp);
    } catch (Exception ex) {
      err = ex;
      return null;
    }
  }

  /** Execute the given queries, recording any exception instead of throwing. */
  public void executeQueries(DNSQueryTransportHandler [] qths) {
    err = null;
    try {
      lg.executeQueries(qths);
    } catch (Exception ex) {
      err = ex;
    }
  }

  /** Return the DNS servers configured on the local system. */
  public InetAddress [] getDNSServers() {
    return new DNSSettings().getDNSServers();
  }

  /** True if the most recent delegated call raised an exception. */
  public boolean hasError() {
    return err != null;
  }

  /** The most recent exception, or null if the last call succeeded. */
  public Exception getError() {
    return err;
  }

  /** Stack trace of the most recent exception as a string, or null. */
  public String getErrorTrace() {
    if (err == null) {
      return null;
    }
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    err.printStackTrace(pw);
    return sw.toString();
  }
}
 * Created by Casey Deccio (casey@deccio.net)
 *
 * Copyright 2016 VeriSign, Inc.
 *
 * DNSViz is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * DNSViz is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with DNSViz.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.dnsviz.lookingglass;

import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.UnknownHostException;

import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONException;

import net.dnsviz.transport.DNSQueryTransportHandler;
import net.dnsviz.transport.DNSQueryTransportHandlerTCP;
import net.dnsviz.transport.DNSQueryTransportHandlerUDP;
import net.dnsviz.transport.DNSQueryTransportManager;
import net.dnsviz.util.Base64Decoder;
import net.dnsviz.util.Base64Encoder;
import net.dnsviz.websocket.WebSocketClient;

/**
 * DNS looking glass: accepts a JSON document describing DNS queries
 * (version 1.0 of the DNSViz looking-glass wire format), executes them,
 * and returns the encoded responses as JSON.  Can be driven over a
 * WebSocket connection (see {@link #main}) or called directly.
 */
public class DNSLookingGlass {
  public DNSLookingGlass() {
  }

  /**
   * Parse the "requests" array of a version-1.0 JSON request document into
   * transport handlers.  "src" and "sport" are optional per request
   * (defaulting to null/0, i.e., chosen by the OS).
   *
   * @throws JSONException if the document's version is not 1.x or a
   *         required field is missing/mistyped
   * @throws UnknownHostException if a source/destination name cannot be resolved
   */
  protected DNSQueryTransportHandler [] getDNSQueryTransportHandlers(JSONObject obj) throws JSONException, UnknownHostException {
    DNSQueryTransportHandler [] ret;
    JSONArray requests;
    String [] vers;
    String src;
    int sport;
    JSONObject reqObj;
    // accept version 1.0 only: major must be 1, minor must be 0
    vers = Double.toString(obj.getDouble("version")).split("\\.");
    if (Integer.parseInt(vers[0]) != 1 || Integer.parseInt(vers[1]) > 0) {
      throw new JSONException("Version of JSON input is invalid");
    }
    requests = obj.getJSONArray("requests");
    ret = new DNSQueryTransportHandler [requests.length()];
    for (int i = 0; i < requests.length(); i++) {
      reqObj = requests.getJSONObject(i);
      if (reqObj.has("src")) {
        src = reqObj.getString("src");
      } else {
        src = null;
      }
      if (reqObj.has("sport")) {
        sport = reqObj.getInt("sport");
      } else {
        sport = 0;
      }
      ret[i] = getDNSQueryTransportHandler(reqObj.getString("req"), reqObj.getString("dst"), reqObj.getInt("dport"), src, sport, reqObj.getLong("timeout"), reqObj.getBoolean("tcp"));
    }
    return ret;
  }

  /**
   * Serialize executed handlers into a version-1.0 JSON response document.
   * Note: JSONObject.put(key, null) leaves the key absent, so "src"/"sport"
   * are omitted when unknown.
   */
  protected JSONObject getEncodedResponses(DNSQueryTransportHandler [] qths) {
    JSONObject ret;
    JSONObject response;
    JSONArray responses = new JSONArray();
    for (int i = 0; i < qths.length; i++) {
      response = new JSONObject();
      response.put("res", qths[i].getEncodedResponse());
      if (qths[i].getError() != null) {
        response.put("err", qths[i].getError());
        if (qths[i].getErrno() != null) {
          response.put("errno", qths[i].getErrno());
        }
      }
      if (qths[i].getSource() != null) {
        response.put("src", qths[i].getSource().getHostAddress());
      } else {
        response.put("src", (String)null);
      }
      if (qths[i].getSPort() != 0) {
        response.put("sport", qths[i].getSPort());
      } else {
        response.put("sport", (String)null);
      }
      response.put("time_elapsed", qths[i].timeElapsed());
      responses.put(response);
    }
    ret = new JSONObject();
    ret.put("version", "1.0");
    ret.put("responses", responses);
    return ret;
  }

  /**
   * Build one transport handler.  The request payload {@code req} is
   * base64-encoded DNS wire format; {@code src} may be null and
   * {@code sport} 0 to let the OS choose.
   */
  public DNSQueryTransportHandler getDNSQueryTransportHandler(String req, String dst, int dport, String src, int sport, long timeout, boolean tcp) throws UnknownHostException {
    Base64Decoder d = new Base64Decoder();
    byte [] byteReq = d.decode(req.getBytes());
    InetAddress srcAddr = null;
    InetAddress dstAddr = null;
    if (dst != null) {
      dstAddr = InetAddress.getByName(dst);
    }
    if (src != null) {
      srcAddr = InetAddress.getByName(src);
    }
    if (tcp) {
      return new DNSQueryTransportHandlerTCP(byteReq, dstAddr, dport, srcAddr, sport, timeout);
    } else {
      return new DNSQueryTransportHandlerUDP(byteReq, dstAddr, dport, srcAddr, sport, timeout);
    }
  }

  /**
   * Run all queries to completion; finalize() rewinds each handler's
   * buffers so the results can be read.
   */
  public void executeQueries(DNSQueryTransportHandler [] qths) throws IOException {
    int i;
    DNSQueryTransportManager qtm = new DNSQueryTransportManager();
    qtm.query(qths);
    for (i = 0; i < qths.length; i++) {
      qths[i].finalize();
    }
  }

  /** Serve request/response rounds over a WebSocket until the peer closes. */
  protected void interact(WebSocketClient ws) throws IOException {
    byte [] input;
    while ((input = ws.read()).length > 0) {
      ws.write(run(new String(input)).getBytes());
    }
  }

  /**
   * Process one JSON request document and return the JSON response as a
   * string.  Any exception is converted into an {"error": trace} document
   * rather than propagated.
   */
  public String run(String json) {
    JSONObject ret;
    try {
      DNSQueryTransportHandler [] qths = getDNSQueryTransportHandlers(new JSONObject(json));
      executeQueries(qths);
      return getEncodedResponses(qths).toString();
    } catch (Exception ex) {
      ret = new JSONObject();
      ret.put("version", "1.0");
      ret.put("error", getErrorTrace(ex));
      return ret.toString();
    }
  }

  /** Render an exception's stack trace to a string. */
  protected String getErrorTrace(Exception err) {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    err.printStackTrace(pw);
    return sw.toString();
  }

  /**
   * Usage: host port path origin — connect a WebSocket client to the given
   * endpoint and serve looking-glass requests over it.
   */
  public static void main(String [] args) throws IOException {
    WebSocketClient ws = new WebSocketClient(args[0], Integer.parseInt(args[1]), args[2], args[3]);
    DNSLookingGlass lg = new DNSLookingGlass();
    lg.interact(ws);
  }
}
* * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.transport; import java.io.IOException; import java.net.ConnectException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.NoRouteToHostException; import java.net.PortUnreachableException; import java.net.BindException; import java.net.SocketAddress; import java.net.SocketException; import java.nio.ByteBuffer; import java.nio.channels.NetworkChannel; import java.nio.channels.SelectableChannel; import java.nio.channels.WritableByteChannel; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Date; import java.util.Random; import java.net.UnknownHostException; import net.dnsviz.util.Base64Encoder; import net.dnsviz.util.Base64Decoder; public abstract class DNSQueryTransportHandler { private final static int MAX_PORT_BIND_ATTEMPTS = 10; protected ByteBuffer req = null; protected ByteBuffer res = null; protected String err = null; protected String errno = null; protected InetAddress dst = null; protected int dport = 0; protected InetAddress src = null; protected int sport = 0; protected NetworkChannel channel = null; protected long timeout = 0; protected long expiration = 0; protected long startTime = 0; protected long endTime = 0; protected DNSQueryTransportHandler(byte [] req, InetAddress dst, int dport, InetAddress src, int sport, long timeout) { this.dst = dst; this.dport = dport; this.src = src; this.sport = sport; this.timeout = timeout; initRequestBuffer(req); } public abstract int getInitialSelectionOp(); public abstract int getStartOfReqPayload(); public NetworkChannel 
getChannel() { return channel; } public long getExpiration() { return expiration; } public boolean hasError() { return err != null; } public void setError(IOException ex) throws IOException { if (ex instanceof SocketException) { String m = ex.getMessage(); if (ex instanceof ConnectException) { if (m.contains("timed out")) { err = "TIMEOUT"; } else if (m.contains("refused")) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.ECONNREFUSED); } } else if (ex instanceof BindException) { if (m.contains("an't assign requested address")) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.EADDRNOTAVAIL); } else if (m.contains("ddress already in use")) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.EADDRINUSE); } } else if (ex instanceof NoRouteToHostException) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.EHOSTUNREACH); } else if (ex instanceof PortUnreachableException) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.ECONNREFUSED); } else if (m.contains("ermission denied")) { err = "NETWORK_ERROR"; errno = Errno.getName(Errno.EACCES); } } /* if we weren't able to identify the error, then throw it */ if (err == null) { throw ex; } } public void setError(int code) { err = "NETWORK_ERROR"; errno = Errno.getName(code); } public void setError(String name) { err = name; } public String getError() { return err; } public String getErrno() { return errno; } public long timeElapsed() { return endTime - startTime; } public long getSPort() { return sport; } public InetAddress getSource() { return src; } protected abstract void initRequestBuffer(byte [] req); protected void initResponseBuffer() { //TODO start more conservative and dynamically grow if more buffer space is //needed res = ByteBuffer.allocate(65536); } protected abstract void createSocket() throws IOException; protected void configureSocket() throws IOException { ((SelectableChannel)channel).configureBlocking(false); } protected void bindSocket() throws IOException { class bindAction implements 
PrivilegedExceptionAction { private int port = 0; public Object run() throws IOException { channel.bind(new InetSocketAddress(src, port)); return null; } public void setPort(int port) { this.port = port; } } bindAction a = new bindAction(); if (sport > 0) { a.setPort(sport); try { AccessController.doPrivileged(a); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { throw (IOException)ex; } else { throw (RuntimeException)ex; } } } else { Random r = new Random(); int i = 0; while (true) { // 65536 - 1024 = 64512 a.setPort(r.nextInt(64512) + 1024); try { AccessController.doPrivileged(a); break; } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof BindException) { if (++i > MAX_PORT_BIND_ATTEMPTS || !ex.getMessage().contains("ddress already in use")) { throw (BindException)ex; } } else if (ex instanceof IOException) { throw (IOException)ex; } else { throw (RuntimeException)ex; } } } } } public void prepare() throws IOException { initResponseBuffer(); try { createSocket(); configureSocket(); bindSocket(); setStart(); connect(); } catch (IOException ex) { setError(ex); cleanup(); } } protected void setSocketInfo() { InetSocketAddress addr; class getAddrAction implements PrivilegedExceptionAction { public InetSocketAddress run() throws IOException { return (InetSocketAddress)channel.getLocalAddress(); } } getAddrAction a = new getAddrAction(); try { addr = AccessController.doPrivileged(a); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { return; } else { throw (RuntimeException)ex; } } src = addr.getAddress(); sport = addr.getPort(); } protected void setStart() { Date d = new Date(); expiration = d.getTime() + timeout; startTime = d.getTime(); } protected abstract void connect() throws IOException; protected abstract boolean finishConnect() throws IOException; public boolean doWrite() throws IOException { class 
writeAction implements PrivilegedExceptionAction { public Object run() throws IOException { ((WritableByteChannel)channel).write(req); return null; } } writeAction a = new writeAction(); try { AccessController.doPrivileged(a); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { setError((IOException)ex); cleanup(); return true; } else { throw (RuntimeException)ex; } } if (!req.hasRemaining()) { return true; } return false; } public abstract boolean doRead() throws IOException; public void doTimeout() { err = "TIMEOUT"; cleanup(); } protected void setEnd() { // set end (and start, if necessary) times, as appropriate Date d = new Date(); endTime = d.getTime(); if (startTime == 0) { startTime = endTime; } } protected void closeSocket() { try { channel.close(); } catch (IOException ex) { /* do nothing here */ } } public void cleanup() { setEnd(); setSocketInfo(); closeSocket(); } protected abstract void checkSource(); public void finalize() { checkSource(); if (req != null) { req.rewind(); } if (err != null) { res = null; } else if (res != null) { res.rewind(); } } public String getEncodedResponse() { String res; byte [] buf; Base64Encoder e = new Base64Encoder(); if (this.res != null) { buf = new byte [this.res.limit()]; this.res.get(buf); return new String(e.encode(buf)); } else { return null; } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerComparator.java0000644000175000017500000000215015001472663032356 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. 
* * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.transport; import java.util.Comparator; public class DNSQueryTransportHandlerComparator implements Comparator { public int compare(DNSQueryTransportHandler o1, DNSQueryTransportHandler o2) { return (int)(o1.getExpiration() - o2.getExpiration()); } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerTCP.java0000644000175000017500000001146215001472663030703 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . 
*/ package net.dnsviz.transport; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.channels.DatagramChannel; import java.nio.channels.SocketChannel; import java.nio.channels.ReadableByteChannel; import java.nio.channels.SelectionKey; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; public class DNSQueryTransportHandlerTCP extends DNSQueryTransportHandler { protected boolean lengthKnown = false; public DNSQueryTransportHandlerTCP(byte [] req, InetAddress dst, int dport, InetAddress src, int sport, long timeout) { super(req, dst, dport, src, sport, timeout); } public int getInitialSelectionOp() { return SelectionKey.OP_CONNECT; } public int getStartOfReqPayload() { return 2; } protected void initRequestBuffer(byte [] req) { byte b1, b2; b1 = (byte)((req.length >> 8) & 0xff); b2 = (byte)(req.length & 0xff); this.req = ByteBuffer.allocate(req.length + 2); this.req.clear(); this.req.put(b1); this.req.put(b2); this.req.put(req); this.req.flip(); } protected void createSocket() throws IOException { channel = SocketChannel.open(); } protected void connect() throws IOException { class connectAction implements PrivilegedExceptionAction { public Object run() throws IOException { ((SocketChannel)channel).connect(new InetSocketAddress(dst, dport)); return null; } } connectAction a = new connectAction(); try { AccessController.doPrivileged(a); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { throw (IOException)ex; } else { throw (RuntimeException)ex; } } } public boolean finishConnect() throws IOException { try { return ((SocketChannel)channel).finishConnect(); } catch (IOException ex) { setError(ex); cleanup(); return true; } } public boolean doRead() throws IOException { int bytesRead; int len; byte b1, b2; ByteBuffer buf; class readAction 
implements PrivilegedExceptionAction { private int bytesRead; public Object run() throws IOException { bytesRead = ((ReadableByteChannel)channel).read(res); return null; } public int getBytesRead() { return bytesRead; } } readAction a = new readAction(); try { AccessController.doPrivileged(a); bytesRead = a.getBytesRead(); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { setError((IOException)ex); cleanup(); return true; } else { throw (RuntimeException)ex; } } if (bytesRead < 1) { setError(Errno.ECONNRESET); cleanup(); return true; } if (!lengthKnown && res.position() > 1) { res.limit(res.position()); b1 = res.get(0); b2 = res.get(1); len = ((b1 & 0xff) << 8) | (b2 & 0xff); buf = ByteBuffer.allocate(len); buf.clear(); res.rewind().position(2); buf.put(res); res = buf; lengthKnown = true; } if (!res.hasRemaining()) { cleanup(); return true; } return false; } protected InetAddress getLocalAddress() { InetAddress ret; try { final DatagramChannel c = DatagramChannel.open(); class getAddrAction implements PrivilegedExceptionAction { public InetSocketAddress run() throws IOException { c.connect(new InetSocketAddress(dst, dport)); return (InetSocketAddress)c.getLocalAddress(); } } getAddrAction a = new getAddrAction(); try { ret = AccessController.doPrivileged(a).getAddress(); } catch (PrivilegedActionException pae) { Exception ex = pae.getException(); if (ex instanceof IOException) { throw (IOException)ex; } else { throw (RuntimeException)ex; } } c.close(); return ret; } catch (IOException ex) { return null; } } protected void checkSource() { if (src == null || src.isAnyLocalAddress()) { src = getLocalAddress(); } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerUDP.java0000644000175000017500000000704115001472663030703 0ustar00caseycasey/* * This file is a part of DNSViz, a tool 
suite for DNS/DNSSEC monitoring,
 * analysis, and visualization.
 * Created by Casey Deccio (casey@deccio.net)
 *
 * Copyright 2016 VeriSign, Inc.
 *
 * DNSViz is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * DNSViz is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with DNSViz.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.dnsviz.transport;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SelectionKey;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

/**
 * UDP transport for a DNS query: one datagram out, one datagram in.
 * The whole response arrives in a single read, so doRead() always
 * completes the exchange.
 *
 * Fix relative to the previous revision: restored the generic type
 * parameters on the PrivilegedExceptionAction helper classes.
 */
public class DNSQueryTransportHandlerUDP extends DNSQueryTransportHandler {
  public DNSQueryTransportHandlerUDP(byte [] req, InetAddress dst, int dport, InetAddress src, int sport, long timeout) {
    super(req, dst, dport, src, sport, timeout);
  }

  /** UDP is connectionless: start directly with a write. */
  public int getInitialSelectionOp() {
    return SelectionKey.OP_WRITE;
  }

  /** No length prefix for UDP; the message starts at offset 0. */
  public int getStartOfReqPayload() {
    return 0;
  }

  protected void initRequestBuffer(byte [] req) {
    this.req = ByteBuffer.allocate(req.length);
    this.req.clear();
    this.req.put(req);
    this.req.flip();
  }

  protected void initResponseBuffer() {
    //TODO start more conservative and dynamically grow if more buffer space is
    //needed
    res = ByteBuffer.allocate(65536);
  }

  protected void createSocket() throws IOException {
    channel = DatagramChannel.open();
  }

  /** "Connect" the datagram socket to fix the peer address under doPrivileged. */
  protected void connect() throws IOException {
    class connectAction implements PrivilegedExceptionAction<Object> {
      public Object run() throws IOException {
        ((DatagramChannel)channel).connect(new InetSocketAddress(dst, dport));
        return null;
      }
    }
    connectAction a = new connectAction();
    try {
      AccessController.doPrivileged(a);
    } catch (PrivilegedActionException pae) {
      Exception ex = pae.getException();
      if (ex instanceof IOException) {
        throw (IOException)ex;
      } else {
        throw (RuntimeException)ex;
      }
    }
  }

  /** Datagram connect completes immediately. */
  public boolean finishConnect() {
    return true;
  }

  /**
   * Read the single response datagram.  A failed/empty read is reported as
   * ECONNREFUSED (the usual cause when an ICMP port-unreachable arrived).
   * @return always true: one read finishes the UDP exchange
   */
  public boolean doRead() throws IOException {
    int bytesRead;
    class readAction implements PrivilegedExceptionAction<Object> {
      private int bytesRead;
      public Object run() throws IOException {
        bytesRead = ((ReadableByteChannel)channel).read(res);
        return null;
      }
      public int getBytesRead() {
        return bytesRead;
      }
    }
    readAction a = new readAction();
    try {
      AccessController.doPrivileged(a);
      bytesRead = a.getBytesRead();
    } catch (PrivilegedActionException pae) {
      Exception ex = pae.getException();
      if (ex instanceof IOException) {
        setError((IOException)ex);
        cleanup();
        return true;
      } else {
        throw (RuntimeException)ex;
      }
    }
    if (bytesRead < 1) {
      setError(Errno.ECONNREFUSED);
      cleanup();
      return true;
    }
    //TODO check response consistency
    res.limit(bytesRead);
    cleanup();
    return true;
  }

  /** A wildcard source cannot be resolved for UDP; report it as unknown. */
  protected void checkSource() {
    if (src != null && src.isAnyLocalAddress()) {
      src = null;
    }
  }
}
* * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.transport; import java.io.IOException; import java.net.SocketException; import java.nio.channels.ClosedChannelException; import java.nio.channels.DatagramChannel; import java.nio.channels.SelectableChannel; import java.nio.channels.Selector; import java.nio.channels.SelectionKey; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Date; import java.util.Iterator; import java.util.PriorityQueue; import java.util.Set; public class DNSQueryTransportManager { public void DNSQueryTransportManager() { } private void prepareAndQueue(DNSQueryTransportHandler qh, PriorityQueue q, Selector selector) throws IOException { qh.prepare(); if (!qh.hasError()) { // if we successfully bound and connected the socket, then register this // socket in the write fd list ((SelectableChannel)qh.getChannel()).register(selector, qh.getInitialSelectionOp(), qh); q.add(qh); } } public void query(DNSQueryTransportHandler [] queryHandlers) throws IOException { int i; int timeout; DNSQueryTransportHandler qh = null; DNSQueryTransportHandler standbyQH = null; DNSQueryTransportHandlerComparator cmp = new DNSQueryTransportHandlerComparator(); PriorityQueue standbyQueue = new PriorityQueue(queryHandlers.length, cmp); PriorityQueue activeQueue = new PriorityQueue(queryHandlers.length, cmp); Selector selector = Selector.open(); for (i = 0; i < queryHandlers.length; i++) { qh = queryHandlers[i]; try { prepareAndQueue(qh, activeQueue, selector); } catch (IOException ex) { if (ex instanceof SocketException && ex.getMessage().contains("maximum number 
of ")) { /* if we couldn't create the socket because too many datagrams were * open, then place this one in the standbyQueue */ standbyQueue.add(qh); } else { throw ex; } } } while (activeQueue.peek() != null) { Date d = new Date(); long currTime = d.getTime(); // remove expired entries while (((qh = activeQueue.peek()) != null) && currTime >= qh.getExpiration()) { // remove the qh from the priority queue, and run doTimeout() qh = activeQueue.poll(); qh.doTimeout(); if (qh.getChannel() instanceof DatagramChannel) { /* prepare and queue one from the standbyQueue, now that * there's a space available */ standbyQH = standbyQueue.poll(); if (standbyQH != null) { prepareAndQueue(standbyQH, activeQueue, selector); } } } if (qh == null) { // all entries have expired break; } timeout = (int)(qh.getExpiration() - currTime); /* timeout should never be less than 1 because of the loop termination test * of the while loop above */ selector.select(timeout); Set selectedKeys = selector.selectedKeys(); Iterator keyIterator = selectedKeys.iterator(); while (keyIterator.hasNext()) { SelectionKey key = keyIterator.next(); qh = (DNSQueryTransportHandler)key.attachment(); if ((key.interestOps() & SelectionKey.OP_CONNECT) != 0 && key.isConnectable()) { if (qh.finishConnect()) { if (qh.hasError()) { activeQueue.remove(qh); if (qh.getChannel() instanceof DatagramChannel) { /* prepare and queue one from the standbyQueue, now that * there's a space available */ standbyQH = standbyQueue.poll(); if (standbyQH != null) { prepareAndQueue(standbyQH, activeQueue, selector); } } continue; } else { key.interestOps(SelectionKey.OP_WRITE); } } } if ((key.interestOps() & SelectionKey.OP_WRITE) != 0 && key.isWritable()) { if (qh.doWrite()) { if (qh.hasError()) { activeQueue.remove(qh); if (qh.getChannel() instanceof DatagramChannel) { /* prepare and queue one from the standbyQueue, now that * there's a space available */ standbyQH = standbyQueue.poll(); if (standbyQH != null) { 
prepareAndQueue(standbyQH, activeQueue, selector); } } continue; } else { key.interestOps(SelectionKey.OP_READ); } } } if ((key.interestOps() & SelectionKey.OP_READ) != 0 && key.isReadable()) { if (qh.doRead()) { activeQueue.remove(qh); if (qh.getChannel() instanceof DatagramChannel) { /* prepare and queue one from the standbyQueue, now that * there's a space available */ standbyQH = standbyQueue.poll(); if (standbyQH != null) { prepareAndQueue(standbyQH, activeQueue, selector); } } continue; } } } } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/transport/Errno.java0000644000175000017500000002035615001472663024536 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . 
*/ package net.dnsviz.transport; public class Errno { public static final int E2BIG = 7; public static final int EACCES = 13; public static final int EADDRINUSE = 98; public static final int EADDRNOTAVAIL = 99; public static final int EADV = 68; public static final int EAFNOSUPPORT = 97; public static final int EAGAIN = 11; public static final int EALREADY = 114; public static final int EBADE = 52; public static final int EBADF = 9; public static final int EBADFD = 77; public static final int EBADMSG = 74; public static final int EBADR = 53; public static final int EBADRQC = 56; public static final int EBADSLT = 57; public static final int EBFONT = 59; public static final int EBUSY = 16; public static final int ECHILD = 10; public static final int ECHRNG = 44; public static final int ECOMM = 70; public static final int ECONNABORTED = 103; public static final int ECONNREFUSED = 111; public static final int ECONNRESET = 104; public static final int EDEADLK = 35; public static final int EDEADLOCK = 35; public static final int EDESTADDRREQ = 89; public static final int EDOM = 33; public static final int EDOTDOT = 73; public static final int EDQUOT = 122; public static final int EEXIST = 17; public static final int EFAULT = 14; public static final int EFBIG = 27; public static final int EHOSTDOWN = 112; public static final int EHOSTUNREACH = 113; public static final int EIDRM = 43; public static final int EILSEQ = 84; public static final int EINPROGRESS = 115; public static final int EINTR = 4; public static final int EINVAL = 22; public static final int EIO = 5; public static final int EISCONN = 106; public static final int EISDIR = 21; public static final int EISNAM = 120; public static final int EL2HLT = 51; public static final int EL2NSYNC = 45; public static final int EL3HLT = 46; public static final int EL3RST = 47; public static final int ELIBACC = 79; public static final int ELIBBAD = 80; public static final int ELIBEXEC = 83; public static final int ELIBMAX = 
82; public static final int ELIBSCN = 81; public static final int ELNRNG = 48; public static final int ELOOP = 40; public static final int EMFILE = 24; public static final int EMLINK = 31; public static final int EMSGSIZE = 90; public static final int EMULTIHOP = 72; public static final int ENAMETOOLONG = 36; public static final int ENAVAIL = 119; public static final int ENETDOWN = 100; public static final int ENETRESET = 102; public static final int ENETUNREACH = 101; public static final int ENFILE = 23; public static final int ENOANO = 55; public static final int ENOBUFS = 105; public static final int ENOCSI = 50; public static final int ENODATA = 61; public static final int ENODEV = 19; public static final int ENOENT = 2; public static final int ENOEXEC = 8; public static final int ENOLCK = 37; public static final int ENOLINK = 67; public static final int ENOMEM = 12; public static final int ENOMSG = 42; public static final int ENONET = 64; public static final int ENOPKG = 65; public static final int ENOPROTOOPT = 92; public static final int ENOSPC = 28; public static final int ENOSR = 63; public static final int ENOSTR = 60; public static final int ENOSYS = 38; public static final int ENOTBLK = 15; public static final int ENOTCONN = 107; public static final int ENOTDIR = 20; public static final int ENOTEMPTY = 39; public static final int ENOTNAM = 118; public static final int ENOTSOCK = 88; public static final int ENOTSUP = 95; public static final int ENOTTY = 25; public static final int ENOTUNIQ = 76; public static final int ENXIO = 6; public static final int EOPNOTSUPP = 95; public static final int EOVERFLOW = 75; public static final int EPERM = 1; public static final int EPFNOSUPPORT = 96; public static final int EPIPE = 32; public static final int EPROTO = 71; public static final int EPROTONOSUPPORT = 93; public static final int EPROTOTYPE = 91; public static final int ERANGE = 34; public static final int EREMCHG = 78; public static final int EREMOTE = 66; 
public static final int EREMOTEIO = 121; public static final int ERESTART = 85; public static final int EROFS = 30; public static final int ESHUTDOWN = 108; public static final int ESOCKTNOSUPPORT = 94; public static final int ESPIPE = 29; public static final int ESRCH = 3; public static final int ESRMNT = 69; public static final int ESTALE = 116; public static final int ESTRPIPE = 86; public static final int ETIME = 62; public static final int ETIMEDOUT = 110; public static final int ETOOMANYREFS = 109; public static final int ETXTBSY = 26; public static final int EUCLEAN = 117; public static final int EUNATCH = 49; public static final int EUSERS = 87; public static final int EWOULDBLOCK = 11; public static final int EXDEV = 18; public static final int EXFULL = 54; public static final String [] errorCode = { null, "EPERM", /* 1 */ "ENOENT", /* 2 */ "ESRCH", /* 3 */ "EINTR", /* 4 */ "EIO", /* 5 */ "ENXIO", /* 6 */ "E2BIG", /* 7 */ "ENOEXEC", /* 8 */ "EBADF", /* 9 */ "ECHILD", /* 10 */ "EAGAIN", /* 11 */ "ENOMEM", /* 12 */ "EACCES", /* 13 */ "EFAULT", /* 14 */ "ENOTBLK", /* 15 */ "EBUSY", /* 16 */ "EEXIST", /* 17 */ "EXDEV", /* 18 */ "ENODEV", /* 19 */ "ENOTDIR", /* 20 */ "EISDIR", /* 21 */ "EINVAL", /* 22 */ "ENFILE", /* 23 */ "EMFILE", /* 24 */ "ENOTTY", /* 25 */ "ETXTBSY", /* 26 */ "EFBIG", /* 27 */ "ENOSPC", /* 28 */ "ESPIPE", /* 29 */ "EROFS", /* 30 */ "EMLINK", /* 31 */ "EPIPE", /* 32 */ "EDOM", /* 33 */ "ERANGE", /* 34 */ "EDEADLK", /* 35 */ "ENAMETOOLONG", /* 36 */ "ENOLCK", /* 37 */ "ENOSYS", /* 38 */ "ENOTEMPTY", /* 39 */ "ELOOP", /* 40 */ null, "ENOMSG", /* 42 */ "EIDRM", /* 43 */ "ECHRNG", /* 44 */ "EL2NSYNC", /* 45 */ "EL3HLT", /* 46 */ "EL3RST", /* 47 */ "ELNRNG", /* 48 */ "EUNATCH", /* 49 */ "ENOCSI", /* 50 */ "EL2HLT", /* 51 */ "EBADE", /* 52 */ "EBADR", /* 53 */ "EXFULL", /* 54 */ "ENOANO", /* 55 */ "EBADRQC", /* 56 */ "EBADSLT", /* 57 */ null, "EBFONT", /* 59 */ "ENOSTR", /* 60 */ "ENODATA", /* 61 */ "ETIME", /* 62 */ "ENOSR", /* 63 */ "ENONET", /* 
64 */ "ENOPKG", /* 65 */ "EREMOTE", /* 66 */ "ENOLINK", /* 67 */ "EADV", /* 68 */ "ESRMNT", /* 69 */ "ECOMM", /* 70 */ "EPROTO", /* 71 */ "EMULTIHOP", /* 72 */ "EDOTDOT", /* 73 */ "EBADMSG", /* 74 */ "EOVERFLOW", /* 75 */ "ENOTUNIQ", /* 76 */ "EBADFD", /* 77 */ "EREMCHG", /* 78 */ "ELIBACC", /* 79 */ "ELIBBAD", /* 80 */ "ELIBSCN", /* 81 */ "ELIBMAX", /* 82 */ "ELIBEXEC", /* 83 */ "EILSEQ", /* 84 */ "ERESTART", /* 85 */ "ESTRPIPE", /* 86 */ "EUSERS", /* 87 */ "ENOTSOCK", /* 88 */ "EDESTADDRREQ", /* 89 */ "EMSGSIZE", /* 90 */ "EPROTOTYPE", /* 91 */ "ENOPROTOOPT", /* 92 */ "EPROTONOSUPPORT", /* 93 */ "ESOCKTNOSUPPORT", /* 94 */ "ENOTSUP", /* 95 */ "EPFNOSUPPORT", /* 96 */ "EAFNOSUPPORT", /* 97 */ "EADDRINUSE", /* 98 */ "EADDRNOTAVAIL", /* 99 */ "ENETDOWN", /* 100 */ "ENETUNREACH", /* 101 */ "ENETRESET", /* 102 */ "ECONNABORTED", /* 103 */ "ECONNRESET", /* 104 */ "ENOBUFS", /* 105 */ "EISCONN", /* 106 */ "ENOTCONN", /* 107 */ "ESHUTDOWN", /* 108 */ "ETOOMANYREFS", /* 109 */ "ETIMEDOUT", /* 110 */ "ECONNREFUSED", /* 111 */ "EHOSTDOWN", /* 112 */ "EHOSTUNREACH", /* 113 */ "EALREADY", /* 114 */ "EINPROGRESS", /* 115 */ "ESTALE", /* 116 */ "EUCLEAN", /* 117 */ "ENOTNAM", /* 118 */ "ENAVAIL", /* 119 */ "EISNAM", /* 120 */ "EREMOTEIO", /* 121 */ "EDQUOT" /* 122 */ }; public static String getName(int code) { String ret; if (code < 0 || code >= errorCode.length) { return ""; } ret = errorCode[code]; if (ret == null) { return ""; } else { return ret; } } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/util/0000755000175000017500000000000015001473074021516 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/util/Base64.java0000644000175000017500000000414415001472663023413 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * 
analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.util; public class Base64 { public static byte [] alphabet = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' }; public static byte [] values = { 0, 0, 0, 0, 0, 0, 0, 0, // 0 - 7 0, 0, 0, 0, 0, 0, 0, 0, // 8 - 15 0, 0, 0, 0, 0, 0, 0, 0, // 16 - 23 0, 0, 0, 0, 0, 0, 0, 0, // 24 - 31 0, 0, 0, 0, 0, 0, 0, 0, // 32 - 39 0, 0, 0, 62, 0, 0, 0, 63, // 40 - 47 52, 53, 54, 55, 56, 57, 58, 59, // 48 - 55 60, 61, 0, 0, 0, 0, 0, 0, // 56 - 63 0, 0, 1, 2, 3, 4, 5, 6, // 64 - 71 7, 8, 9, 10, 11, 12, 13, 14, // 72 - 79 15, 16, 17, 18, 19, 20, 21, 22, // 80 - 87 23, 24, 25, 0, 0, 0, 0, 0, // 88 - 95 0, 26, 27, 28, 29, 30, 31, 32, // 96 - 103 33, 34, 35, 36, 37, 38, 39, 40, // 104 - 111 41, 42, 43, 44, 45, 46, 47, 48, // 112 - 119 49, 50, 51, 0, 0, 0, 0, 0 // 120 - 127 }; public static boolean isValid(byte b) { return (b >= 0 && b <= 127) && (b == 65 || values[b] != 0); } public static byte pad = '='; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 
dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/util/Base64Decoder.java0000644000175000017500000000342415001472663024701 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.util; import net.dnsviz.util.Base64; public class Base64Decoder { public byte [] decode(byte [] msg) { int msgBits; int i; byte [] ret; int bitIndex; int index; int offset; byte val; msgBits = msg.length * 6; if (msg[msg.length - 1] == Base64.pad) { msgBits -= 6; } if (msg[msg.length - 2] == Base64.pad) { msgBits -= 6; } msgBits -= (msgBits % 8); ret = new byte [msgBits >> 3]; for (i = 0; i < ret.length; i++) { ret[i] = 0; } for (i = 0; i < msg.length; i++) { assert(Base64.isValid(msg[i])); val = Base64.values[msg[i]]; bitIndex = i * 6; index = bitIndex / 8; offset = bitIndex % 8; if (index >= ret.length) { break; } else { if (offset <= 2) { ret[index] |= (byte)(val << (2 - offset)); } else { ret[index] |= (byte)(val >> (offset - 2)); if (index + 1 < ret.length) { ret[index + 1] |= (byte)(val << (10 - offset)); } } } } return ret; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/util/Base64Encoder.java0000644000175000017500000000335015001472663024711 
0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.util; import net.dnsviz.util.Base64; public class Base64Encoder { public byte [] encode(byte [] msg) { int msgBits; int leftover; byte [] ret; int i; int bitIndex; int index; int offset; byte next; byte val; msgBits = msg.length << 3; leftover = msgBits % 24; if (leftover > 0) { msgBits += 24 - leftover; } ret = new byte [msgBits/6]; for (i = 0; i < ret.length; i++) { bitIndex = i * 6; index = bitIndex / 8; offset = bitIndex % 8; if (index >= msg.length) { ret[i] = Base64.pad; } else { if (offset <= 2) { val = (byte)((msg[index] >> (2 - offset)) & 0x3f); } else { if (index + 1 < msg.length) { next = msg[index + 1]; } else { next = 0; } val = (byte)(((msg[index] << (offset - 2)) | ((next >> (10 - offset)) & ~(0xff << (offset - 2)))) & 0x3f); } ret[i] = Base64.alphabet[val]; } } return ret; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/util/DNSSettings.java0000644000175000017500000000671615001472663024543 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. 
* Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.util; import java.io.BufferedReader; import java.io.InputStreamReader; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.file.Files; import java.nio.file.FileSystems; import java.util.LinkedList; import java.util.regex.Pattern; public class DNSSettings { public InetAddress [] getDNSServers() { final Pattern ipCharsRE = Pattern.compile("[0-9a-fA-F\\.:]+(%.+)?"); String line; boolean foundColon; boolean lineHasColon; BufferedReader reader; ProcessBuilder [] pbs; Process p; LinkedList addresses = new LinkedList(); InetAddress [] ret; InetAddress addr; String [] words; try { String[] lines = Files.readAllLines(FileSystems.getDefault().getPath("/", "etc", "resolv.conf")).toArray(new String[0]); for (int i = 0; i < lines.length; i++) { words = lines[i].split("\\s+"); if (words.length > 1 && words[0].equals("nameserver")) { try { addr = InetAddress.getByName(words[1]); if (!addresses.contains(addr)) { addresses.add(addr); } } catch (UnknownHostException e) { /* Bad address. Move along */ } } } } catch (IOException e) { /* File not found, error opening or reading, etc. Move along. 
*/ } pbs = new ProcessBuilder [] { /* Windows XP */ new ProcessBuilder("netsh", "interface", "ip", "show", "dns"), /* Windows 7, Windows 8, Windows Server 2012R2 */ new ProcessBuilder("netsh", "interface", "ipv4", "show", "dnsservers"), new ProcessBuilder("netsh", "interface", "ipv6", "show", "dnsservers") }; for (int i = 0; i < pbs.length; i++) { try { p = pbs[i].start(); reader = new BufferedReader(new InputStreamReader(p.getInputStream())); foundColon = false; lineHasColon = false; while ((line = reader.readLine()) != null) { words = line.split("\\s+"); lineHasColon = words.length > 1 && words[words.length - 2].endsWith(":") && ipCharsRE.matcher(words[words.length - 1]).matches(); if (foundColon && !lineHasColon) { if (!(words.length == 2 && words[0].equals(""))) { foundColon = false; } } if (lineHasColon || foundColon) { try { addr = InetAddress.getByName(words[words.length - 1]); if (!addresses.contains(addr)) { addresses.add(addr); } } catch (UnknownHostException e) { /* Bad address. Move along */ } } if (lineHasColon) { foundColon = true; } } } catch (IOException e) { /* Command not found, bad arguments. etc. Move along. */ } } return addresses.toArray(new InetAddress[addresses.size()]); } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/websocket/0000755000175000017500000000000015001473074022527 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-java/net/dnsviz/websocket/WebSocketClient.java0000644000175000017500000002117715001472663026432 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. 
* * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ package net.dnsviz.websocket; import java.io.IOException; import java.math.BigInteger; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Iterator; import java.util.LinkedList; import java.util.Random; import net.dnsviz.util.Base64Encoder; public class WebSocketClient { final static protected String WEBSOCKET_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; final static protected int WEBSOCKET_VERSION = 13; protected SocketChannel channel = null; protected ByteBuffer buffer = null; public WebSocketClient(String host, int port, String path, String origin) throws IOException { channel = SocketChannel.open(); channel.connect(new InetSocketAddress(InetAddress.getByName(host), port)); buffer = ByteBuffer.allocate(8192); String clientKey = keyForClient(); String serverKey = keyForServer(clientKey); sendRequestHeaders(path, host, origin, clientKey); getResponseHeaders(serverKey); } public void close() throws IOException { channel.close(); } protected String keyForServer(String clientKey) { try { MessageDigest md = MessageDigest.getInstance("SHA"); byte [] b = md.digest((clientKey + WEBSOCKET_GUID).getBytes()); return new String(new Base64Encoder().encode(b)); } catch (NoSuchAlgorithmException ex) { return ""; } } 
protected String keyForClient() { BigInteger n = new BigInteger(64, new Random()); return new String(new Base64Encoder().encode(n.toByteArray())); } protected void sendRequestHeaders(String path, String host, String origin, String clientKey) throws IOException { String headers = "GET " + path + " HTTP/1.1\r\n" + "Host: " + host + "\r\n" + "Upgrade: websocket\r\n" + "Connection: Upgrade\r\n" + "Sec-WebSocket-Key: " + clientKey + "\r\n" + "Origin: " + origin + "\r\n" + "Sec-WebSocket-Version: " + WEBSOCKET_VERSION + "\r\n\r\n"; ByteBuffer buf = ByteBuffer.allocate(headers.length()); buf.put(headers.getBytes()); buf.flip(); channel.write(buf); } protected void getResponseHeaders(String serverKey) throws IOException { String headers = ""; String [] headerLines; ByteBuffer buf; byte [] bytes; int endOfHeadersIndex = -1; int endOfName = -1; boolean upgradeFound = false; boolean connectionFound = false; boolean acceptFound = false; buf = ByteBuffer.allocate(2048); while (true) { channel.read(buf); bytes = new byte [buf.position()]; buf.rewind(); buf.get(bytes); headers += new String(bytes); buf.clear(); endOfHeadersIndex = headers.indexOf("\r\n\r\n"); if (endOfHeadersIndex >= 0) { endOfHeadersIndex += 4; break; } endOfHeadersIndex = headers.indexOf("\n\n"); if (endOfHeadersIndex >= 0) { endOfHeadersIndex += 2; break; } endOfHeadersIndex = headers.indexOf("\r\r"); if (endOfHeadersIndex >= 0) { endOfHeadersIndex += 2; break; } } // put any buffered data beyond headers into buffer if (headers.length() > endOfHeadersIndex) { buffer.put(headers.substring(endOfHeadersIndex).getBytes()); } headerLines = headers.split("\r\n|\n|\r"); if (!headerLines[0].matches("HTTP/\\d\\.\\d+ 101 .*")) { throw new IOException("Invalid status response: " + headerLines[0]); } for (int i = 1; i < headerLines.length; i++) { if (headerLines[i].matches("Upgrade:\\s*websocket\\s*")) { upgradeFound = true; } else if (headerLines[i].matches("Connection:\\s*Upgrade\\s*")) { connectionFound = true; } else 
if (headerLines[i].matches("Sec-WebSocket-Accept:\\s*" + serverKey.replaceAll("\\+", "\\\\+") + "\\s*")) { acceptFound = true; } } if (!upgradeFound) { throw new IOException("Invalid response: Upgrade header not found"); } if (!connectionFound) { throw new IOException("Invalid response: Connection header not found"); } if (!acceptFound) { throw new IOException("Invalid response: Sec-WebSocket-Accept header not found or key not correct."); } } public byte [] read() throws IOException { ByteBuffer buf = ByteBuffer.allocate(2048); int byte0; int byte1; int byte1b; int headerLen; long frameLen = -1; boolean hasMore = true; LinkedList frames = new LinkedList(); byte [] frame; byte [] message; long totalLength = 0; int index = 0; // first read any content from buffer if (buffer.position() > 0) { buffer.flip(); buf.put(buffer); buffer.clear(); } while (hasMore) { while (buf.position() < 2) { channel.read(buf); } byte0 = buf.get(0) & 0xff; byte1 = buf.get(1) & 0xff; byte1b = byte1 & 0x7f; // mask must not be set if ((byte1 & 0x80) != 0) { throw new IOException("Mask is set in frame"); } // check whether FIN flag is set or not hasMore = (byte0 & 0x80) == 0; // determine the header length if (byte1b <= 125) { headerLen = 2; } else if (byte1b == 126) { headerLen = 4; } else { // byte1b == 127: headerLen = 10; } while (buf.position() < headerLen) { channel.read(buf); } if (byte1b <= 125) { frameLen = byte1b; } else if (byte1b == 126) { frameLen = ((buf.get(2) & 0xff) << 8) | (buf.get(3) & 0xff); } else if (byte1b == 127) { frameLen = (buf.getLong(2)); } totalLength += frameLen; // put any leftover content from buf into buffer if (buf.position() >= headerLen) { buf.flip().position(headerLen); buffer.put(buf); } if (frameLen > 0x7fffffff) { throw new IOException("Frame size too big for buffer"); } // allocate a buffer that is for the whole frame, or the size of the // previous buffer, whichever is greater buf = ByteBuffer.allocate(Math.max((int)frameLen, buffer.position())); // 
fill buf with content from buffer first if (buffer.position() > 0) { buffer.flip(); buf.put(buffer); buffer.clear(); } while (buf.hasRemaining()) { channel.read(buf); } // create a byte array with the bytes frame = new byte[(int)frameLen]; buf.flip(); buf.get(frame, 0, (int)frameLen); frames.add(frame); // if there is any content remaining if buf, put it into buffer, so it // will persist if (buf.hasRemaining()) { buffer.put(buf); } } if (totalLength > 0x7fffffff) { throw new IOException("Total message size too big for array"); } message = new byte[(int)totalLength]; Iterator iterator = frames.iterator(); while (iterator.hasNext()) { frame = iterator.next(); System.arraycopy(frame, 0, message, index, frame.length); index += frame.length; } return message; } public void write(byte [] data) throws IOException { ByteBuffer buf = null; int headerLen; byte [] mask; if (data.length <= 125) { headerLen = 6; } else if (data.length <= 0xffff) { headerLen = 8; } else { // 0xffff < data.length <= 2^63 headerLen = 14; } buf = ByteBuffer.allocate(headerLen + data.length); buf.put((byte)(0x81)); if (data.length <= 125) { buf.put((byte)(data.length | 0x80)); } else if (data.length <= 0xffff) { buf.put((byte)(126 | 0x80)); buf.put((byte)((data.length >> 8) & 0xff)); buf.put((byte)(data.length & 0xff)); } else { // 0xffff < data.length <= 2^63 buf.put((byte)(127 | 0x80)); buf.put((byte)0); buf.put((byte)0); buf.put((byte)0); buf.put((byte)0); buf.put((byte)((data.length >> 24) & 0xff)); buf.put((byte)((data.length >> 16) & 0xff)); buf.put((byte)((data.length >> 8) & 0xff)); buf.put((byte)(data.length & 0xff)); } mask = new byte [4]; BigInteger n = new BigInteger(32, new Random()); System.arraycopy(n.toByteArray(), 0, mask, 0, 4); buf.put(mask); for (int i = 0; i < data.length; i++) { buf.put((byte)(mask[i % mask.length]^data[i])); } buf.flip(); while (buf.hasRemaining()) { channel.write(buf); } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 
mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg-ws.js0000644000175000017500000001414515001472663016514 0ustar00caseycasey/* * This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, * analysis, and visualization. * Created by Casey Deccio (casey@deccio.net) * * Copyright 2016 VeriSign, Inc. * * DNSViz is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * DNSViz is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with DNSViz. If not, see . */ var ORIGINS = new Array(); var WEBSOCKET_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'; var WEBSOCKET_VERSION = 13; var PROTOCOL = 'dns-looking-glass'; function setupSocket(webSocket, filename) { var crypto = require('crypto'); var fs = require('fs'); var net = require('net'); var path = require('path'); var os = require('os'); // Create a (hopefully) unique filename for the UNIX domain socket var sockname; var sha1 = crypto.createHash('sha1'); sha1.update(filename, 'ascii'); sockname = path.join(os.tmpdir(), sha1.digest('hex')); var srv = net.createServer(); srv.on('connection', function(socket) { // if there are any errors with the new connection, // then write them to the console before closing. 
socket.on('error', function(e) { console.error('Socket ' + e.toString()); }); // once connected, send UNIX domain socket data // to webSocket, and vice-versa socket.on('data', function(data) { webSocket.write(data); }); // (use function for this one, so it can be removed later) var sendDataToSocket = function(data) { socket.write(data); }; webSocket.on('data', sendDataToSocket); // when the socket is closed, don't send data from // the webSocket to it anymore socket.on('close', function() { webSocket.removeListener('data', sendDataToSocket); }); }); // if there is an error on either the webSocket or the listening socket // then report it to the console srv.on('error', function(e) { console.error('Listen Socket ' + e.toString()); webSocket.end(); }); webSocket.on('error', function(e) { console.error('WebSocket ' + e.toString()); srv.close(); }); // when the Web client closes its end of the webSocket, close the server webSocket.on('end', function() { srv.close(); }); srv.listen(sockname, function(e) { fs.chmod(sockname, 0660); }); } function checkHeaders(key, origin, version, protocols) { var msg = ''; if (key == null) { return { code: 400, msg: 'Bad Request', extraHeader: '', content: 'Key not found' }; } if (origin == null) { return { code: 400, msg: 'Bad Request', extraHeader: '', content: 'Origin not found' }; } var origin_match = false; for (var i = 0; i < ORIGINS.length; i++) { if (origin == ORIGINS[i]) { origin_match = true; break; } } if (!origin_match) { return { code: 403, msg: 'Forbidden', extraHeader: '', content: 'Invalid origin' }; } if (version == null) { return { code: 400, msg: 'Bad Request', extraHeader: '', content: 'Version not found' }; } if (version != WEBSOCKET_VERSION) { return { code: 426, msg: 'Upgrade Required', extraHeader: 'Sec-WebSocket-Version: ' + WEBSOCKET_VERSION + '\r\n', content: 'Unsupported version' }; } /*TODO protocols*/ return null; } function handleUpgrade(req, socket, head) { var key = req.headers['sec-websocket-key']; var 
origin = req.headers['origin']; var version = req.headers['sec-websocket-version']; var protocols = req.headers['sec-websocket-protocol']; var error = checkHeaders(key, origin, version, protocols); if (error != null) { var errorResponse = 'HTTP/1.1 ' + error.code + ' ' + error.msg + '\r\n' + error.extraHeader + 'Content-Length: ' + error.content.length + '\r\n\r\n' + error.content; socket.write(errorResponse, function() { socket.end(); console.log(new Date() + ' ' + req.method + ' ' + req.url + ' ' + error.code + ' ' + error.msg + ' (' + error.content + ')'); }); return; } var crypto = require('crypto'); var sha1 = crypto.createHash('sha1'); sha1.update(key + WEBSOCKET_GUID, 'ascii'); var successResponse = 'HTTP/1.1 101 Switching Protocols\r\n' + 'Upgrade: websocket\r\n' + 'Connection: Upgrade\r\n' + 'Sec-WebSocket-Accept: ' + sha1.digest('base64') + '\r\n\r\n'; var qs = require('url').parse(req.url, true); socket.write(successResponse, function() { setupSocket(socket, qs.query.fn); console.log(new Date() + ' ' + req.method + ' ' + req.url + ' 101 upgraded'); }); } function usage() { console.error('Usage: ' + process.argv[0] + ' ' + process.argv[1] + ' ip:port[,ip:port...] [ origin[,origin...] 
]'); } function main() { if (process.argv.length < 3) { usage(); process.exit(1); } var ips = process.argv[2].split(","); if (process.argv.length > 3) { var origins = process.argv[3].split(","); for (var i = 0; i < origins.length; i++) { ORIGINS.push(origins[i]); } } // Create an HTTP server var http = require('http'); var srv = http.createServer(); var repr = new Array(); srv.on('upgrade', handleUpgrade); srv.on('error', function(e) { console.error(e.toString()); srv.close(); }); srv.on('listening', function() { var all_repr = repr.join(", "); console.log(new Date() + ' Listening for connections on ' + all_repr); }); for (var i = 0; i < ips.length; i++) { var ip_port = ips[i].split(':'); if (ip_port.length < 2) { usage(); process.exit(1); } var host = ip_port.slice(0, ip_port.length - 1).join(':'); var port = ip_port[ip_port.length - 1]; if (host[0] == "[" && host[host.length - 1] == "]") { host = host.slice(1, host.length - 1); } if (host.indexOf(":") >= 0) { repr.push("[" + host + "]:" + port); } else { repr.push(host + ":" + port); } ORIGINS.push('http://' + repr[i]); ORIGINS.push('https://' + repr[i]); srv.listen(port, host); } } main(); ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/contrib/dnsviz-lg.cgi0000755000175000017500000001713615001472663016221 0ustar00caseycasey#!/usr/bin/env python # # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2015-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
#
# DNSViz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with DNSViz. If not, see .
#

from __future__ import unicode_literals

import json
import os
import re
import struct
import sys

# python3/python2 dual compatibility
try:
    import queue
except ImportError:
    import Queue as queue

from dnsviz.ipaddr import *
from dnsviz import transport

# Matches values to be interpreted as boolean False: empty string, "0",
# "f", or "false" (case-insensitive).
FALSE_RE = re.compile(r'^(0|f(alse)?)?$', re.IGNORECASE)

# Cap on the number of DNS queries accepted in a single HTTP request;
# falls back to 200 if the environment variable is not an integer.
try:
    MAX_QUERIES = int(os.environ.get('MAX_QUERIES', 200))
except ValueError:
    MAX_QUERIES = 200
ALLOW_PRIVATE_QUERY = not bool(FALSE_RE.search(os.environ.get('ALLOW_PRIVATE_QUERY', 'f')))
ALLOW_LOOPBACK_QUERY = not bool(FALSE_RE.search(os.environ.get('ALLOW_LOOPBACK_QUERY', 'f')))
BLACKLIST_FILE = os.environ.get('BLACKLIST_FILE', None)
WHITELIST_FILE = os.environ.get('WHITELIST_FILE', None)

# Lazily populated by import_blacklist() on first use (see check_qname()).
blacklist = None
whitelist = None

class RemoteQueryError(Exception):
    '''A policy or protocol error to be reported to the client as JSON.'''
    pass

class InvalidName(Exception):
    '''The question section of a DNS message contained a malformed name.'''
    pass

def check_dst(dst):
    '''Raise RemoteQueryError if policy forbids issuing queries to the
    address dst (a textual IP address).'''

    # check for local addresses
    if not ALLOW_PRIVATE_QUERY and (RFC_1918_RE.search(dst) is not None or \
            LINK_LOCAL_RE.search(dst) is not None or \
            UNIQ_LOCAL_RE.search(dst) is not None):
        raise RemoteQueryError('Issuing queries to %s not allowed' % dst)

    if not ALLOW_LOOPBACK_QUERY and (LOOPBACK_IPV4_RE.search(dst) is not None or \
            dst == LOOPBACK_IPV6):
        raise RemoteQueryError('Issuing queries to %s not allowed' % dst)

def get_qname(msg):
    '''Return the query name (text form, with trailing dot) extracted from
    the wire-format DNS message msg.  Raise InvalidName if the name is
    malformed, compressed, or truncated.'''

    # question section starts immediately after the 12-byte header
    index = 12
    labels = []
    while True:
        # no label
        if index >= len(msg):
            raise InvalidName()

        # python3/python2 dual compatibility
        if isinstance(msg, str):
            l = struct.unpack(b'!B', msg[index])[0]
        else:
            l = msg[index]

        # no compression allowed in question
        if l & 0xc0:
            raise InvalidName()

        # account for label length
        index += 1

        # not enough message for label
        if index + l > len(msg):
            raise InvalidName()

        # zero labels - this is the end
        if l == 0:
            break

        # append label to list
        labels.append(msg[index:index + l].decode('latin1'))
        index += l

    return '.'.join(labels) + '.'

def import_blacklist():
    '''Populate the global blacklist and whitelist sets from the files
    named by the BLACKLIST_FILE and WHITELIST_FILE environment variables.
    Names are lower-cased and normalized to end with a dot.'''

    global blacklist
    global whitelist
    blacklist = set()
    whitelist = set()
    if BLACKLIST_FILE is None:
        return
    with open(BLACKLIST_FILE, 'r') as fh:
        for line in fh:
            name = line.rstrip().lower()
            if not name.endswith('.'):
                name += '.'
            blacklist.add(name)
    if WHITELIST_FILE is None:
        return
    with open(WHITELIST_FILE, 'r') as fh:
        for line in fh:
            name = line.rstrip().lower()
            if not name.endswith('.'):
                name += '.'
            whitelist.add(name)

def check_qname(msg):
    '''Raise RemoteQueryError if the query name in the wire-format message
    msg is blacklisted without being whitelisted.  A name that cannot be
    parsed is allowed through (the upstream server will reject it).'''

    global blacklist
    global whitelist

    try:
        qname = get_qname(msg)
    except InvalidName:
        return

    if blacklist is None:
        import_blacklist()

    # walk from the full name up through each ancestor domain; a whitelist
    # hit at any level permits the query before the blacklist is consulted
    subdomain = qname.lower()
    while True:
        if subdomain in whitelist:
            return

        if subdomain in blacklist:
            raise RemoteQueryError('Querying %s not allowed' % qname)

        try:
            nextdot = subdomain.index('.')
        except ValueError:
            break
        else:
            subdomain = subdomain[nextdot+1:]

def main():
    '''CGI entry point: read a JSON-serialized batch of DNS queries from
    the request body, issue them via dnsviz.transport, and write the
    JSON-serialized responses (or a JSON-formatted error) to stdout.'''

    try:
        if not os.environ.get('REQUEST_METHOD', None):
            os.environ['REQUEST_METHOD'] = 'POST'

        if os.environ['REQUEST_METHOD'] != 'POST':
            raise RemoteQueryError('Request method %s not supported' % os.environ['REQUEST_METHOD'])
        # CONTENT_TYPE is optional per the CGI spec (RFC 3875); use .get()
        # so its absence is reported as a JSON error instead of escaping
        # as an uncaught KeyError (which would yield a bare 500 with no body).
        content_type = os.environ.get('CONTENT_TYPE', '')
        if content_type != 'application/json':
            raise RemoteQueryError('Incorrect content type: %s' % content_type)

        response_queue = queue.Queue()
        queries_in_waiting = set()
        th_factory = transport.DNSQueryTransportHandlerDNSFactory()
        tm = transport.DNSQueryTransportManager()
        qtms = []
        try:
            try:
                # KeyError: CONTENT_LENGTH unset; ValueError: not an integer;
                # OSError: stdin read failure
                content_len = int(os.environ['CONTENT_LENGTH'])
                raw_content = sys.stdin.read(content_len)
            except (KeyError, OSError, ValueError) as e:
                raise RemoteQueryError('Reading of input failed: %s' % str(e))

            # load the json content
            try:
                content = json.loads(raw_content)
            except ValueError:
                raise RemoteQueryError('JSON decoding of HTTP request failed: %s' % raw_content)

            if 'version' not in content:
                raise RemoteQueryError('No version information in HTTP request.')
            try:
                major_vers, minor_vers = [int(x) for x in str(content['version']).split('.', 1)]
            except ValueError:
                raise RemoteQueryError('Version of JSON input in HTTP request is invalid: %s' % content['version'])

            # ensure major version is a match and minor version is no greater
            # than the current minor version
            curr_major_vers, curr_minor_vers = [int(x) for x in str(transport.DNS_TRANSPORT_VERSION).split('.', 1)]
            if major_vers != curr_major_vers or minor_vers > curr_minor_vers:
                raise RemoteQueryError('Version %d.%d of JSON input in HTTP request is incompatible with this software.' % (major_vers, minor_vers))

            if 'requests' not in content:
                raise RemoteQueryError('No request information in HTTP request.')

            for i, qtm_serialized in enumerate(content['requests']):
                if i >= MAX_QUERIES:
                    raise RemoteQueryError('Maximum requests exceeded.')

                try:
                    qtm = transport.DNSQueryTransportMeta.deserialize_request(qtm_serialized)
                except transport.TransportMetaDeserializationError as e:
                    raise RemoteQueryError('Error deserializing request information: %s' % e)

                # enforce destination-address and query-name policy before
                # the query is dispatched
                check_dst(qtm.dst)
                check_qname(qtm.req)

                qtms.append(qtm)
                th = th_factory.build(processed_queue=response_queue)
                th.add_qtm(qtm)
                th.init_req()
                tm.handle_msg_nowait(th)
                queries_in_waiting.add(th)

            # collect every outstanding response before serializing
            while queries_in_waiting:
                th = response_queue.get()
                th.finalize()
                queries_in_waiting.remove(th)

        finally:
            tm.close()

        ret = {
            'version': transport.DNS_TRANSPORT_VERSION,
            'responses': [qtm.serialize_response() for qtm in qtms],
        }
    except RemoteQueryError as e:
        ret = {
            'version': transport.DNS_TRANSPORT_VERSION,
            'error': str(e),
        }
    sys.stdout.write('Content-type: application/json\r\n\r\n')
    sys.stdout.write(json.dumps(ret))

if __name__ == '__main__':
    main()
# Package initialization: apply import-time compatibility shims for
# differing versions of dnspython and Python.

try:
    # compatibility with dnspython >= 2.7.0
    # Register GenericOption for every known EDNS option type --
    # presumably so option contents are carried as raw bytes rather than
    # dnspython's parsed option classes; confirm against dnsviz's EDNS
    # handling.
    from dns.edns import GenericOption, OptionType, register_type
    for o in list(OptionType):
        register_type(GenericOption, o)
except ImportError:
    pass

try:
    import importlib.metadata
except ImportError:
    # compatibility with python < 3.8
    # importlib.metadata unavailable: expose a stand-in object whose
    # version attribute is the empty string.
    class Placeholder(object):
        version = ''
    dist = Placeholder()
else:
    # distribution metadata for the installed dnsviz package
    dist = importlib.metadata.distribution(__name__)
#
# DNSViz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with DNSViz. If not, see .
#

from __future__ import unicode_literals

import datetime

# minimal support for python2.6
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

# python3/python2 dual compatibility
try:
    from html import escape
except ImportError:
    from cgi import escape

import dns.dnssec

import dnsviz.format as fmt
from dnsviz.util import tuple_to_dict

class DomainNameAnalysisError(object):
    '''Base class for all errors detected during domain name analysis.

    Subclasses set code, description_template, references, and
    required_params; instances carry the template parameters plus the set
    of (server, client) pairs on which the error was observed.
    '''

    # subclasses must set _abstract = False to be instantiable
    _abstract = True
    code = None
    description_template = '%(code)s'
    terse_description_template = '%(code)s'
    references = []
    required_params = []
    use_effective_query_tag = True

    def __init__(self, **kwargs):
        if self._abstract:
            raise TypeError('Only subclasses may be instantiated.')

        self.template_kwargs = { 'code': self.code }
        self.servers_clients = {}
        # every required template parameter must be supplied as a keyword
        for param in self.required_params:
            try:
                self.template_kwargs[param] = kwargs[param]
            except KeyError:
                raise TypeError('The "%s" keyword argument is required for instantiation.' % param)

    def __hash__(self):
        # identity-based hash; __eq__ below is value-based, so equal
        # instances may hash differently (insert_into_list relies on
        # list.index(), not hashing, to deduplicate)
        return id(self)

    def __str__(self):
        return self.code

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.args == other.args

    def copy(self):
        '''Return a new instance of the same class with the same args.'''
        return self.__class__(**dict(list(zip(self.required_params, self.args))))

    @property
    def args(self):
        # lazily computed ordered list of required parameter values
        if not hasattr(self, '_args') or self._args is None:
            self._args = [self.template_kwargs[p] for p in self.required_params]
        return self._args

    @property
    def description(self):
        '''Full description, with references appended if any exist.'''
        desc = self.description_template % self.template_kwargs
        if self.references:
            desc += ' See %s.' % (', '.join(self.references))
        return desc

    @property
    def description_without_references(self):
        return self.description_template % self.template_kwargs

    @property
    def terse_description(self):
        return self.terse_description_template % self.template_kwargs

    @property
    def html_description(self):
        '''Like description, but with template and parameters HTML-escaped.'''
        description_template_escaped = escape(self.description_template, True)
        template_kwargs_escaped = {}
        for n, v in self.template_kwargs.items():
            if isinstance(v, int):
                template_kwargs_escaped[n] = v
            else:
                if isinstance(v, str):
                    template_kwargs_escaped[n] = escape(v)
                else:
                    template_kwargs_escaped[n] = escape(str(v))
        desc = description_template_escaped % template_kwargs_escaped
        if self.references:
            desc += ' See %s.' % (', '.join(self.references))
        return desc

    def add_server_client(self, server, client, response):
        '''Record that this error was observed for (server, client) with
        the given response (which may be None).'''
        if (server, client) not in self.servers_clients:
            self.servers_clients[(server, client)] = []
        if response not in self.servers_clients[(server, client)]:
            self.servers_clients[(server, client)].append(response)

    def remove_server_client(self, server, client, response):
        '''Remove a previously recorded (server, client, response)
        observation, dropping the (server, client) key when empty.'''
        if (server, client) in self.servers_clients:
            try:
                self.servers_clients[(server, client)].remove(response)
            except ValueError:
                pass
            else:
                if not self.servers_clients[(server, client)]:
                    del self.servers_clients[(server, client)]

    def serialize(self, consolidate_clients=False, html_format=False):
        '''Return an OrderedDict representation of this error, suitable
        for JSON output.'''
        d = OrderedDict()

        if html_format:
            d['description'] = self.html_description
        else:
            d['description'] = self.description
        d['code'] = self.code
        if self.servers_clients:
            servers = tuple_to_dict(self.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            tags = set()
            for server,client in self.servers_clients:
                for response in self.servers_clients[(server,client)]:
                    # some errors are not in conjunction with responses, per
                    # se, only servers, in which case, the response value is
                    # None.
                    if response is not None:
                        if self.use_effective_query_tag:
                            tag = response.effective_query_tag()
                        else:
                            tag = response.initial_query_tag()
                        tags.add(tag)
            if tags:
                d['query_options'] = list(tags)
                d['query_options'].sort()

        return d

    @classmethod
    def insert_into_list(cls, error, error_list, server, client, response):
        '''Add error to error_list, deduplicating by equality; record the
        (server, client, response) observation on the stored instance and
        return it.'''
        try:
            index = error_list.index(error)
        except ValueError:
            error_list.append(error)
        else:
            error = error_list[index]
        if server is not None and client is not None:
            error.add_server_client(server, client, response)
        return error

class RRSIGError(DomainNameAnalysisError):
    # category base class for errors pertaining to RRSIG records
    pass

class SignerNotZone(RRSIGError):
    '''
    >>> e = SignerNotZone(zone_name='foo.', signer_name='bar.')
    >>> e.args
    ['foo.', 'bar.']
    >>> e.description
    "The Signer's Name field of the RRSIG RR (bar.) does not match the name of the zone containing the RRset (foo.). See ..."
    '''

    _abstract = False
    code = 'SIGNER_NOT_ZONE'
    description_template = "The Signer's Name field of the RRSIG RR (%(signer_name)s) does not match the name of the zone containing the RRset (%(zone_name)s)."
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['zone_name', 'signer_name']

class RRSIGLabelsExceedRRsetOwnerLabels(RRSIGError):
    '''
    >>> e = RRSIGLabelsExceedRRsetOwnerLabels(rrsig_labels=2, rrset_owner_labels=1)
    >>> e.args
    [2, 1]
    >>> e.description
    'The value of the labels field of the RRSIG RR (2) exceeds the number of labels in the RRset owner name (1). See ...'
    '''

    _abstract = False
    code = 'RRSIG_LABELS_EXCEED_RRSET_OWNER_LABELS'
    description_template = 'The value of the labels field of the RRSIG RR (%(rrsig_labels)d) exceeds the number of labels in the RRset owner name (%(rrset_owner_labels)d).'
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['rrsig_labels', 'rrset_owner_labels']

class RRsetTTLMismatch(RRSIGError):
    '''
    >>> e = RRsetTTLMismatch(rrset_ttl=50, rrsig_ttl=10)
    >>> e.args
    [50, 10]
    >>> e.description
    'The TTL of the RRSIG RR (10) does not match the TTL of the RRset it covers (50). See ...'
    '''

    _abstract = False
    code = 'RRSET_TTL_MISMATCH'
    description_template = 'The TTL of the RRSIG RR (%(rrsig_ttl)d) does not match the TTL of the RRset it covers (%(rrset_ttl)d).'
    references = ['RFC 4035, Sec. 2.2']
    required_params = ['rrset_ttl', 'rrsig_ttl']
# Errors relating to RRSIG TTL handling, signing algorithm support, and
# signature validity periods.

class OriginalTTLExceeded(RRSIGError):
    references = ['RFC 4035, Sec. 2.2']

class OriginalTTLExceededRRset(OriginalTTLExceeded):
    '''
    >>> e = OriginalTTLExceededRRset(original_ttl=10, rrset_ttl=50)
    >>> e.args
    [10, 50]
    >>> e.description
    'The TTL of the RRset (50) exceeds the value of the Original TTL field of the RRSIG RR covering it (10). See ...'
    '''

    _abstract = False
    code = 'ORIGINAL_TTL_EXCEEDED_RRSET'
    description_template = 'The TTL of the RRset (%(rrset_ttl)d) exceeds the value of the Original TTL field of the RRSIG RR covering it (%(original_ttl)d).'
    required_params = ['original_ttl', 'rrset_ttl']

class OriginalTTLExceededRRSIG(OriginalTTLExceeded):
    '''
    >>> e = OriginalTTLExceededRRSIG(original_ttl=10, rrsig_ttl=50)
    >>> e.args
    [10, 50]
    >>> e.description
    'The TTL of the RRSIG (50) exceeds the value of its Original TTL field (10). See ...'
    '''

    _abstract = False
    code = 'ORIGINAL_TTL_EXCEEDED_RRSIG'
    description_template = 'The TTL of the RRSIG (%(rrsig_ttl)d) exceeds the value of its Original TTL field (%(original_ttl)d).'
    required_params = ['original_ttl', 'rrsig_ttl']

class TTLBeyondExpiration(RRSIGError):
    '''
    >>> e = TTLBeyondExpiration(expiration=datetime.datetime(2015,1,10), rrsig_ttl=86401, reference_time=datetime.datetime(2015,1,9))
    >>> e.args
    [datetime.datetime(2015, 1, 10, 0, 0), 86401, datetime.datetime(2015, 1, 9, 0, 0)]
    >>> e.description
    'With a TTL of 86401 the RRSIG RR can be in the cache of a non-validating resolver until 1 second after it expires at 2015-01-10 00:00:00. See ...'
    '''

    _abstract = False
    code = 'TTL_BEYOND_EXPIRATION'
    description_template = "With a TTL of %(rrsig_ttl)d the RRSIG RR can be in the cache of a non-validating resolver until %(difference)s after it expires at %(expiration)s."
    references = ['RFC 4035, Sec. 5.3.3']
    required_params = ['expiration', 'rrsig_ttl', 'reference_time']

    def __init__(self, **kwargs):
        super(TTLBeyondExpiration, self).__init__(**kwargs)
        # how far past expiration a cached copy could survive
        diff = self.template_kwargs['reference_time'] + datetime.timedelta(seconds=self.template_kwargs['rrsig_ttl']) - self.template_kwargs['expiration']
        self.template_kwargs['difference'] = fmt.humanize_time(diff.seconds, diff.days)

class AlgorithmNotSupported(RRSIGError):
    '''
    >>> e = AlgorithmNotSupported(algorithm=5)
    >>> e.args
    [5]
    >>> e.description
    'Validation of DNSSEC algorithm 5 (RSASHA1) is not supported by this code, so the cryptographic status of this RRSIG is unknown. See ...'
    '''

    _abstract = False
    code = 'ALGORITHM_NOT_SUPPORTED'
    description_template = "Validation of DNSSEC algorithm %(algorithm)d (%(algorithm_text)s) is not supported by this code, so the cryptographic status of this RRSIG is unknown."
    references = ['RFC 4035, Sec. 5.2']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(AlgorithmNotSupported, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm'])

class AlgorithmValidationProhibited(RRSIGError):
    '''
    >>> e = AlgorithmValidationProhibited(algorithm=5)
    >>> e.args
    [5]
    >>> e.description
    'DNSSEC implementers are prohibited from implementing validation of DNSSEC algorithm 5 (RSASHA1). See ...'
    '''

    _abstract = False
    code = 'ALGORITHM_VALIDATION_PROHIBITED'
    description_template = "DNSSEC implementers are prohibited from implementing validation of DNSSEC algorithm %(algorithm)d (%(algorithm_text)s)."
    references = ['RFC 8624, Sec. 3.1']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(AlgorithmValidationProhibited, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm'])

class AlgorithmProhibited(RRSIGError):
    '''
    >>> e = AlgorithmProhibited(algorithm=5)
    >>> e.args
    [5]
    >>> e.description
    'DNSSEC implementers are prohibited from implementing signing with DNSSEC algorithm 5 (RSASHA1). See ...'
    '''

    _abstract = False
    code = 'ALGORITHM_PROHIBITED'
    description_template = 'DNSSEC implementers are prohibited from implementing signing with DNSSEC algorithm %(algorithm)d (%(algorithm_text)s).'
    references = ['RFC 8624, Sec. 3.1']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(AlgorithmProhibited, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm'])

class AlgorithmNotRecommended(RRSIGError):
    '''
    >>> e = AlgorithmNotRecommended(algorithm=5)
    >>> e.args
    [5]
    >>> e.description
    'DNSSEC implementers are recommended against implementing signing with DNSSEC algorithm 5 (RSASHA1). See ...'
    '''

    _abstract = False
    code = 'ALGORITHM_NOT_RECOMMENDED'
    description_template = "DNSSEC implementers are recommended against implementing signing with DNSSEC algorithm %(algorithm)d (%(algorithm_text)s)."
    references = ['RFC 8624, Sec. 3.1']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(AlgorithmNotRecommended, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm'])

class DNSKEYRevokedRRSIG(RRSIGError):
    '''
    >>> e = DNSKEYRevokedRRSIG()
    >>> e.description
    'The DNSKEY RR corresponding to the RRSIG RR has the REVOKE bit set. A revoked key cannot be used to validate RRSIGs. See ...'
    '''

    _abstract = False
    code = 'DNSKEY_REVOKED_RRSIG'
    description_template = "The DNSKEY RR corresponding to the RRSIG RR has the REVOKE bit set. A revoked key cannot be used to validate RRSIGs."
    references = ['RFC 5011, Sec. 2.1']
    required_params = []

class InceptionInFuture(RRSIGError):
    '''
    >>> e = InceptionInFuture(inception=datetime.datetime(2015,1,10), reference_time=datetime.datetime(2015,1,9))
    >>> e.args
    [datetime.datetime(2015, 1, 10, 0, 0), datetime.datetime(2015, 1, 9, 0, 0)]
    >>> e.description
    'The Signature Inception field of the RRSIG RR (2015-01-10 00:00:00) is 1 day in the future. See ...'
    '''

    _abstract = False
    code = 'INCEPTION_IN_FUTURE'
    description_template = "The Signature Inception field of the RRSIG RR (%(inception)s) is %(premature_time)s in the future."
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['inception', 'reference_time']

    def __init__(self, **kwargs):
        super(InceptionInFuture, self).__init__(**kwargs)
        diff = self.template_kwargs['inception'] - self.template_kwargs['reference_time']
        self.template_kwargs['premature_time'] = fmt.humanize_time(diff.seconds, diff.days)

class ExpirationInPast(RRSIGError):
    '''
    >>> e = ExpirationInPast(expiration=datetime.datetime(2015,1,10), reference_time=datetime.datetime(2015,1,11))
    >>> e.args
    [datetime.datetime(2015, 1, 10, 0, 0), datetime.datetime(2015, 1, 11, 0, 0)]
    >>> e.description
    'The Signature Expiration field of the RRSIG RR (2015-01-10 00:00:00) is 1 day in the past. See ...'
    '''

    _abstract = False
    code = 'EXPIRATION_IN_PAST'
    description_template = "The Signature Expiration field of the RRSIG RR (%(expiration)s) is %(expired_time)s in the past."
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['expiration', 'reference_time']

    def __init__(self, **kwargs):
        super(ExpirationInPast, self).__init__(**kwargs)
        diff = self.template_kwargs['reference_time'] - self.template_kwargs['expiration']
        self.template_kwargs['expired_time'] = fmt.humanize_time(diff.seconds, diff.days)
# Errors relating to clock skew around signature validity boundaries and
# to malformed signature lengths for specific algorithms.

class InceptionWithinClockSkew(RRSIGError):
    '''
    >>> e = InceptionWithinClockSkew(inception=datetime.datetime(2015,1,10,0,0,0), reference_time=datetime.datetime(2015,1,10,0,0,1))
    >>> e.description
    'The value of the Signature Inception field of the RRSIG RR (2015-01-10 00:00:00) is within possible clock skew range (1 second) of the current time (2015-01-10 00:00:01). See ...'
    '''

    _abstract = False
    code = 'INCEPTION_WITHIN_CLOCK_SKEW'
    description_template = "The value of the Signature Inception field of the RRSIG RR (%(inception)s) is within possible clock skew range (%(difference)s) of the current time (%(reference_time)s)."
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['inception', 'reference_time']

    def __init__(self, **kwargs):
        super(InceptionWithinClockSkew, self).__init__(**kwargs)
        diff = self.template_kwargs['reference_time'] - self.template_kwargs['inception']
        self.template_kwargs['difference'] = fmt.humanize_time(diff.seconds, diff.days)

class ExpirationWithinClockSkew(RRSIGError):
    '''
    >>> e = ExpirationWithinClockSkew(expiration=datetime.datetime(2015,1,10,0,0,1), reference_time=datetime.datetime(2015,1,10,0,0,0))
    >>> e.description
    'The value of the Signature Expiration field of the RRSIG RR (2015-01-10 00:00:01) is within possible clock skew range (1 second) of the current time (2015-01-10 00:00:00). See ...'
    '''

    _abstract = False
    code = 'EXPIRATION_WITHIN_CLOCK_SKEW'
    description_template = "The value of the Signature Expiration field of the RRSIG RR (%(expiration)s) is within possible clock skew range (%(difference)s) of the current time (%(reference_time)s)."
    references = ['RFC 4035, Sec. 5.3.1']
    required_params = ['expiration', 'reference_time']

    def __init__(self, **kwargs):
        super(ExpirationWithinClockSkew, self).__init__(**kwargs)
        diff = self.template_kwargs['expiration'] - self.template_kwargs['reference_time']
        self.template_kwargs['difference'] = fmt.humanize_time(diff.seconds, diff.days)

class SignatureInvalid(RRSIGError):
    '''
    >>> e = SignatureInvalid()
    >>> e.description
    'The cryptographic signature of the RRSIG RR does not properly validate. See ...'
    '''

    _abstract = False
    code = 'SIGNATURE_INVALID'
    description_template = "The cryptographic signature of the RRSIG RR does not properly validate."
    references = ['RFC 4035, Sec. 5.3.3']
    required_params = []

class RRSIGBadLength(RRSIGError):
    # category base class for signatures with an incorrect length
    pass

class RRSIGBadLengthGOST(RRSIGBadLength):
    '''
    >>> e = RRSIGBadLengthGOST(length=500)
    >>> e.description
    'The length of the signature is 500 bits, but a GOST signature (DNSSEC algorithm 12) must be 512 bits long. See ...'
    '''

    _abstract = False
    description_template = 'The length of the signature is %(length)d bits, but a GOST signature (DNSSEC algorithm 12) must be 512 bits long.'
    code = 'RRSIG_BAD_LENGTH_GOST'
    references = ['RFC 5933, Sec. 5.2']
    required_params = ['length']

class RRSIGBadLengthECDSA(RRSIGBadLength):
    # abstract; concrete subclasses supply curve, algorithm, and
    # correct_length for the specific ECDSA variant
    curve = None
    algorithm = None
    correct_length = None
    description_template = 'The length of the signature is %(length)d bits, but an ECDSA signature made with Curve %(curve)s (DNSSEC algorithm %(algorithm)d) must be %(correct_length)d bits long.'
    references = ['RFC 6605, Sec. 4']
    required_params = ['length']

    def __init__(self, **kwargs):
        super(RRSIGBadLengthECDSA, self).__init__(**kwargs)
        self.template_kwargs['curve'] = self.curve
        self.template_kwargs['algorithm'] = self.algorithm
        self.template_kwargs['correct_length'] = self.correct_length

class RRSIGBadLengthECDSA256(RRSIGBadLengthECDSA):
    '''
    >>> e = RRSIGBadLengthECDSA256(length=500)
    >>> e.description
    'The length of the signature is 500 bits, but an ECDSA signature made with Curve P-256 (DNSSEC algorithm 13) must be 512 bits long. See ...'
    '''

    curve = 'P-256'
    algorithm = 13
    correct_length = 512
    _abstract = False
    code = 'RRSIG_BAD_LENGTH_ECDSA256'

class RRSIGBadLengthECDSA384(RRSIGBadLengthECDSA):
    '''
    >>> e = RRSIGBadLengthECDSA384(length=500)
    >>> e.description
    'The length of the signature is 500 bits, but an ECDSA signature made with Curve P-384 (DNSSEC algorithm 14) must be 768 bits long. See ...'
    '''

    curve = 'P-384'
    algorithm = 14
    correct_length = 768
    _abstract = False
    code = 'RRSIG_BAD_LENGTH_ECDSA384'

class RRSIGBadLengthEdDSA(RRSIGBadLength):
    # abstract; concrete subclasses supply curve, algorithm, and
    # correct_length for the specific EdDSA variant
    curve = None
    algorithm = None
    correct_length = None
    description_template = 'The length of the signature is %(length)d bits, but an %(curve)s signature (DNSSEC algorithm %(algorithm)d) must be %(correct_length)d bits long.'
    references = ['RFC 8080, Sec. 4']
    required_params = ['length']

    def __init__(self, **kwargs):
        super(RRSIGBadLengthEdDSA, self).__init__(**kwargs)
        self.template_kwargs['curve'] = self.curve
        self.template_kwargs['algorithm'] = self.algorithm
        self.template_kwargs['correct_length'] = self.correct_length

class RRSIGBadLengthEd25519(RRSIGBadLengthEdDSA):
    '''
    >>> e = RRSIGBadLengthEd25519(length=500)
    >>> e.description
    'The length of the signature is 500 bits, but an Ed25519 signature (DNSSEC algorithm 15) must be 512 bits long. See ...'
    '''

    curve = 'Ed25519'
    algorithm = 15
    correct_length = 512
    _abstract = False
    code = 'RRSIG_BAD_LENGTH_ED25519'
class RRSIGBadLengthEd448(RRSIGBadLengthEdDSA):
    '''
    >>> e = RRSIGBadLengthEd448(length=500)
    >>> e.description
    'The length of the signature is 500 bits, but an Ed448 signature (DNSSEC algorithm 16) must be 912 bits long. See ...'
    '''

    curve = 'Ed448'
    algorithm = 16
    correct_length = 912
    _abstract = False
    code = 'RRSIG_BAD_LENGTH_ED448'

class DSError(DomainNameAnalysisError):
    # category base class for errors pertaining to DS records
    pass

class ReferralForDSQuery(DSError):
    '''
    >>> e = ReferralForDSQuery(parent='baz.')
    >>> e.description
    'The server(s) for the parent zone (baz.) responded with a referral instead of answering authoritatively for the DS RR type. See ...'
    '''

    _abstract = False
    code = 'REFERRAL_FOR_DS_QUERY'
    description_template = 'The server(s) for the parent zone (%(parent)s) responded with a referral instead of answering authoritatively for the DS RR type.'
    references = ['RFC 4034, Sec. 5']
    required_params = ['parent']

class DSDigestAlgorithmIgnored(DSError):
    '''
    >>> e = DSDigestAlgorithmIgnored(algorithm=1, new_algorithm=2)
    >>> e.description
    'DS records with digest type 1 (SHA-1) are ignored when DS records with digest type 2 (SHA-256) exist in the same RRset. See ...'
    '''

    _abstract = False
    code = 'DS_DIGEST_ALGORITHM_IGNORED'
    description_template = "DS records with digest type %(algorithm)d (%(algorithm_text)s) are ignored when DS records with digest type %(new_algorithm)d (%(new_algorithm_text)s) exist in the same RRset."
    references = ['RFC 4509, Sec. 3']
    required_params = ['algorithm', 'new_algorithm']

    def __init__(self, **kwargs):
        super(DSDigestAlgorithmIgnored, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], str(self.template_kwargs['algorithm']))
        # Fix: the fallback for an unknown new digest type previously used
        # 'algorithm' (a copy-paste error), mislabeling the new digest type
        # with the old type's number.
        self.template_kwargs['new_algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['new_algorithm'], str(self.template_kwargs['new_algorithm']))

class DSDigestAlgorithmMaybeIgnored(DSError):
    '''
    >>> e = DSDigestAlgorithmMaybeIgnored(algorithm=1, new_algorithm=2)
    >>> e.description
    'In the spirit of RFC 4509, DS records with digest type 1 (SHA-1) might be ignored when DS records with digest type 2 (SHA-256) exist in the same RRset. See ...'
    '''

    _abstract = False
    code = 'DS_DIGEST_ALGORITHM_MAYBE_IGNORED'
    description_template = "In the spirit of RFC 4509, DS records with digest type %(algorithm)d (%(algorithm_text)s) might be ignored when DS records with digest type %(new_algorithm)d (%(new_algorithm_text)s) exist in the same RRset."
    references = ['RFC 4509, Sec. 3']
    required_params = ['algorithm', 'new_algorithm']

    def __init__(self, **kwargs):
        super(DSDigestAlgorithmMaybeIgnored, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], str(self.template_kwargs['algorithm']))
        # Fix: same copy-paste error as in DSDigestAlgorithmIgnored -- the
        # fallback must reflect 'new_algorithm', not 'algorithm'.
        self.template_kwargs['new_algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['new_algorithm'], str(self.template_kwargs['new_algorithm']))

class DSDigestError(DSError):
    # category base class for errors pertaining to the DS digest itself
    pass

class DigestAlgorithmNotSupported(DSDigestError):
    '''
    >>> e = DigestAlgorithmNotSupported(algorithm=5)
    >>> e.description
    'Generating cryptographic hashes using algorithm 5 (5) is not supported by this code, so the cryptographic status of the DS RR is unknown. See ...'
    '''

    _abstract = False
    code = 'DIGEST_ALGORITHM_NOT_SUPPORTED'
    description_template = "Generating cryptographic hashes using algorithm %(algorithm)d (%(algorithm_text)s) is not supported by this code, so the cryptographic status of the DS RR is unknown."
    references = ['RFC 4035, Sec. 5.2']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(DigestAlgorithmNotSupported, self).__init__(**kwargs)
        # fallback here is the raw int, rendered identically by the %s
        # template (see the '(5)' in the doctest above)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], self.template_kwargs['algorithm'])
# Errors relating to DS digest algorithm status and digest verification.

class DigestAlgorithmValidationProhibited(DSDigestError):
    '''
    >>> e = DigestAlgorithmValidationProhibited(algorithm=1)
    >>> e.description
    'DNSSEC implementers are prohibited from implementing validation of DS algorithm 1 (SHA-1). See ...'
    '''

    _abstract = False
    code = 'DIGEST_ALGORITHM_VALIDATION_PROHIBITED'
    description_template = "DNSSEC implementers are prohibited from implementing validation of DS algorithm %(algorithm)d (%(algorithm_text)s)."
    references = ['RFC 8624, Sec. 3.2']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(DigestAlgorithmValidationProhibited, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], self.template_kwargs['algorithm'])

class DigestAlgorithmProhibited(DSDigestError):
    '''
    >>> e = DigestAlgorithmProhibited(algorithm=1)
    >>> e.description
    'DNSSEC implementers are prohibited from implementing signing with DS algorithm 1 (SHA-1). See ...'
    '''

    _abstract = False
    code = 'DIGEST_ALGORITHM_PROHIBITED'
    description_template = "DNSSEC implementers are prohibited from implementing signing with DS algorithm %(algorithm)d (%(algorithm_text)s)."
    references = ['RFC 8624, Sec. 3.2']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(DigestAlgorithmProhibited, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], self.template_kwargs['algorithm'])

class DigestAlgorithmNotRecommended(DSDigestError):
    '''
    >>> e = DigestAlgorithmNotRecommended(algorithm=1)
    >>> e.description
    'DNSSEC implementers are recommended against implementing signing with DS algorithm 1 (SHA-1). See ...'
    '''

    _abstract = False
    code = 'DIGEST_ALGORITHM_NOT_RECOMMENDED'
    description_template = "DNSSEC implementers are recommended against implementing signing with DS algorithm %(algorithm)d (%(algorithm_text)s)."
    references = ['RFC 8624, Sec. 3.2']
    required_params = ['algorithm']

    def __init__(self, **kwargs):
        super(DigestAlgorithmNotRecommended, self).__init__(**kwargs)
        self.template_kwargs['algorithm_text'] = fmt.DS_DIGEST_TYPES.get(self.template_kwargs['algorithm'], self.template_kwargs['algorithm'])

class DNSKEYRevokedDS(DSDigestError):
    '''
    >>> e = DNSKEYRevokedDS()
    >>> e.description
    'The DNSKEY RR corresponding to the DS RR has the REVOKE bit set. A revoked key cannot be used with DS records. See ...'
    '''

    _abstract = False
    code = 'DNSKEY_REVOKED_DS'
    description_template = "The DNSKEY RR corresponding to the DS RR has the REVOKE bit set. A revoked key cannot be used with DS records."
    references = ['RFC 5011, Sec. 2.1']
    required_params = []

class DigestInvalid(DSDigestError):
    '''
    >>> e = DigestInvalid()
    >>> e.description
    'The cryptographic hash in the Digest field of the DS RR does not match the computed value. See ...'
    '''

    _abstract = False
    code = 'DIGEST_INVALID'
    description_template = "The cryptographic hash in the Digest field of the DS RR does not match the computed value."
    references = ['RFC 4035, Sec. 5.2']
    required_params = []
# Errors relating to NSEC/NSEC3 negative-proof validation.  Each abstract
# parent defines the code/template; concrete subclasses bind the nsec_type
# and the RFC reference for the particular proof context.

class NSECError(DomainNameAnalysisError):
    nsec_type = None

    def __init__(self, *args, **kwargs):
        super(NSECError, self).__init__(**kwargs)
        # expose the record type (NSEC or NSEC3) to the description template
        self.template_kwargs['nsec_type'] = self.nsec_type

class SnameNotCovered(NSECError):
    code = 'SNAME_NOT_COVERED'
    description_template = "No %(nsec_type)s RR covers the SNAME (%(sname)s)."
    required_params = ['sname']
    nsec_type = 'NSEC'

class SnameNotCoveredNameError(SnameNotCovered):
    '''
    >>> e = SnameNotCoveredNameError(sname='foo.baz.')
    >>> e.description
    'No NSEC RR covers the SNAME (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.2']

class SnameNotCoveredWildcardAnswer(SnameNotCovered):
    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.3']

class NextClosestEncloserNotCovered(NSECError):
    code = 'NEXT_CLOSEST_ENCLOSER_NOT_COVERED'
    description_template = "No %(nsec_type)s RR covers the next closest encloser (%(next_closest_encloser)s)."
    required_params = ['next_closest_encloser']
    nsec_type = 'NSEC3'

class NextClosestEncloserNotCoveredNameError(NextClosestEncloserNotCovered):
    '''
    >>> e = NextClosestEncloserNotCoveredNameError(next_closest_encloser='foo.baz.')
    >>> e.description
    'No NSEC3 RR covers the next closest encloser (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 5155, Sec. 8.4']

class NextClosestEncloserNotCoveredNODATADS(NextClosestEncloserNotCovered):
    _abstract = False
    references = ['RFC 5155, Sec. 8.6']

class NextClosestEncloserNotCoveredWildcardNODATA(NextClosestEncloserNotCovered):
    _abstract = False
    references = ['RFC 5155, Sec. 8.7']

class NextClosestEncloserNotCoveredWildcardAnswer(NextClosestEncloserNotCovered):
    _abstract = False
    references = ['RFC 5155, Sec. 8.8']

class WildcardNotCovered(NSECError):
    code = 'WILDCARD_NOT_COVERED'
    description_template = "No %(nsec_type)s RR covers the wildcard (%(wildcard)s)."
    required_params = ['wildcard']

class WildcardNotCoveredNSEC(WildcardNotCovered):
    '''
    >>> e = WildcardNotCoveredNSEC(wildcard='*.foo.baz.')
    >>> e.description
    'No NSEC RR covers the wildcard (*.foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.2']
    nsec_type = 'NSEC'

class WildcardNotCoveredNSEC3(WildcardNotCovered):
    _abstract = False
    references = ['RFC 5155, Sec. 8.4']
    nsec_type = 'NSEC3'

class NoClosestEncloser(NSECError):
    code = 'NO_CLOSEST_ENCLOSER'
    description_template = "No %(nsec_type)s RR corresponds to the closest encloser of the SNAME (%(sname)s)."
    required_params = ['sname']
    nsec_type = 'NSEC3'

class NoClosestEncloserNameError(NoClosestEncloser):
    '''
    >>> e = NoClosestEncloserNameError(sname='foo.baz.')
    >>> e.description
    'No NSEC3 RR corresponds to the closest encloser of the SNAME (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 5155, Sec. 8.4']

class NoClosestEncloserNODATADS(NoClosestEncloser):
    _abstract = False
    references = ['RFC 5155, Sec. 8.6']

class NoClosestEncloserWildcardNODATA(NoClosestEncloser):
    _abstract = False
    references = ['RFC 5155, Sec. 8.7']

class NoClosestEncloserWildcardAnswer(NoClosestEncloser):
    _abstract = False
    references = ['RFC 5155, Sec. 8.8']

class OptOutFlagNotSet(NSECError):
    code = 'OPT_OUT_FLAG_NOT_SET'
    description_template = "The opt-out flag was not set in the %(nsec_type)s RR covering the next closest encloser (%(next_closest_encloser)s) but was required for the NODATA response."
    required_params = ['next_closest_encloser']
    nsec_type = 'NSEC3'

class OptOutFlagNotSetNODATA(OptOutFlagNotSet):
    '''
    >>> e = OptOutFlagNotSetNODATA(next_closest_encloser='foo.baz.')
    >>> e.description
    'The opt-out flag was not set in the NSEC3 RR covering the next closest encloser (foo.baz.) but was required for the NODATA response. See ...'
    '''

    _abstract = False
    references = ['RFC 5155, Sec. 8.5', 'RFC Errata 3441']

class OptOutFlagNotSetNODATADS(OptOutFlagNotSet):
    _abstract = False
    references = ['RFC 5155, Sec. 8.6']

class ReferralWithSOABit(NSECError):
    code = 'REFERRAL_WITH_SOA'
    description_template = "The SOA bit was set in the bitmap of the %(nsec_type)s RR corresponding to the delegated name (%(sname)s)."
    required_params = ['sname']

class ReferralWithSOABitNSEC(ReferralWithSOABit):
    '''
    >>> e = ReferralWithSOABitNSEC(sname='foo.baz.')
    >>> e.description
    'The SOA bit was set in the bitmap of the NSEC RR corresponding to the delegated name (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4034, Sec. 5.2']
    nsec_type = 'NSEC'

class ReferralWithSOABitNSEC3(ReferralWithSOABit):
    _abstract = False
    references = ['RFC 5155, Sec. 8.9']
    nsec_type = 'NSEC3'

class ReferralWithDSBit(NSECError):
    code = 'REFERRAL_WITH_DS'
    description_template = "The DS bit was set in the bitmap of the %(nsec_type)s RR corresponding to the delegated name (%(sname)s)."
    required_params = ['sname']

class ReferralWithDSBitNSEC(ReferralWithDSBit):
    '''
    >>> e = ReferralWithDSBitNSEC(sname='foo.baz.')
    >>> e.description
    'The DS bit was set in the bitmap of the NSEC RR corresponding to the delegated name (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4034, Sec. 5.2']
    nsec_type = 'NSEC'

class ReferralWithDSBitNSEC3(ReferralWithDSBit):
    _abstract = False
    references = ['RFC 5155, Sec. 8.9']
    nsec_type = 'NSEC3'

class ReferralWithoutNSBit(NSECError):
    code = 'REFERRAL_WITHOUT_NS'
    description_template = "The NS bit was not set in the bitmap of the %(nsec_type)s RR corresponding to the delegated name (%(sname)s)."
    required_params = ['sname']

class ReferralWithoutNSBitNSEC(ReferralWithoutNSBit):
    '''
    >>> e = ReferralWithoutNSBitNSEC(sname='foo.baz.')
    >>> e.description
    'The NS bit was not set in the bitmap of the NSEC RR corresponding to the delegated name (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 6840, Sec. 4.4']
    nsec_type = 'NSEC'

class ReferralWithoutNSBitNSEC3(ReferralWithoutNSBit):
    _abstract = False
    references = ['RFC 5155, Sec. 8.9']
    nsec_type = 'NSEC3'

class StypeInBitmap(NSECError):
    code = 'STYPE_IN_BITMAP'
    description_template = "The %(stype)s bit was set in the bitmap of the %(nsec_type)s RR corresponding to the SNAME (%(sname)s)."
    required_params = ['stype', 'sname']

class StypeInBitmapNODATA(StypeInBitmap):
    pass

class StypeInBitmapNODATANSEC(StypeInBitmapNODATA):
    '''
    >>> e = StypeInBitmapNODATANSEC(stype='A', sname='foo.baz.')
    >>> e.description
    'The A bit was set in the bitmap of the NSEC RR corresponding to the SNAME (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.1']
    nsec_type = 'NSEC'

class StypeInBitmapNODATANSEC3(StypeInBitmapNODATA):
    _abstract = False
    references = ['RFC 5155, Sec. 8.5']
    nsec_type = 'NSEC3'

class StypeInBitmapNODATADSNSEC3(StypeInBitmapNODATANSEC3):
    _abstract = False
    references = ['RFC 5155, Sec. 8.6']

class StypeInBitmapWildcardNODATA(StypeInBitmap):
    pass

class StypeInBitmapWildcardNODATANSEC(StypeInBitmapWildcardNODATA):
    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.4']
    nsec_type = 'NSEC'

class StypeInBitmapWildcardNODATANSEC3(StypeInBitmapWildcardNODATA):
    _abstract = False
    references = ['RFC 5155, Sec. 8.7']
    nsec_type = 'NSEC3'

class NoNSECMatchingSname(NSECError):
    code = 'NO_NSEC_MATCHING_SNAME'
    description_template = "No %(nsec_type)s RR matches the SNAME (%(sname)s)."
    required_params = ['sname']
    nsec_type = 'NSEC'

class NoNSECMatchingSnameNODATA(NoNSECMatchingSname):
    '''
    >>> e = NoNSECMatchingSnameNODATA(sname='foo.baz.')
    >>> e.description
    'No NSEC RR matches the SNAME (foo.baz.). See ...'
    '''

    _abstract = False
    references = ['RFC 4035, Sec. 3.1.3.1']

# NOTE(review): the source is truncated mid-token at the chunk boundary
# below; the remainder of this class (the rest of its references list and
# any following definitions) continues in the next chunk of the file.
class NoNSECMatchingSnameWildcardNODATA(NoNSECMatchingSname):
    _abstract = False
    references = ['RFC 4035, Sec.
3.1.3.4'] class NoNSEC3MatchingSname(NSECError): code = 'NO_NSEC3_MATCHING_SNAME' description_template = "No %(nsec_type)s RR matches the SNAME (%(sname)s)." required_params = ['sname'] nsec_type = 'NSEC3' class NoNSEC3MatchingSnameNODATA(NoNSEC3MatchingSname): ''' >>> e = NoNSEC3MatchingSnameNODATA(sname='foo.baz.') >>> e.description 'No NSEC3 RR matches the SNAME (foo.baz.). See ...' ''' _abstract = False references = ['RFC 5155, Sec. 8.5'] class NoNSEC3MatchingSnameDSNODATA(NoNSEC3MatchingSname): _abstract = False references = ['RFC 5155, Sec. 8.6'] class WildcardExpansionInvalid(NSECError): ''' >>> e = WildcardExpansionInvalid(sname='a.b.c.foo.baz.', wildcard='*.foo.baz.', next_closest_encloser='b.c.foo.baz.') >>> e.description 'The wildcard expansion of *.foo.baz. to a.b.c.foo.baz. is invalid, as the NSEC RR indicates that the next closest encloser (b.c.foo.baz.) exists. See ...' ''' _abstract = False code = 'WILDCARD_EXPANSION_INVALID' description_template = "The wildcard expansion of %(wildcard)s to %(sname)s is invalid, as the %(nsec_type)s RR indicates that the next closest encloser (%(next_closest_encloser)s) exists." references = ['RFC 1034, Sec. 4.4'] required_params = ['sname','wildcard','next_closest_encloser'] nsec_type = 'NSEC' class WildcardCovered(NSECError): code = 'WILDCARD_COVERED' description_template = "The %(nsec_type)s RR covers the wildcard itself (%(wildcard)s), indicating that it doesn't exist." required_params = ['wildcard'] class WildcardCoveredAnswer(WildcardCovered): pass class WildcardCoveredAnswerNSEC(WildcardCoveredAnswer): ''' >>> e = WildcardCoveredAnswerNSEC(wildcard='*.foo.baz.') >>> e.description "The NSEC RR covers the wildcard itself (*.foo.baz.), indicating that it doesn't exist. See ..." ''' _abstract = False references = ['RFC 4035, Sec. 3.1.3.3'] nsec_type = 'NSEC' class WildcardCoveredAnswerNSEC3(WildcardCoveredAnswer): _abstract = False references = ['RFC 5155, Sec. 
8.8'] nsec_type = 'NSEC3' class WildcardCoveredNODATA(WildcardCovered): pass class WildcardCoveredNODATANSEC(WildcardCoveredNODATA): _abstract = False references = ['RFC 4035, Sec. 3.1.3.4'] nsec_type = 'NSEC' class WildcardCoveredNODATANSEC3(WildcardCoveredNODATA): _abstract = False references = ['RFC 5155, Sec. 8.7'] nsec_type = 'NSEC3' class ExistingNSECError(NSECError): required_params = ['queries'] def __init__(self, **kwargs): super(ExistingNSECError, self).__init__(**kwargs) queries_text = ['%s/%s' % (name, rdtype) for name, rdtype in self.template_kwargs['queries']] self.template_kwargs['queries_text'] = ', '.join(queries_text) class ExistingCovered(ExistingNSECError): description_template = 'The following queries resulted in an answer response, even though the %(nsec_type)s records indicate that the queried names don\'t exist: %(queries_text)s' code = 'EXISTING_NAME_COVERED' class ExistingCoveredNSEC(ExistingCovered): ''' >>> e = ExistingCoveredNSEC(queries=[('www.foo.baz.', 'A'), ('www1.foo.baz.', 'TXT')]) >>> e.description "The following queries resulted in an answer response, even though the NSEC records indicate that the queried names don't exist: www.foo.baz./A, www1.foo.baz./TXT See ..." ''' _abstract = False references = ['RFC 4035, Sec. 3.1.3.2'] nsec_type = 'NSEC' class ExistingCoveredNSEC3(ExistingCovered): ''' >>> e = ExistingCoveredNSEC3(queries=[('www.foo.baz.', 'A'), ('www1.foo.baz.', 'TXT')]) >>> e.description "The following queries resulted in an answer response, even though the NSEC3 records indicate that the queried names don't exist: www.foo.baz./A, www1.foo.baz./TXT See ..." ''' _abstract = False references = ['RFC 5155, Sec. 
8.4'] nsec_type = 'NSEC3' class ExistingTypeNotInBitmap(ExistingNSECError): description_template = 'The following queries resulted in an answer response, even though the bitmap in the %(nsec_type)s RR indicates that the queried records don\'t exist: %(queries_text)s' code = 'EXISTING_TYPE_NOT_IN_BITMAP' class ExistingTypeNotInBitmapNSEC(ExistingTypeNotInBitmap): ''' >>> e = ExistingTypeNotInBitmapNSEC(queries=[('www.foo.baz.', 'A'), ('www.foo.baz.', 'TXT')]) >>> e.description "The following queries resulted in an answer response, even though the bitmap in the NSEC RR indicates that the queried records don't exist: www.foo.baz./A, www.foo.baz./TXT See ..." ''' _abstract = False references = ['RFC 4035, Sec. 3.1.3.1'] nsec_type = 'NSEC' class ExistingTypeNotInBitmapNSEC3(ExistingTypeNotInBitmap): ''' >>> e = ExistingTypeNotInBitmapNSEC3(queries=[('www.foo.baz.', 'A'), ('www.foo.baz.', 'TXT')]) >>> e.description "The following queries resulted in an answer response, even though the bitmap in the NSEC3 RR indicates that the queried records don't exist: www.foo.baz./A, www.foo.baz./TXT See ..." ''' _abstract = False references = ['RFC 5155, Sec. 8.5'] nsec_type = 'NSEC3' class SnameCoveredNODATANSEC(NSECError): ''' >>> e = SnameCoveredNODATANSEC(sname='foo.baz.') >>> e.description "The NSEC RR covers the SNAME (foo.baz.), indicating that it doesn't exist. See ..." ''' _abstract = False code = 'SNAME_COVERED' description_template = "The %(nsec_type)s RR covers the SNAME (%(sname)s), indicating that it doesn't exist." references = ['RFC 4035, Sec. 3.1.3.1'] required_params = ['sname'] nsec_type = 'NSEC' class LastNSECNextNotZone(NSECError): ''' >>> e = LastNSECNextNotZone(nsec_owner='z.foo.baz.', next_name='a.foo.baz.', zone_name='foo.baz.') >>> e.description 'The value of the Next Domain Name field in the NSEC RR with owner name z.foo.baz. indicates that it is the last NSEC RR in the zone, but the value (a.foo.baz.) did not match the name of the zone apex (foo.baz.). 
See ...' ''' _abstract = False code = 'LAST_NSEC_NEXT_NOT_ZONE' description_template = "The value of the Next Domain Name field in the %(nsec_type)s RR with owner name %(nsec_owner)s indicates that it is the last %(nsec_type)s RR in the zone, but the value (%(next_name)s) did not match the name of the zone apex (%(zone_name)s)." references = ['RFC 4034, Sec. 4.1.1'] required_params = ['nsec_owner','next_name','zone_name'] nsec_type = 'NSEC' class UnsupportedNSEC3Algorithm(NSECError): ''' >>> e = UnsupportedNSEC3Algorithm(algorithm=2) >>> e.description 'Generating NSEC3 hashes using algorithm 2 is not supported by this code. See ...' ''' _abstract = False code = 'UNSUPPORTED_NSEC3_ALGORITHM' description_template = "Generating %(nsec_type)s hashes using algorithm %(algorithm)d is not supported by this code." references = ['RFC 5155, Sec. 8.1'] required_params = ['algorithm'] nsec_type = 'NSEC3' class NonZeroNSEC3IterationCount(NSECError): ''' >>> e = NonZeroNSEC3IterationCount() >>> e.description 'An iterations count of 0 must be used in NSEC3 records to alleviate computational burdens. See ...' ''' _abstract = False code = 'NONZERO_NSEC3_ITERATION_COUNT' description_template = 'An iterations count of 0 must be used in %(nsec_type)s records to alleviate computational burdens.' references = ['RFC 9276, Sec. 3.1'] required_params = [] nsec_type = 'NSEC3' class NonEmptyNSEC3Salt(NSECError): ''' >>> e = NonEmptyNSEC3Salt() >>> e.description 'The salt value for an NSEC3 record should be empty. See ...' ''' _abstract = False code = 'NONEMPTY_NSEC3_SALT' description_template = 'The salt value for an %(nsec_type)s record should be empty.' references = ['RFC 9276, Sec. 3.1'] required_params = [] nsec_type = 'NSEC3' class InvalidNSEC3OwnerName(NSECError): ''' >>> e = InvalidNSEC3OwnerName(name='foo.com.') >>> e.description 'The NSEC3 owner name (foo.com.) is invalid; it does not appear to be the Base32 Hex encoding of a hashed owner name. See ...' 
''' _abstract = False code = 'INVALID_NSEC3_OWNER_NAME' description_template = "The %(nsec_type)s owner name (%(name)s) is invalid; it does not appear to be the Base32 Hex encoding of a hashed owner name." references = ['RFC 5155, Sec. 3'] required_params = ['name'] nsec_type = 'NSEC3' class InvalidNSEC3Hash(NSECError): ''' >>> e = InvalidNSEC3Hash(name='foo', nsec3_hash='foo===') >>> e.description 'The NSEC3 record for foo is invalid; the value of the Next Hashed Owner Name field (foo===) does not appear to be a valid hash. See ...' ''' _abstract = False code = 'INVALID_NSEC3_HASH' description_template = 'The NSEC3 record for %(name)s is invalid; the value of the Next Hashed Owner Name field (%(nsec3_hash)s) does not appear to be a valid hash.' references = ['RFC 5155, Sec. 3.1.7'] required_params = ['name', 'nsec3_hash'] nsec_type = 'NSEC3' class ResponseError(DomainNameAnalysisError): pass class InvalidResponseError(ResponseError): required_params = ['tcp'] def __init__(self, *args, **kwargs): super(ResponseError, self).__init__(**kwargs) if self.template_kwargs['tcp']: self.template_kwargs['proto'] = 'TCP' else: self.template_kwargs['proto'] = 'UDP' class NetworkError(InvalidResponseError): ''' >>> e = NetworkError(tcp=False, errno='EHOSTUNREACH') >>> e.description 'The server was not reachable over UDP (EHOSTUNREACH). See ...' >>> e = NetworkError(tcp=False, errno='ECONNREFUSED') >>> e.description 'The UDP connection was refused (ECONNREFUSED). See ...' >>> e.terse_description 'NETWORK_ERROR:ECONNREFUSED' ''' _abstract = False code = 'NETWORK_ERROR' description_template = '%(description)s' terse_description_template = '%(code)s:%(errno)s' required_params = InvalidResponseError.required_params + ['errno'] references = ['RFC 1035, Sec. 4.2'] def __init__(self, *args, **kwargs): super(NetworkError, self).__init__(**kwargs) if self.template_kwargs['errno'] == 'ECONNRESET': self.template_kwargs['description'] = 'The %s connection was interrupted (%s).' 
% (self.template_kwargs['proto'], self.template_kwargs['errno']) elif self.template_kwargs['errno'] == 'ECONNREFUSED': self.template_kwargs['description'] = 'The %s connection was refused (%s).' % (self.template_kwargs['proto'], self.template_kwargs['errno']) elif self.template_kwargs['errno'] == 'EHOSTUNREACH': self.template_kwargs['description'] = 'The server was not reachable over %s (%s).' % (self.template_kwargs['proto'], self.template_kwargs['errno']) else: self.template_kwargs['description'] = 'There was an error communicating with the server over %s (%s).' % (self.template_kwargs['proto'], self.template_kwargs['errno']) class FormError(InvalidResponseError): ''' >>> e = FormError(tcp=False, msg_size=30) >>> e.description 'The response (30 bytes) was malformed.' ''' _abstract = False code = 'FORMERR' description_template = "The response (%(msg_size)d bytes) was malformed." required_params = InvalidResponseError.required_params + ['msg_size'] class Timeout(InvalidResponseError): ''' >>> e = Timeout(tcp=False, attempts=3) >>> e.description 'No response was received from the server over UDP (tried 3 times). See ...' ''' _abstract = False code = 'TIMEOUT' description_template = "No response was received from the server over %(proto)s (tried %(attempts)d times)." required_params = InvalidResponseError.required_params + ['attempts'] references = ['RFC 1035, Sec. 4.2'] class UnknownResponseError(InvalidResponseError): ''' >>> e = UnknownResponseError(tcp=False) >>> e.description 'An invalid response was received from the server over UDP.' ''' _abstract = False code = 'RESPONSE_ERROR' description_template = "An invalid response was received from the server over %(proto)s." 
def __init__(self, *args, **kwargs): super(UnknownResponseError, self).__init__(**kwargs) self.template_kwargs['description'] = "An invalid response was received from the server over %s" % (self.template_kwargs['proto']) class InvalidRcode(InvalidResponseError): ''' >>> e = InvalidRcode(tcp=False, rcode='SERVFAIL') >>> e.description 'The response had an invalid RCODE (SERVFAIL). See ...' >>> e.terse_description 'INVALID_RCODE:SERVFAIL' ''' _abstract = False code = 'INVALID_RCODE' description_template = "The response had an invalid RCODE (%(rcode)s)." terse_description_template = '%(code)s:%(rcode)s' required_params = InvalidResponseError.required_params + ['rcode'] references = ['RFC 1035, Sec. 4.1.1'] class NotAuthoritative(ResponseError): ''' >>> e = NotAuthoritative() >>> e.description 'The Authoritative Answer (AA) flag was not set in the response. See ...' ''' _abstract = False code = 'NOT_AUTHORITATIVE' description_template = "The Authoritative Answer (AA) flag was not set in the response." references = ['RFC 1035, Sec. 4.1.1'] required_params = [] class AuthoritativeReferral(ResponseError): ''' >>> e = AuthoritativeReferral() >>> e.description 'The Authoritative Answer (AA) flag was set in the referral response. See ...' ''' _abstract = False code = 'AUTHORITATIVE_REFERRAL' description_template = "The Authoritative Answer (AA) flag was set in the referral response." references = ['RFC 1035, Sec. 4.1.1'] required_params = [] class RecursionNotAvailable(ResponseError): ''' >>> e = RecursionNotAvailable() >>> e.description 'Recursion was desired, but the Recursion Available (RA) flag was not set in the response. See ...' ''' _abstract = False code = 'RECURSION_NOT_AVAILABLE' description_template = "Recursion was desired, but the Recursion Available (RA) flag was not set in the response." references = ['RFC 1035, Sec. 
4.1.1'] required_params = [] class ResponseErrorWithCondition(ResponseError): description_template = "%(response_error_description)s until %(change)s%(query_specific_text)s." required_params = ['response_error', 'query_specific'] use_effective_query_tag = False def __init__(self, *args, **kwargs): super(ResponseErrorWithCondition, self).__init__(**kwargs) desc = self.template_kwargs['response_error'].description_without_references self.template_kwargs['response_error_description'] = desc[:-1] if self.template_kwargs['query_specific']: self.template_kwargs['query_specific_text'] = ' (however, this server appeared to respond legitimately to other queries with %s)' % (self.precondition % self.template_kwargs) else: self.template_kwargs['query_specific_text'] = '' class ResponseErrorWithRequestFlag(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithRequestFlag(response_error=Timeout(tcp=False, attempts=3), flag='RD', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the RD flag was cleared. See ...' ''' _abstract = False code = 'ERROR_WITH_REQUEST_FLAG' references = ['RFC 1035, Sec. 4.1.1'] required_params = ResponseErrorWithCondition.required_params + ['flag'] def __init__(self, *args, **kwargs): self.precondition = 'the %(flag)s flag set' super(ResponseErrorWithRequestFlag, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s flag was cleared' % (self.template_kwargs['flag']) class ResponseErrorWithoutRequestFlag(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithoutRequestFlag(response_error=Timeout(tcp=False, attempts=3), flag='RD', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the RD flag was set. See ...' ''' _abstract = False code = 'ERROR_WITHOUT_REQUEST_FLAG' references = ['RFC 1035, Sec. 
4.1.1'] required_params = ResponseErrorWithCondition.required_params + ['flag'] def __init__(self, *args, **kwargs): self.precondition = 'the %(flag)s flag cleared' super(ResponseErrorWithoutRequestFlag, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s flag was set' % (self.template_kwargs['flag']) class ResponseErrorWithEDNS(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithEDNS(response_error=Timeout(tcp=False, attempts=3), query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until EDNS was disabled. See ...' ''' _abstract = False code = 'ERROR_WITH_EDNS' references = ['RFC 6891, Sec. 6.2.6'] def __init__(self, *args, **kwargs): self.precondition = 'EDNS enabled' super(ResponseErrorWithEDNS, self).__init__(**kwargs) self.template_kwargs['change'] = 'EDNS was disabled' class ResponseErrorWithEDNSVersion(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithEDNSVersion(response_error=Timeout(tcp=False, attempts=3), edns_old=3, edns_new=0, query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the version of EDNS was changed from 3 to 0. See ...' ''' _abstract = False code = 'ERROR_WITH_EDNS_VERSION' references = ['RFC 6891, Sec. 6.1.3'] required_params = ResponseErrorWithCondition.required_params + ['edns_old', 'edns_new'] def __init__(self, *args, **kwargs): self.precondition = 'EDNS version %(edns_old)d' super(ResponseErrorWithEDNSVersion, self).__init__(**kwargs) self.template_kwargs['change'] = 'the version of EDNS was changed from %d to %d' % \ (self.template_kwargs['edns_old'], self.template_kwargs['edns_new']) class ResponseErrorWithEDNSFlag(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithEDNSFlag(response_error=Timeout(tcp=False, attempts=3), flag='DO', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the DO EDNS flag was cleared. See ...' 
''' _abstract = False code = 'ERROR_WITH_EDNS_FLAG' references = ['RFC 6891, Sec. 6.1.4'] required_params = ResponseErrorWithCondition.required_params + ['flag'] def __init__(self, *args, **kwargs): self.precondition = 'the %(flag)s EDNS flag set' super(ResponseErrorWithEDNSFlag, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s EDNS flag was cleared' % (self.template_kwargs['flag']) class ResponseErrorWithoutEDNSFlag(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithoutEDNSFlag(response_error=Timeout(tcp=False, attempts=3), flag='DO', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the DO EDNS flag was set. See ...' ''' _abstract = False code = 'ERROR_WITHOUT_EDNS_FLAG' references = ['RFC 6891, Sec. 6.1.4'] required_params = ResponseErrorWithCondition.required_params + ['flag'] def __init__(self, *args, **kwargs): self.precondition = 'the %(flag)s EDNS flag cleared' super(ResponseErrorWithoutEDNSFlag, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s EDNS flag was set' % (self.template_kwargs['flag']) class ResponseErrorWithEDNSOption(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithEDNSOption(response_error=Timeout(tcp=False, attempts=3), option='NSID', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the NSID EDNS option was removed. See ...' ''' _abstract = False code = 'ERROR_WITH_EDNS_OPTION' references = ['RFC 6891, Sec. 
6.1.2'] required_params = ResponseErrorWithCondition.required_params + ['option'] def __init__(self, *args, **kwargs): self.precondition = 'the %(option)s EDNS option present' super(ResponseErrorWithEDNSOption, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s EDNS option was removed' % (self.template_kwargs['option']) class ResponseErrorWithoutEDNSOption(ResponseErrorWithCondition): ''' >>> e = ResponseErrorWithoutEDNSOption(response_error=Timeout(tcp=False, attempts=3), option='NSID', query_specific=False) >>> e.description 'No response was received from the server over UDP (tried 3 times) until the NSID EDNS option was added. See ...' ''' _abstract = False code = 'ERROR_WITHOUT_EDNS_OPTION' references = ['RFC 6891, Sec. 6.1.2'] required_params = ResponseErrorWithCondition.required_params + ['option'] def __init__(self, *args, **kwargs): self.precondition = 'without the %(option)s EDNS option' super(ResponseErrorWithoutEDNSOption, self).__init__(**kwargs) self.template_kwargs['change'] = 'the %s EDNS option was added' % (self.template_kwargs['option']) class EDNSError(ResponseError): pass class EDNSVersionMismatch(EDNSError): ''' >>> e = EDNSVersionMismatch(request_version=1, response_version=0) >>> e.description 'The server responded with EDNS version 0 when a request with EDNS version 1 was sent, instead of responding with RCODE BADVERS. See ...' ''' _abstract = False code = 'EDNS_VERSION_MISMATCH' description_template = "The server responded with EDNS version %(response_version)d when a request with EDNS version %(request_version)d was sent, instead of responding with RCODE BADVERS." references = ['RFC 6891, Sec. 6.1.3'] required_params = ['request_version', 'response_version'] class EDNSIgnored(EDNSError): ''' >>> e = EDNSIgnored() >>> e.description 'The server responded with no OPT record, rather than with RCODE FORMERR. See ...' 
''' _abstract = False code = 'EDNS_IGNORED' description_template = 'The server responded with no OPT record, rather than with RCODE FORMERR.' references = ['RFC 6891, Sec. 7'] required_params = [] class EDNSSupportNoOpt(EDNSError): ''' >>> e = EDNSSupportNoOpt() >>> e.description 'The server appeared to understand EDNS by including RRSIG records, but its response included no OPT record. See ...' ''' _abstract = False code = 'EDNS_SUPPORT_NO_OPT' description_template = 'The server appeared to understand EDNS by including RRSIG records, but its response included no OPT record.' references = ['RFC 6891, Sec. 7'] required_params = [] class GratuitousOPT(EDNSError): ''' >>> e = GratuitousOPT() >>> e.description 'The server responded with an OPT record, even though none was sent in the request. See ...' ''' _abstract = False code = 'GRATUITOUS_OPT' description_template = 'The server responded with an OPT record, even though none was sent in the request.' references = ['RFC 6891, Sec. 6.1.1'] required_params = [] class ImplementedEDNSVersionNotProvided(EDNSError): ''' >>> e = ImplementedEDNSVersionNotProvided(request_version=100, response_version=100) >>> e.description 'The server responded with BADVERS to EDNS version 100 but responded with version 100 instead of providing the highest EDNS version it implements. See ...' ''' _abstract = False code = 'IMPLEMENTED_EDNS_VERSION_NOT_PROVIDED' description_template = "The server responded with BADVERS to EDNS version %(request_version)d but responded with version %(response_version)d instead of providing the highest EDNS version it implements." references = ['RFC 6891, Sec. 6.1.3'] required_params = ['request_version', 'response_version'] class EDNSUndefinedFlagsSet(EDNSError): ''' >>> e = EDNSUndefinedFlagsSet(flags=0x80) >>> e.description 'The server set EDNS flags that are undefined: 0x80. See ...' 
''' _abstract = False code = 'EDNS_UNDEFINED_FLAGS_SET' description_template = 'The server set EDNS flags that are undefined: %(flags_text)s.' references = ['RFC 6891, Sec. 6.1.4'] required_params = ['flags'] def __init__(self, **kwargs): super(EDNSUndefinedFlagsSet, self).__init__(**kwargs) self.template_kwargs['flags_text'] = '0x%x' % (self.template_kwargs['flags']) class DNSSECDowngrade(EDNSError): description_template = "DNSSEC was effectively downgraded because %(response_error_description)s with %(precondition)s." required_params = ['response_error'] precondition = None def __init__(self, *args, **kwargs): super(DNSSECDowngrade, self).__init__(**kwargs) desc = self.template_kwargs['response_error'].description_without_references self.template_kwargs['response_error_description'] = desc[0].lower() + desc[1:-1] self.template_kwargs['precondition'] = self.precondition class DNSSECDowngradeDOBitCleared(DNSSECDowngrade): ''' >>> e = DNSSECDowngradeDOBitCleared(response_error=Timeout(tcp=False, attempts=3)) >>> e.description 'DNSSEC was effectively downgraded because no response was received from the server over UDP (tried 3 times) with the DO bit set. See ...' ''' _abstract = False code = 'DNSSEC_DOWNGRADE_DO_CLEARED' precondition = 'the DO bit set' references = ['RFC 4035, Sec. 3.2.1'] class DNSSECDowngradeEDNSDisabled(DNSSECDowngrade): ''' >>> e = DNSSECDowngradeEDNSDisabled(response_error=Timeout(tcp=False, attempts=3), query_specific=False) >>> e.description 'DNSSEC was effectively downgraded because no response was received from the server over UDP (tried 3 times) with EDNS enabled. See ...' ''' _abstract = False code = 'DNSSEC_DOWNGRADE_EDNS_DISABLED' precondition = 'EDNS enabled' references = ['RFC 6891, Sec. 7', 'RFC 2671, Sec. 5.3'] class DNSCookieError(ResponseError): pass class GratuitousCookie(DNSCookieError): ''' >>> e = GratuitousCookie() >>> e.description 'The server sent a COOKIE option when none was sent by the client. See ...' 
''' _abstract = False code = 'GRATUITOUS_COOKIE' description_template = 'The server sent a COOKIE option when none was sent by the client.' references = ['RFC 7873, Sec. 5.2.1'] class MalformedCookieWithoutFORMERR(DNSCookieError): ''' >>> e = MalformedCookieWithoutFORMERR() >>> e.description 'The server appears to support DNS cookies but did not return a FORMERR status when issued a malformed COOKIE option. See ...' ''' _abstract = False code = 'MALFORMED_COOKIE_WITHOUT_FORMERR' description_template = 'The server appears to support DNS cookies but did not return a FORMERR status when issued a malformed COOKIE option.' references = ['RFC 7873, Sec. 5.2.2'] class NoCookieOption(DNSCookieError): ''' >>> e = NoCookieOption() >>> e.description 'The server appears to support DNS cookies but did not return a COOKIE option. See ...' ''' _abstract = False code = 'NO_COOKIE_OPTION' description_template = 'The server appears to support DNS cookies but did not return a COOKIE option.' references = ['RFC 7873, Sec. 5.2.3'] class NoServerCookieWithoutBADCOOKIE(DNSCookieError): ''' >>> e = NoServerCookieWithoutBADCOOKIE() >>> e.description 'The server appears to support DNS cookies but did not return a BADCOOKIE status when no server cookie was sent. See ...' ''' _abstract = False code = 'NO_SERVER_COOKIE_WITHOUT_BADCOOKIE' description_template = 'The server appears to support DNS cookies but did not return a BADCOOKIE status when no server cookie was sent.' references = ['RFC 7873, Sec. 5.2.3'] class InvalidServerCookieWithoutBADCOOKIE(DNSCookieError): ''' >>> e = InvalidServerCookieWithoutBADCOOKIE() >>> e.description 'The server appears to support DNS cookies but did not return a BADCOOKIE status when an invalid server cookie was sent. See ...' ''' _abstract = False code = 'INVALID_SERVER_COOKIE_WITHOUT_BADCOOKIE' description_template = 'The server appears to support DNS cookies but did not return a BADCOOKIE status when an invalid server cookie was sent.' 
references = ['RFC 7873, Sec. 5.2.4'] class NoServerCookie(DNSCookieError): ''' >>> e = NoServerCookie() >>> e.description 'The server appears to support DNS cookies but did not return a server cookie with its COOKIE option. See ...' ''' _abstract = False code = 'NO_SERVER_COOKIE' description_template = 'The server appears to support DNS cookies but did not return a server cookie with its COOKIE option.' references = ['RFC 7873, Sec. 5.2.3'] class ClientCookieMismatch(DNSCookieError): ''' >>> e = ClientCookieMismatch() >>> e.description 'The client cookie returned by the server did not match what was sent. See ...' ''' _abstract = False code = 'CLIENT_COOKIE_MISMATCH' description_template = 'The client cookie returned by the server did not match what was sent.' references = ['RFC 7873, Sec. 5.3'] class CookieInvalidLength(DNSCookieError): ''' >>> e = CookieInvalidLength(length=61) >>> e.description 'The cookie returned by the server had an invalid length of 61 bytes. See ...' ''' _abstract = False code = 'COOKIE_INVALID_LENGTH' description_template = 'The cookie returned by the server had an invalid length of %(length)d bytes.' references = ['RFC 7873, Sec. 5.3'] required_params = ['length'] class UnableToRetrieveDNSSECRecords(ResponseError): ''' >>> e = UnableToRetrieveDNSSECRecords() >>> e.description 'The DNSSEC records necessary to validate the response could not be retrieved from the server. See ...' ''' _abstract = False code = 'UNABLE_TO_RETRIEVE_DNSSEC_RECORDS' description_template = 'The DNSSEC records necessary to validate the response could not be retrieved from the server.' references = ['RFC 4035, Sec. 3.1.1', 'RFC 4035, Sec. 3.1.3'] required_params = [] use_effective_query_tag = False class MissingRRSIG(ResponseError): ''' >>> e = MissingRRSIG() >>> e.description 'No RRSIG covering the RRset was returned in the response. See ...' 
''' _abstract = False code = 'MISSING_RRSIG' description_template = 'No RRSIG covering the RRset was returned in the response.' references = ['RFC 4035, Sec. 3.1.1'] required_params = [] class MissingRRSIGForAlg(ResponseError): description_template = 'The %(source)s RRset for the zone included algorithm %(algorithm)d (%(algorithm_text)s), but no RRSIG with algorithm %(algorithm)d covering the RRset was returned in the response.' references = ['RFC 4035, Sec. 2.2', 'RFC 6840, Sec. 5.11'] required_params = ['algorithm'] source = None def __init__(self, **kwargs): super(MissingRRSIGForAlg, self).__init__(**kwargs) self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm']) self.template_kwargs['source'] = self.source class MissingRRSIGForAlgDNSKEY(MissingRRSIGForAlg): ''' >>> e = MissingRRSIGForAlgDNSKEY(algorithm=5) >>> e.description 'The DNSKEY RRset for the zone included algorithm 5 (RSASHA1), but no RRSIG with algorithm 5 covering the RRset was returned in the response. See ...' ''' _abstract = False code = 'MISSING_RRSIG_FOR_ALG_DNSKEY' source = 'DNSKEY' class MissingRRSIGForAlgDS(MissingRRSIGForAlg): ''' >>> e = MissingRRSIGForAlgDS(algorithm=5) >>> e.description 'The DS RRset for the zone included algorithm 5 (RSASHA1), but no RRSIG with algorithm 5 covering the RRset was returned in the response. See ...' ''' _abstract = False code = 'MISSING_RRSIG_FOR_ALG_DS' source = 'DS' class MissingRRSIGForAlgDLV(MissingRRSIGForAlg): ''' >>> e = MissingRRSIGForAlgDLV(algorithm=5) >>> e.description 'The DLV RRset for the zone included algorithm 5 (RSASHA1), but no RRSIG with algorithm 5 covering the RRset was returned in the response. See ...' ''' _abstract = False code = 'MISSING_RRSIG_FOR_ALG_DLV' source = 'DLV' class MissingNSEC(ResponseError): description_template = 'No NSEC RR(s) were returned to validate the %(response)s response.' 
response = None def __init__(self, **kwargs): super(MissingNSEC, self).__init__(**kwargs) self.template_kwargs['response'] = self.response class MissingNSECForNXDOMAIN(MissingNSEC): ''' >>> e = MissingNSECForNXDOMAIN() >>> e.description 'No NSEC RR(s) were returned to validate the NXDOMAIN response. See ...' ''' _abstract = False code = 'MISSING_NSEC_FOR_NXDOMAIN' references = ['RFC 4035, Sec. 3.1.3.2', 'RFC 5155, Sec. 7.2.2'] response = 'NXDOMAIN' class MissingNSECForNODATA(MissingNSEC): ''' >>> e = MissingNSECForNODATA() >>> e.description 'No NSEC RR(s) were returned to validate the NODATA response. See ...' ''' _abstract = False code = 'MISSING_NSEC_FOR_NODATA' references = ['RFC 4035, Sec. 3.1.3.1', 'RFC 5155, Sec. 7.2.3', 'RFC 5155, Sec. 7.2.4'] response = 'NODATA' class MissingNSECForWildcard(MissingNSEC): ''' >>> e = MissingNSECForWildcard() >>> e.description 'No NSEC RR(s) were returned to validate the wildcard response. See ...' ''' _abstract = False code = 'MISSING_NSEC_FOR_WILDCARD' references = ['RFC 4035, Sec. 3.1.3.3', 'RFC 4035, Sec. 3.1.3.4', 'RFC 5155, Sec. 7.2.5', 'RFC 5155, Sec. 7.2.6'] response = 'wildcard' class MissingSOA(ResponseError): description_template = 'The %(response)s response did not include an SOA record.' references = ['RFC 1034, Sec. 4.3.4'] response = None def __init__(self, **kwargs): super(MissingSOA, self).__init__(**kwargs) self.template_kwargs['response'] = self.response class MissingSOAForNXDOMAIN(MissingSOA): ''' >>> e = MissingSOAForNXDOMAIN() >>> e.description 'The NXDOMAIN response did not include an SOA record. See ...' ''' _abstract = False code = 'MISSING_SOA_FOR_NXDOMAIN' references = MissingSOA.references + ['RFC 2308, Sec. 2.1'] response = 'NXDOMAIN' class MissingSOAForNODATA(MissingSOA): ''' >>> e = MissingSOAForNODATA() >>> e.description 'The NODATA response did not include an SOA record. See ...' ''' _abstract = False code = 'MISSING_SOA_FOR_NODATA' references = MissingSOA.references + ['RFC 2308, Sec. 
2.2'] response = 'NODATA' class UpwardReferral(ResponseError): _abstract = False code = 'UPWARD_REFERRAL' description_template = 'The response was an upward referral.' references = ['https://www.dns-oarc.net/oarc/articles/upward-referrals-considered-harmful'] class SOAOwnerNotZone(ResponseError): description_template = 'An SOA RR with owner name (%(soa_owner_name)s) not matching the zone name (%(zone_name)s) was returned with the %(response)s response.' references = ['RFC 1034, Sec. 4.3.4'] required_params = ['soa_owner_name', 'zone_name'] response = None def __init__(self, **kwargs): super(SOAOwnerNotZone, self).__init__(**kwargs) self.template_kwargs['response'] = self.response class SOAOwnerNotZoneForNXDOMAIN(SOAOwnerNotZone): ''' >>> e = SOAOwnerNotZoneForNXDOMAIN(soa_owner_name='foo.baz.', zone_name='bar.') >>> e.description 'An SOA RR with owner name (foo.baz.) not matching the zone name (bar.) was returned with the NXDOMAIN response. See ...' ''' _abstract = False code = 'SOA_NOT_OWNER_FOR_NXDOMAIN' references = SOAOwnerNotZone.references + ['RFC 2308, Sec. 2.1'] response = 'NXDOMAIN' class SOAOwnerNotZoneForNODATA(SOAOwnerNotZone): ''' >>> e = SOAOwnerNotZoneForNODATA(soa_owner_name='foo.baz.', zone_name='bar.') >>> e.description 'An SOA RR with owner name (foo.baz.) not matching the zone name (bar.) was returned with the NODATA response. See ...' ''' _abstract = False code = 'SOA_NOT_OWNER_FOR_NODATA' references = SOAOwnerNotZone.references + ['RFC 2308, Sec. 2.2'] response = 'NODATA' class InconsistentNXDOMAIN(ResponseError): ''' >>> e = InconsistentNXDOMAIN(qname='foo.baz.', rdtype_nxdomain='NS', rdtype_noerror='A') >>> e.description 'The server returned a no error (NOERROR) response when queried for foo.baz. having record data of type A, but returned a name error (NXDOMAIN) when queried for foo.baz. having record data of type NS. See ...' 
''' _abstract = False code = 'INCONSISTENT_NXDOMAIN' description_template = 'The server returned a no error (NOERROR) response when queried for %(qname)s having record data of type %(rdtype_noerror)s, but returned a name error (NXDOMAIN) when queried for %(qname)s having record data of type %(rdtype_nxdomain)s.' required_params = ['qname', 'rdtype_nxdomain', 'rdtype_noerror'] references = ['RFC 1034, Sec. 4.3.2'] class InconsistentNXDOMAINAncestry(ResponseError): ''' >>> e = InconsistentNXDOMAINAncestry(qname='foo.baz.', ancestor_qname='baz.') >>> e.description "A query for foo.baz. results in a NOERROR response, while a query for its ancestor, baz., returns a name error (NXDOMAIN), which indicates that subdomains of baz., including foo.baz., don't exist. See ..." ''' _abstract = False code = 'INCONSISTENT_NXDOMAIN_ANCESTOR' description_template = "A query for %(qname)s results in a NOERROR response, while a query for its ancestor, %(ancestor_qname)s, returns a name error (NXDOMAIN), which indicates that subdomains of %(ancestor_qname)s, including %(qname)s, don't exist." required_params = ['qname', 'ancestor_qname'] references = ['RFC 8020, Sec. 2'] class PMTUExceeded(ResponseError): ''' >>> e = PMTUExceeded(pmtu_lower_bound=None, pmtu_upper_bound=None) >>> e.description 'No response was received until the UDP payload size was decreased, indicating that the server might be attempting to send a payload that exceeds the path maximum transmission unit (PMTU) size. See ...' >>> e = PMTUExceeded(pmtu_lower_bound=511, pmtu_upper_bound=513) >>> e.description 'No response was received until the UDP payload size was decreased, indicating that the server might be attempting to send a payload that exceeds the path maximum transmission unit (PMTU) size. The PMTU was bounded between 511 and 513 bytes. See ...' 
''' _abstract = False code = 'PMTU_EXCEEDED' description_template = '%(description)s' required_params = ['pmtu_lower_bound', 'pmtu_upper_bound'] references = ['RFC 6891, Sec. 6.2.6'] use_effective_query_tag = False def __init__(self, **kwargs): super(PMTUExceeded, self).__init__(**kwargs) self.template_kwargs['description'] = 'No response was received until the UDP payload size was decreased, indicating that the server might be attempting to send a payload that exceeds the path maximum transmission unit (PMTU) size.' if self.template_kwargs['pmtu_lower_bound'] is not None and self.template_kwargs['pmtu_upper_bound'] is not None: self.template_kwargs['description'] += ' The PMTU was bounded between %(pmtu_lower_bound)d and %(pmtu_upper_bound)d bytes.' % self.template_kwargs class ForeignClassData(ResponseError): section = None description_template = 'Data of class %(cls)s was found in the %(section)s section of the response.' references = ['RFC 1034, Sec. 6.1', 'RFC 1035, Sec. 5.2'] required_params = ['cls'] def __init__(self, **kwargs): super(ForeignClassData, self).__init__(**kwargs) self.template_kwargs['section'] = self.section class ForeignClassDataAnswer(ForeignClassData): ''' >>> e = ForeignClassDataAnswer(cls='CH') >>> e.description 'Data of class CH was found in the Answer section of the response. See ...' ''' section = 'Answer' _abstract = False code = 'FOREIGN_CLASS_DATA_ANSWER' class ForeignClassDataAuthority(ForeignClassData): ''' >>> e = ForeignClassDataAuthority(cls='CH') >>> e.description 'Data of class CH was found in the Authority section of the response. See ...' ''' section = 'Authority' _abstract = False code = 'FOREIGN_CLASS_DATA_AUTHORITY' class ForeignClassDataAdditional(ForeignClassData): ''' >>> e = ForeignClassDataAdditional(cls='CH') >>> e.description 'Data of class CH was found in the Additional section of the response. See ...' 
''' section = 'Additional' _abstract = False code = 'FOREIGN_CLASS_DATA_ADDITIONAL' class CasePreservationError(ResponseError): ''' >>> e = CasePreservationError(qname='ExAmPlE.CoM') >>> e.description 'The case of the query name (ExAmPlE.CoM) was not preserved in the Question section of the response.' ''' _abstract = False code = 'CASE_NOT_PRESERVED' description_template = '%(description)s' description_template = 'The case of the query name (%(qname)s) was not preserved in the Question section of the response.' required_params = ['qname'] class DelegationError(DomainNameAnalysisError): pass class MissingSEPForAlg(DelegationError): ''' >>> e = MissingSEPForAlg(algorithm=5, source='DS') >>> e.description "The DS RRset for the zone included algorithm 5 (RSASHA1), but no DS RR matched a DNSKEY with algorithm 5 that signs the zone's DNSKEY RRset. See ..." ''' _abstract = False code = 'MISSING_SEP_FOR_ALG' description_template = "The %(source)s RRset for the zone included algorithm %(algorithm)d (%(algorithm_text)s), but no %(source)s RR matched a DNSKEY with algorithm %(algorithm)d that signs the zone's DNSKEY RRset." references = ['RFC 4035, Sec. 2.2', 'RFC 6840, Sec. 5.11'] required_params = ['algorithm'] def __init__(self, **kwargs): super(MissingSEPForAlg, self).__init__(**kwargs) self.template_kwargs['algorithm_text'] = dns.dnssec.algorithm_to_text(self.template_kwargs['algorithm']) try: self.template_kwargs['source'] = kwargs['source'] except KeyError: raise TypeError('The "source" keyword argument is required for instantiation.') class MissingSEPForAlgCDNSKEY(MissingSEPForAlg): references = MissingSEPForAlg.references + ['RFC 7344, Sec. 3', 'RFC 7344, Sec. 5'] class NoSEP(DelegationError): ''' >>> e = NoSEP(source='DS') >>> e.description 'No valid RRSIGs made by a key corresponding to a DS RR were found covering the DNSKEY RRset, resulting in no secure entry point (SEP) into the zone. See ...' 
''' _abstract = False code = 'NO_SEP' description_template = "No valid RRSIGs made by a key corresponding to a %(source)s RR were found covering the DNSKEY RRset, resulting in no secure entry point (SEP) into the zone." references = ['RFC 4035, Sec. 2.2', 'RFC 6840, Sec. 5.11'] required_params = [] def __init__(self, **kwargs): super(NoSEP, self).__init__(**kwargs) try: self.template_kwargs['source'] = kwargs['source'] except KeyError: raise TypeError('The "source" keyword argument is required for instantiation.') class NoSEPCDNSKEY(NoSEP): references = NoSEP.references + ['RFC 7344, Sec. 3', 'RFC 7344, Sec. 5'] class NoNSInParent(DelegationError): pass class NoNSInParentNXDOMAIN(NoNSInParent): ''' >>> e = NoNSInParentNXDOMAIN(parent='baz.') >>> e.description 'No delegation NS records were detected in the parent zone (baz.). This results in an NXDOMAIN response to a DS query (for DNSSEC), even if the parent servers are authoritative for the child. See ...' ''' _abstract = False code = 'NO_NS_IN_PARENT_NXDOMAIN' description_template = "No delegation NS records were detected in the parent zone (%(parent)s). This results in an NXDOMAIN response to a DS query (for DNSSEC), even if the parent servers are authoritative for the child." references = ['RFC 1034, Sec. 4.2.2'] required_params = ['parent'] class NoNSInParentNoData(NoNSInParent): ''' >>> e = NoNSInParentNoData(parent='baz.') >>> e.description 'No delegation NS records were detected in the parent zone (baz.). This results in a NODATA response to a DS query (for DNSSEC), even if the parent servers are authoritative for the child. See ...' ''' _abstract = False code = 'NO_NS_IN_PARENT_NODATA' description_template = "No delegation NS records were detected in the parent zone (%(parent)s). This results in a NODATA response to a DS query (for DNSSEC), even if the parent servers are authoritative for the child." references = ['RFC 1034, Sec. 
4.2.2'] required_params = ['parent'] class NoNSAddressesForIPVersion(DelegationError): version = None required_params = ['reference'] def __init__(self, *args, **kwargs): super(NoNSAddressesForIPVersion, self).__init__(**kwargs) self.template_kwargs['version'] = self.version class NoNSAddressesForIPv4(NoNSAddressesForIPVersion): ''' >>> e = NoNSAddressesForIPv4(reference='parent') >>> e.description 'No IPv4 addresses were found for NS records in the parent zone.' ''' _abstract = False code = 'NO_NS_ADDRESSES_FOR_IPV4' description_template = "No IPv%(version)d addresses were found for NS records in the %(reference)s zone." references = [] version = 4 class NoNSAddressesForIPv6(NoNSAddressesForIPVersion): ''' >>> e = NoNSAddressesForIPv6(reference='parent') >>> e.description 'No IPv6 addresses were found for NS records in the parent zone.' ''' _abstract = False code = 'NO_NS_ADDRESSES_FOR_IPV6' description_template = "No IPv%(version)d addresses were found for NS records in the %(reference)s zone." references = [] version = 6 class NSNameError(DelegationError): required_params = ['names'] def __init__(self, **kwargs): super(NSNameError, self).__init__(**kwargs) self.template_kwargs['names_text'] = ', '.join(self.template_kwargs['names']) class NSNameNotInChild(NSNameError): ''' >>> e = NSNameNotInChild(names=('ns1.foo.baz.',), parent='baz.') >>> e.description 'The following NS name(s) were found in the delegation NS RRset (i.e., in the baz. zone), but not in the authoritative NS RRset: ns1.foo.baz. See ...' ''' _abstract = False code = 'NS_NAME_NOT_IN_CHILD' description_template = "The following NS name(s) were found in the delegation NS RRset (i.e., in the %(parent)s zone), but not in the authoritative NS RRset: %(names_text)s" required_params = NSNameError.required_params + ['parent'] references = ['RFC 1034, Sec. 
4.2.2'] class NSNameNotInParent(NSNameError): ''' >>> e = NSNameNotInParent(names=('ns1.foo.baz.',), parent='baz.') >>> e.description 'The following NS name(s) were found in the authoritative NS RRset, but not in the delegation NS RRset (i.e., in the baz. zone): ns1.foo.baz. See ...' ''' _abstract = False code = 'NS_NAME_NOT_IN_PARENT' description_template = "The following NS name(s) were found in the authoritative NS RRset, but not in the delegation NS RRset (i.e., in the %(parent)s zone): %(names_text)s" required_params = NSNameError.required_params + ['parent'] references = ['RFC 1034, Sec. 4.2.2'] class ErrorResolvingNSName(NSNameError): ''' >>> e = ErrorResolvingNSName(names=('ns1.foo.baz.',)) >>> e.description 'There was an error resolving the following NS name(s) to address(es): ns1.foo.baz.' ''' _abstract = False code = 'ERROR_RESOLVING_NS_NAME' description_template = 'There was an error resolving the following NS name(s) to address(es): %(names_text)s' class MissingGlueForNSName(NSNameError): ''' >>> e = MissingGlueForNSName(names=('ns1.foo.baz.',)) >>> e.description 'The following NS name(s) required glue, but no glue was returned in the referral: ns1.foo.baz. See ...' ''' _abstract = False code = 'MISSING_GLUE_FOR_NS_NAME' description_template = "The following NS name(s) required glue, but no glue was returned in the referral: %(names_text)s" references = ['RFC 1034, Sec. 4.2.1'] class NoAddressForNSName(NSNameError): ''' >>> e = NoAddressForNSName(names=('ns1.foo.baz.',)) >>> e.description 'The following NS name(s) did not resolve to address(es): ns1.foo.baz.' 
''' _abstract = False code = 'NO_ADDRESS_FOR_NS_NAME' description_template = "The following NS name(s) did not resolve to address(es): %(names_text)s" class PrivateAddressNS(NSNameError): pass class NSNameResolvesToPrivateIP(PrivateAddressNS): ''' >>> e = NSNameResolvesToPrivateIP(names=('ns1.foo.baz.',)) >>> e.description 'The following NS name(s) resolved to IP address(es) in private IP address space: ns1.foo.baz.' ''' _abstract = False code = 'NS_NAME_PRIVATE_IP' description_template = "The following NS name(s) resolved to IP address(es) in private IP address space: %(names_text)s" class GlueReferencesPrivateIP(PrivateAddressNS): ''' >>> e = GlueReferencesPrivateIP(names=('ns1.foo.baz.',)) >>> e.description 'Glue for the following NS name(s) referenced IP address(es) in private IP address space: ns1.foo.baz.' ''' _abstract = False code = 'GLUE_PRIVATE_IP' description_template = "Glue for the following NS name(s) referenced IP address(es) in private IP address space: %(names_text)s" class GlueMismatchError(DelegationError): ''' >>> e = GlueMismatchError(name='ns1.foo.baz.', glue_addresses=('192.0.2.1',), auth_addresses=('192.0.2.2',)) >>> e.description 'The glue address(es) for ns1.foo.baz. (192.0.2.1) differed from its authoritative address(es) (192.0.2.2). See ...' ''' _abstract = False code = 'GLUE_MISMATCH' description_template = 'The glue address(es) for %(name)s (%(glue_addresses_text)s) differed from its authoritative address(es) (%(auth_addresses_text)s).' required_params = ['name', 'glue_addresses', 'auth_addresses'] references = ['RFC 1034, Sec. 
4.2.2'] def __init__(self, **kwargs): super(GlueMismatchError, self).__init__(**kwargs) self.template_kwargs['glue_addresses_text'] = ', '.join(self.template_kwargs['glue_addresses']) self.template_kwargs['auth_addresses_text'] = ', '.join(self.template_kwargs['auth_addresses']) class MissingGlueIPv4(DelegationError): ''' >>> e = MissingGlueIPv4(name='ns1.foo.baz.') >>> e.description 'Authoritative A records exist for ns1.foo.baz., but there are no corresponding A glue records. See ...' ''' _abstract = False code = 'MISSING_GLUE_IPV4' description_template = "Authoritative A records exist for %(name)s, but there are no corresponding A glue records." required_params = ['name'] references = ['RFC 1034, Sec. 4.2.2'] class MissingGlueIPv6(DelegationError): ''' >>> e = MissingGlueIPv6(name='ns1.foo.baz.') >>> e.description 'Authoritative AAAA records exist for ns1.foo.baz., but there are no corresponding AAAA glue records. See ...' ''' _abstract = False code = 'MISSING_GLUE_IPV6' description_template = "Authoritative AAAA records exist for %(name)s, but there are no corresponding AAAA glue records." required_params = ['name'] references = ['RFC 1034, Sec. 4.2.2'] class ExtraGlueIPv4(DelegationError): ''' >>> e = ExtraGlueIPv4(name='ns1.foo.baz.') >>> e.description 'A glue records exist for ns1.foo.baz., but there are no corresponding authoritative A records. See ...' ''' _abstract = False code = 'EXTRA_GLUE_IPV4' description_template = "A glue records exist for %(name)s, but there are no corresponding authoritative A records." required_params = ['name'] references = ['RFC 1034, Sec. 4.2.2'] class ExtraGlueIPv6(DelegationError): ''' >>> e = ExtraGlueIPv6(name='ns1.foo.baz.') >>> e.description 'AAAA glue records exist for ns1.foo.baz., but there are no corresponding authoritative AAAA records. See ...' ''' _abstract = False code = 'EXTRA_GLUE_IPV6' description_template = "AAAA glue records exist for %(name)s, but there are no corresponding authoritative AAAA records." 
required_params = ['name'] references = ['RFC 1034, Sec. 4.2.2'] class ServerUnresponsive(DelegationError): description_template = "The server(s) were not responsive to queries over %(proto)s." proto = None def __init__(self, **kwargs): super(ServerUnresponsive, self).__init__(**kwargs) self.template_kwargs['proto'] = self.proto class ServerUnresponsiveUDP(ServerUnresponsive): ''' >>> e = ServerUnresponsiveUDP() >>> e.description 'The server(s) were not responsive to queries over UDP. See ...' ''' _abstract = False code = 'SERVER_UNRESPONSIVE_UDP' proto = 'UDP' references = ['RFC 1035, Sec. 4.2'] class ServerUnresponsiveTCP(ServerUnresponsive): ''' >>> e = ServerUnresponsiveTCP() >>> e.description 'The server(s) were not responsive to queries over TCP. See ...' ''' _abstract = False code = 'SERVER_UNRESPONSIVE_TCP' proto = 'TCP' references = ['RFC 1035, Sec. 4.2'] class ServerInvalidResponseUDP(DelegationError): ''' >>> e = ServerInvalidResponseUDP() >>> e.description 'The server(s) responded over UDP with a malformed response or with an invalid RCODE. See ...' ''' _abstract = False code = 'SERVER_INVALID_RESPONSE_UDP' description_template = 'The server(s) responded over UDP with a malformed response or with an invalid RCODE.' references = ['RFC 1035, Sec. 4.1.1'] class ServerInvalidResponseTCP(DelegationError): ''' >>> e = ServerInvalidResponseTCP() >>> e.description 'The server(s) responded over TCP with a malformed response or with an invalid RCODE. See ...' ''' _abstract = False code = 'SERVER_INVALID_RESPONSE_TCP' description_template = 'The server(s) responded over TCP with a malformed response or with an invalid RCODE.' references = ['RFC 1035, Sec. 4.1.1'] class ServerNotAuthoritative(DelegationError): ''' >>> e = ServerNotAuthoritative() >>> e.description 'The server(s) did not respond authoritatively for the namespace. See ...' 
''' _abstract = False code = 'SERVER_NOT_AUTHORITATIVE' description_template = "The server(s) did not respond authoritatively for the namespace." references = ['RFC 1035, Sec. 4.1.1'] class CDNSKEYCDSError(DomainNameAnalysisError): pass class CDNSKEYCDSOnlyError(CDNSKEYCDSError): rdtype = None def __init__(self, *args, **kwargs): super(CDNSKEYCDSOnlyError, self).__init__(**kwargs) self.template_kwargs['rdtype'] = self.rdtype class DSConsistencyError(CDNSKEYCDSOnlyError): description_template = 'The contents of the DS RRset are inconsistent with those of the %(rdtype)s RRset.' references = ['RFC 7344, Sec. 3', 'RFC 7344, Sec. 5'] class CDNSKEYInconsistentWithDS(DSConsistencyError): ''' >>> e = CDNSKEYInconsistentWithDS() >>> e.description 'The contents of the DS RRset are inconsistent with those of the CDNSKEY RRset. See ...' ''' _abstract = False code = 'CDNSKEY_INCONSISTENT_WITH_DS' rdtype = 'CDNSKEY' class CDSInconsistentWithDS(DSConsistencyError): ''' >>> e = CDSInconsistentWithDS() >>> e.description 'The contents of the DS RRset are inconsistent with those of the CDS RRset. See ...' ''' _abstract = False code = 'CDS_INCONSISTENT_WITH_DS' rdtype = 'CDS' class MultipleCDNSKEYCDS(CDNSKEYCDSOnlyError): description_template = 'Multiple variants of %(rdtype)s RRsets were found.' references = ['RFC 7344, Sec. 3', 'RFC 7344, Sec. 5'] class MultipleCDNSKEY(MultipleCDNSKEYCDS): ''' >>> e = MultipleCDNSKEY() >>> e.description 'Multiple variants of CDNSKEY RRsets were found. See ...' ''' _abstract = False code = 'MULTIPLE_CDNSKEY' rdtype = 'CDNSKEY' class MultipleCDS(MultipleCDNSKEYCDS): ''' >>> e = MultipleCDS() >>> e.description 'Multiple variants of CDS RRsets were found. See ...' ''' _abstract = False code = 'MULTIPLE_CDS' rdtype = 'CDS' class CDNSKEYCDSSignerInvalid(CDNSKEYCDSOnlyError): description_template = 'The %(rdtype)s RRset must be signed with a key that is represented in both the current DNSKEY and the current DS RRset.' references = ['RFC 7344, Sec. 
4.1'] class CDNSKEYSignerInvalid(CDNSKEYCDSSignerInvalid): ''' >>> e = CDNSKEYSignerInvalid() >>> e.description 'The CDNSKEY RRset must be signed with a key that is represented in both the current DNSKEY and the current DS RRset. See ...' ''' _abstract = False code = 'CDNSKEY_SIGNER_INVALID' rdtype = 'CDNSKEY' class CDSSignerInvalid(CDNSKEYCDSSignerInvalid): ''' >>> e = CDSSignerInvalid() >>> e.description 'The CDS RRset must be signed with a key that is represented in both the current DNSKEY and the current DS RRset. See ...' ''' _abstract = False code = 'CDS_SIGNER_INVALID' rdtype = 'CDS' class DNSSECDeleteRecordError(CDNSKEYCDSOnlyError): pass class DNSSECDeleteRecordIncorrectValues(DNSSECDeleteRecordError): references = ['RFC 8078, Sec. 4'] class CDNSKEYRecordIncorrectDeleteValues(DNSSECDeleteRecordIncorrectValues): ''' >>> e = CDNSKEYRecordIncorrectDeleteValues() >>> e.description 'The contents of a CDNSKEY record used for DNSSEC deletion (algorithm 0) should be 0, 3, 0, AA==. See...' ''' _abstract = False description_template = 'The contents of a %(rdtype)s record used for DNSSEC deletion (algorithm 0) should be 0, 3, 0, AA==.' code = 'CDNSKEY_INCORRECT_DELETE_VALUES' rdtype = 'CDNSKEY' class CDSRecordIncorrectDeleteValues(DNSSECDeleteRecordIncorrectValues): ''' >>> e = CDSRecordIncorrectDeleteValues() >>> e.description 'The contents of a CDS record used for DNSSEC deletion (algorithm 0) should be 0, 0, 0, 00. See...' ''' _abstract = False description_template = 'The contents of a %(rdtype)s record used for DNSSEC deletion (algorithm 0) should be 0, 0, 0, 00.' code = 'CDS_INCORRECT_DELETE_VALUES' rdtype = 'CDS' class DNSSECDeleteMultipleRecords(DNSSECDeleteRecordError): description_template = 'A %(rdtype)s RRset used for DNSSEC deletion (algorithm 0) should only include a single record.' references = ['RFC 8078, Sec. 
4'] class CDNSKEYDeleteMultipleRecords(DNSSECDeleteMultipleRecords): ''' >>> e = CDNSKEYDeleteMultipleRecords() >>> e.description 'A CDNSKEY RRset used for DNSSEC deletion (algorithm 0) should only include a single record. See...' ''' _abstract = False code = 'CDNSKEY_DELETE_MULTIPLE_RECORDS' rdtype = 'CDNSKEY' class CDSDeleteMultipleRecords(DNSSECDeleteMultipleRecords): ''' >>> e = CDSDeleteMultipleRecords() >>> e.description 'A CDS RRset used for DNSSEC deletion (algorithm 0) should only include a single record. See...' ''' _abstract = False code = 'CDS_DELETE_MULTIPLE_RECORDS' rdtype = 'CDS' class CDNSKEYInconsistentWithCDS(CDNSKEYCDSError): ''' >>> e = CDNSKEYInconsistentWithCDS() >>> e.description 'The contents of the CDNSKEY RRset are inconsistent with those of the CDS RRset. See...' ''' _abstract = False description_template = 'The contents of the CDNSKEY RRset are inconsistent with those of the CDS RRset.' references = ['RFC 7344, Sec. 4'] code = 'CDNSKEY_INCONSISTENT_WITH_CDS' class DNAMEError(DomainNameAnalysisError): pass class DNAMENoCNAME(DNAMEError): ''' >>> e = DNAMENoCNAME() >>> e.description 'No synthesized CNAME RR was found accompanying the DNAME record. See ...' ''' _abstract = False description_template = "No synthesized CNAME RR was found accompanying the DNAME record." code = 'DNAME_NO_CNAME' references = ['RFC 6672, Sec. 3.1'] class DNAMETargetMismatch(DNAMEError): ''' >>> e = DNAMETargetMismatch(included_target='foo.baz.', synthesized_target='bar.baz.') >>> e.description 'The included CNAME RR is not a valid synthesis of the DNAME record (foo.baz. != bar.baz.). See ...' ''' _abstract = False description_template = "The included CNAME RR is not a valid synthesis of the DNAME record (%(included_target)s != %(synthesized_target)s)." code = 'DNAME_TARGET_MISMATCH' required_params = ['included_target', 'synthesized_target'] references = ['RFC 6672, Sec. 
2.2'] class DNAMETTLZero(DNAMEError): ''' >>> e = DNAMETTLZero() >>> e.description 'The TTL of the synthesized CNAME RR is 0. See ...' ''' _abstract = False description_template = "The TTL of the synthesized CNAME RR is 0." code = 'DNAME_TTL_ZERO' references = ['RFC 6672, Sec. 3.1'] class DNAMETTLMismatch(DNAMEError): ''' >>> e = DNAMETTLMismatch(cname_ttl=50, dname_ttl=60) >>> e.description 'The TTL of the synthesized CNAME RR (50) does not match the TTL of the DNAME record (60). See ...' ''' _abstract = False description_template = "The TTL of the synthesized CNAME RR (%(cname_ttl)d) does not match the TTL of the DNAME record (%(dname_ttl)d)." code = 'DNAME_TTL_MISMATCH' required_params = ['cname_ttl', 'dname_ttl'] references = ['RFC 6672, Sec. 3.1'] class DNSKEYError(DomainNameAnalysisError): pass class DNSKEYMissingFromServers(DNSKEYError): ''' >>> e = DNSKEYMissingFromServers() >>> e.description 'The DNSKEY RR was not found in the DNSKEY RRset returned by one or more servers.' ''' _abstract = False description_template = "The DNSKEY RR was not found in the DNSKEY RRset returned by one or more servers." code = 'DNSKEY_MISSING_FROM_SERVERS' class DNSKEYNotAtZoneApex(DNSKEYError): ''' >>> e = DNSKEYNotAtZoneApex(zone='foo.baz.', name='bar.foo.baz.') >>> e.description 'The owner name of the DNSKEY RRset (bar.foo.baz.) does not match the zone apex (foo.baz.). See ...' ''' _abstract = False description_template = "The owner name of the DNSKEY RRset (%(name)s) does not match the zone apex (%(zone)s)." code = 'DNSKEY_NOT_AT_ZONE_APEX' required_params = ['zone', 'name'] references = ['RFC 4035, Sec. 5.3.1'] class DNSKEYBadLength(DNSKEYError): pass class DNSKEYZeroLength(DNSKEYBadLength): ''' >>> e = DNSKEYZeroLength() >>> e.description 'The length of the key is 0 bits.' ''' _abstract = False description_template = 'The length of the key is 0 bits.' 
code = 'DNSKEY_ZERO_LENGTH' references = [] required_params = [] class DNSKEYBadLengthGOST(DNSKEYBadLength): ''' >>> e = DNSKEYBadLengthGOST(length=500) >>> e.description 'The length of the key is 500 bits, but a GOST public key (DNSSEC algorithm 12) must be 512 bits long. See ...' ''' _abstract = False description_template = 'The length of the key is %(length)d bits, but a GOST public key (DNSSEC algorithm 12) must be 512 bits long.' code = 'DNSKEY_BAD_LENGTH_GOST' references = ['RFC 5933, Sec. 5.1'] required_params = ['length'] class DNSKEYBadLengthECDSA(DNSKEYBadLength): curve = None algorithm = None correct_length = None description_template = 'The length of the key is %(length)d bits, but an ECDSA public key using Curve %(curve)s (DNSSEC algorithm %(algorithm)d) must be %(correct_length)d bits long.' references = ['RFC 6605, Sec. 4'] required_params = ['length'] def __init__(self, **kwargs): super(DNSKEYBadLengthECDSA, self).__init__(**kwargs) self.template_kwargs['curve'] = self.curve self.template_kwargs['algorithm'] = self.algorithm self.template_kwargs['correct_length'] = self.correct_length class DNSKEYBadLengthECDSA256(DNSKEYBadLengthECDSA): ''' >>> e = DNSKEYBadLengthECDSA256(length=500) >>> e.description 'The length of the key is 500 bits, but an ECDSA public key using Curve P-256 (DNSSEC algorithm 13) must be 512 bits long. See ...' ''' curve = 'P-256' algorithm = 13 correct_length = 512 _abstract = False code = 'DNSKEY_BAD_LENGTH_ECDSA256' class DNSKEYBadLengthECDSA384(DNSKEYBadLengthECDSA): ''' >>> e = DNSKEYBadLengthECDSA384(length=500) >>> e.description 'The length of the key is 500 bits, but an ECDSA public key using Curve P-384 (DNSSEC algorithm 14) must be 768 bits long. See ...' 
''' curve = 'P-384' algorithm = 14 correct_length = 768 _abstract = False code = 'DNSKEY_BAD_LENGTH_ECDSA384' class DNSKEYBadLengthEdDSA(DNSKEYBadLength): curve = None algorithm = None correct_length = None description_template = 'The length of the key is %(length)d bits, but an %(curve)s public key (DNSSEC algorithm %(algorithm)d) must be %(correct_length)d bits long.' references = ['RFC 8080, Sec. 3'] required_params = ['length'] def __init__(self, **kwargs): super(DNSKEYBadLengthEdDSA, self).__init__(**kwargs) self.template_kwargs['curve'] = self.curve self.template_kwargs['algorithm'] = self.algorithm self.template_kwargs['correct_length'] = self.correct_length class DNSKEYBadLengthEd25519(DNSKEYBadLengthEdDSA): ''' >>> e = DNSKEYBadLengthEd25519(length=500) >>> e.description 'The length of the key is 500 bits, but an Ed25519 public key (DNSSEC algorithm 15) must be 256 bits long. See ...' ''' curve = 'Ed25519' algorithm = 15 correct_length = 256 _abstract = False code = 'DNSKEY_BAD_LENGTH_ED25519' class DNSKEYBadLengthEd448(DNSKEYBadLengthEdDSA): ''' >>> e = DNSKEYBadLengthEd448(length=500) >>> e.description 'The length of the key is 500 bits, but an Ed448 public key (DNSSEC algorithm 16) must be 456 bits long. See ...' ''' curve = 'Ed448' algorithm = 16 correct_length = 456 _abstract = False code = 'DNSKEY_BAD_LENGTH_ED448' class TrustAnchorError(DomainNameAnalysisError): pass class NoTrustAnchorSigning(TrustAnchorError): ''' >>> e = NoTrustAnchorSigning(zone='foo.baz.') >>> e.description 'One or more keys were designated as trust anchors for foo.baz., but none were found signing the DNSKEY RRset. See ...' ''' _abstract = False description_template = "One or more keys were designated as trust anchors for %(zone)s, but none were found signing the DNSKEY RRset." code = 'NO_TRUST_ANCHOR_SIGNING' required_params = ['zone'] references = ['RFC 4035, Sec. 
5'] class RevokedNotSigning(DNSKEYError): ''' >>> e = RevokedNotSigning() >>> e.description 'The key was revoked but was not found signing the RRset. See ...' ''' _abstract = False description_template = "The key was revoked but was not found signing the RRset." code = 'REVOKED_NOT_SIGNING' references = ['RFC 5011, Sec. 2.1'] class ZoneDataError(DomainNameAnalysisError): pass class CNAMEWithOtherData(ZoneDataError): ''' >>> e = CNAMEWithOtherData(name='foo.') >>> e.description 'The server returned CNAME for foo., but records of other types exist at that name. See ...' ''' _abstract = False description_template = "The server returned CNAME for %(name)s, but records of other types exist at that name." code = 'CNAME_WITH_OTHER_DATA' required_params = ['name'] references = ['RFC 2181, Sec. 10.1'] class CNAMELoop(ZoneDataError): ''' >>> e = CNAMELoop() >>> e.description 'This record results in a CNAME loop. See ...' ''' _abstract = False description_template = "This record results in a CNAME loop." code = 'CNAME_LOOP' references = ['RFC 1034, Sec. 3.6.2'] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/analysis/offline.py0000644000175000017500000060206415001472663017311 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import copy import errno import logging # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict import dns.flags, dns.rcode, dns.rdataclass, dns.rdatatype from dnsviz import crypto import dnsviz.format as fmt from dnsviz.ipaddr import * import dnsviz.query as Q from dnsviz import response as Response from dnsviz.util import tuple_to_dict lb2s = fmt.latin1_binary_to_string from . import errors as Errors from .online import OnlineDomainNameAnalysis, \ analysis_type_codes, \ ANALYSIS_TYPE_AUTHORITATIVE, ANALYSIS_TYPE_RECURSIVE, ANALYSIS_TYPE_CACHE from . 
import status as Status

# Version of the serialized ("processed") analysis format produced by this module.
DNS_PROCESSED_VERSION = '1.0'

#XXX (this needs to be updated if new specification ever updates
# RFC 6891)
EDNS_DEFINED_FLAGS = dns.flags.DO

# Expected key lengths (bits) for fixed-length DNSSEC algorithms
# (GOST, ECDSA P-256/P-384, Ed25519, Ed448).
DNSSEC_KEY_LENGTHS_BY_ALGORITHM = {
    12: 512, 13: 512, 14: 768,
    15: 256, 16: 456,
}

# Error class raised/reported when a key of the given algorithm has the
# wrong length; keys parallel DNSSEC_KEY_LENGTHS_BY_ALGORITHM.
DNSSEC_KEY_LENGTH_ERRORS = {
    12: Errors.DNSKEYBadLengthGOST,
    13: Errors.DNSKEYBadLengthECDSA256,
    14: Errors.DNSKEYBadLengthECDSA384,
    15: Errors.DNSKEYBadLengthEd25519,
    16: Errors.DNSKEYBadLengthEd448,
}

# Algorithm 0 is reserved for the CDS/CDNSKEY "delete" operation (RFC 8078).
DNSSEC_DELETE_ALG = 0

_logger = logging.getLogger(__name__)

class FoundYXDOMAIN(Exception):
    # Control-flow exception: used to break out of nested loops once a
    # name has been shown to exist (YXDOMAIN).
    pass

class CNAMELoopDetected(Exception):
    # Control-flow exception signaling a loop in a CNAME chain.
    pass

class AggregateResponseInfo(object):
    '''Aggregates the response info objects for a single (qname, rdtype)
    pair, along with any CNAME-chain info hanging off each response.'''

    def __init__(self, qname, rdtype, name_obj, zone_obj):
        self.qname = qname
        self.rdtype = rdtype
        self.name_obj = name_obj
        self.zone_obj = zone_obj
        # list of (response_info, cname_info) tuples; cname_info is None
        # when the response is not a CNAME (or the chain could not be followed)
        self.response_info_list = []

    def __repr__(self):
        return '<%s %s/%s>' % (self.__class__.__name__, self.qname, dns.rdatatype.to_text(self.rdtype))

    def add_response_info(self, response_info, cname_info):
        self.response_info_list.append((response_info, cname_info))

class OfflineDomainNameAnalysis(OnlineDomainNameAnalysis):
    # Levels of rdtype coverage used when deciding which queries matter
    # for a given analysis context (see _rdtypes_for_analysis_level()).
    RDTYPES_ALL = 0
    RDTYPES_ALL_SAME_NAME = 1
    RDTYPES_NS_TARGET = 2
    RDTYPES_SECURE_DELEGATION = 3
    RDTYPES_DELEGATION = 4

    QUERY_CLASS = Q.TTLDistinguishingMultiQueryAggregateDNSResponse

    def __init__(self, *args, **kwargs):
        self._strict_cookies = kwargs.pop('strict_cookies', False)
        self._allow_private = kwargs.pop('allow_private', False)
        super(OfflineDomainNameAnalysis, self).__init__(*args, **kwargs)

        # Non-authoritative (recursive/cache) analyses do not need to
        # distinguish answers by TTL.
        if self.analysis_type != ANALYSIS_TYPE_AUTHORITATIVE:
            self._query_cls = Q.MultiQueryAggregateDNSResponse

        # Shortcuts to the values in the SOA record.
        self.serial = None
        self.rname = None
        self.mname = None

        # Algorithms seen in DNSKEY/DS/DLV responses for this name.
        self.dnssec_algorithms_in_dnskey = set()
        self.dnssec_algorithms_in_ds = set()
        self.dnssec_algorithms_in_dlv = set()
        self.dnssec_algorithms_digest_in_ds = set()
        self.dnssec_algorithms_digest_in_dlv = set()

        # The remaining attributes are populated lazily by
        # populate_status()/_populate_status(); None means "not yet evaluated".
        self.status = None
        self.yxdomain = None
        self.yxrrset = None
        self.yxrrset_proper = None
        self.nxrrset = None

        self.rrset_warnings = None
        self.rrset_errors = None
        self.rrsig_status = None
        self.response_component_status = None
        self.wildcard_status = None
        self.dname_status = None
        self.nxdomain_status = None
        self.nxdomain_warnings = None
        self.nxdomain_errors = None
        self.nodata_status = None
        self.nodata_warnings = None
        self.nodata_errors = None
        self.response_errors = None
        self.response_warnings = None

        self.ds_status_by_ds = None
        self.ds_status_by_dnskey = None

        self.zone_errors = None
        self.zone_warnings = None
        self.zone_status = None

        self.delegation_warnings = None
        self.delegation_errors = None
        self.delegation_status = None

        self.published_keys = None
        self.revoked_keys = None
        self.zsks = None
        self.ksks = None
        self.dnskey_with_ds = None

        self._dnskey_sets = None
        self._dnskeys = None

    def _signed(self):
        # A name is considered signed if any DNSSEC algorithm was observed
        # in its DNSKEY, DS, or DLV responses.
        return bool(self.dnssec_algorithms_in_dnskey or self.dnssec_algorithms_in_ds or self.dnssec_algorithms_in_dlv)
    signed = property(_signed)

    def _handle_soa_response(self, rrset):
        '''Indicate that there exists an SOA record for the name which is the
        subject of this analysis, and save the relevant parts.'''

        self.has_soa = True
        # keep the values from the SOA with the highest serial seen
        if self.serial is None or rrset[0].serial > self.serial:
            self.serial = rrset[0].serial
            self.rname = rrset[0].rname
            self.mname = rrset[0].mname

    def _handle_dnskey_response(self, rrset):
        # Record which DNSSEC algorithms appear in the DNSKEY RRset.
        for dnskey in rrset:
            self.dnssec_algorithms_in_dnskey.add(dnskey.algorithm)

    def _handle_ds_response(self, rrset):
        # Record algorithm and (algorithm, digest_type) pairs from a DS or
        # DLV RRset, into the respective per-type sets.
        if rrset.rdtype == dns.rdatatype.DS:
            dnssec_algs = self.dnssec_algorithms_in_ds
            digest_algs = self.dnssec_algorithms_digest_in_ds
        else:
            dnssec_algs = self.dnssec_algorithms_in_dlv
            digest_algs = self.dnssec_algorithms_digest_in_dlv
        for ds in rrset:
            dnssec_algs.add(ds.algorithm)
            digest_algs.add((ds.algorithm, ds.digest_type))

    def _process_response_answer_rrset(self, rrset, query, response):
        # Extend the online processing with offline bookkeeping for
        # SOA/DNSKEY/DS/DLV answers pertaining to this name (or its DLV name).
        super(OfflineDomainNameAnalysis, self)._process_response_answer_rrset(rrset, query, response)
        if query.qname in (self.name, self.dlv_name):
            if rrset.rdtype == dns.rdatatype.SOA:
                self._handle_soa_response(rrset)
            elif rrset.rdtype == dns.rdatatype.DNSKEY:
                self._handle_dnskey_response(rrset)
            elif rrset.rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV):
                self._handle_ds_response(rrset)

    def _index_dnskeys(self):
        '''Build the DNSKEYMeta index mapping each DNSKEY rdata to its
        metadata, and the list of (dnskey_set, rrset_info) pairs.  Idempotent.'''

        if self._dnskey_sets is not None:
            return

        self._dnskey_sets = []
        self._dnskeys = {}
        if (self.name, dns.rdatatype.DNSKEY) not in self.queries:
            return
        for dnskey_info in self.queries[(self.name, dns.rdatatype.DNSKEY)].answer_info:
            # there are CNAMEs that show up here...
            if not (dnskey_info.rrset.name == self.name and dnskey_info.rrset.rdtype == dns.rdatatype.DNSKEY):
                continue
            dnskey_set = set()
            for dnskey_rdata in dnskey_info.rrset:
                if dnskey_rdata not in self._dnskeys:
                    self._dnskeys[dnskey_rdata] = Response.DNSKEYMeta(dnskey_info.rrset.name, dnskey_rdata, dnskey_info.rrset.ttl)
                self._dnskeys[dnskey_rdata].rrset_info.append(dnskey_info)
                self._dnskeys[dnskey_rdata].servers_clients.update(dnskey_info.servers_clients)
                dnskey_set.add(self._dnskeys[dnskey_rdata])

            self._dnskey_sets.append((dnskey_set, dnskey_info))

    def get_dnskey_sets(self):
        # lazily index on first access
        if not hasattr(self, '_dnskey_sets') or self._dnskey_sets is None:
            self._index_dnskeys()
        return self._dnskey_sets

    def get_dnskeys(self):
        # lazily index on first access
        if not hasattr(self, '_dnskeys') or self._dnskeys is None:
            self._index_dnskeys()
        return list(self._dnskeys.values())

    def potential_trusted_keys(self):
        '''Return the set of KSKs usable as trust anchors: active (non-ZSK,
        non-revoked) KSKs if any exist, otherwise all non-revoked KSKs.'''

        active_ksks = self.ksks.difference(self.zsks).difference(self.revoked_keys)
        if active_ksks:
            return active_ksks
        return self.ksks.difference(self.revoked_keys)

    def _create_response_info_recursive(self, name, rdtype, name_to_info_mapping, rrset_to_cname_mapping, trace=None):
        # Build an AggregateResponseInfo for name/rdtype, following CNAME
        # targets recursively; trace guards against CNAME loops.
        zone_obj = self.get_name(name).zone
        info_obj = AggregateResponseInfo(name, rdtype, self, zone_obj)
        if trace is None:
            trace = [name]

        for info in name_to_info_mapping[name]:
            if info in rrset_to_cname_mapping:
                target = info.rrset[0].target
                if target not in trace:
                    cname_info = self._create_response_info_recursive(rrset_to_cname_mapping[info], rdtype, name_to_info_mapping, rrset_to_cname_mapping, trace=trace + [target])
                else:
                    # loop detected; don't follow the CNAME any further
                    cname_info = None
            else:
                cname_info = None
            info_obj.add_response_info(info, cname_info)
        return info_obj

    def _get_response_info(self, name, rdtype):
        #XXX there are reasons for this (e.g., NXDOMAIN, after which no further
        # queries are made), but it would be good to have a sanity check, so
        # we don't simply produce an incomplete output.
        # see also: dnsviz.viz.dnssec.graph_rrset_auth()
        if (name, rdtype) not in self.queries:
            return None

        query = self.queries[(name, rdtype)]

        name_to_info_mapping = {}
        rrset_to_cname_mapping = {}

        name_to_info_mapping[name] = []

        for rrset_info in query.answer_info:

            # only do qname, unless analysis type is recursive
            if not (rrset_info.rrset.name == name or self.analysis_type == ANALYSIS_TYPE_RECURSIVE):
                continue

            # if this is a CNAME record, create an info-to-target mapping
            if rrset_info.rrset.rdtype == dns.rdatatype.CNAME:
                rrset_to_cname_mapping[rrset_info] = rrset_info.rrset[0].target

            # map name to info and name_obj
            if rrset_info.rrset.name not in name_to_info_mapping:
                name_to_info_mapping[rrset_info.rrset.name] = []
            name_to_info_mapping[rrset_info.rrset.name].append(rrset_info)

        for neg_response_info in query.nxdomain_info + query.nodata_info:
            # only do qname, unless analysis type is recursive
            if not (neg_response_info.qname == name or self.analysis_type == ANALYSIS_TYPE_RECURSIVE):
                continue

            # make sure this query was made to a server designated as
            # authoritative
            z_obj = self.zone
            if self.is_zone() and neg_response_info.rdtype == dns.rdatatype.DS:
                z_obj = self.zone.parent
            if not set([s for (s,c) in 
neg_response_info.servers_clients]).intersection(z_obj.get_auth_or_designated_servers()):
                continue

            if neg_response_info.qname not in name_to_info_mapping:
                name_to_info_mapping[neg_response_info.qname] = []
            name_to_info_mapping[neg_response_info.qname].append(neg_response_info)

        # attach response-level errors/warnings to the queried name itself
        for error in self.response_errors[query]:
            name_to_info_mapping[name].append(error)

        for warning in self.response_warnings[query]:
            name_to_info_mapping[name].append(warning)

        info_obj = AggregateResponseInfo(name, rdtype, self, self.zone)
        for info in name_to_info_mapping[name]:
            if info in rrset_to_cname_mapping:
                if self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                    # recursive analyses carry the whole chain locally
                    cname_info = self._create_response_info_recursive(rrset_to_cname_mapping[info], rdtype, name_to_info_mapping, rrset_to_cname_mapping)
                else:
                    # authoritative analyses delegate to the target's own name_obj
                    cname_obj = self.get_name(rrset_to_cname_mapping[info])
                    cname_info = cname_obj.get_response_info(rrset_to_cname_mapping[info], rdtype)
            else:
                cname_info = None
            info_obj.add_response_info(info, cname_info)
        return info_obj

    def get_response_info(self, name, rdtype):
        '''Return (and cache) the AggregateResponseInfo for name/rdtype.'''

        if not hasattr(self, '_response_info') or self._response_info is None:
            self._response_info = {}
        if (name, rdtype) not in self._response_info:
            # pre-seed with None to break potential recursion through
            # _get_response_info() back into this method
            self._response_info[(name, rdtype)] = None
            self._response_info[(name, rdtype)] = self._get_response_info(name, rdtype)
        return self._response_info[(name, rdtype)]

    def _serialize_nsec_set_simple(self, nsec_set_info, neg_status, response_info):
        '''Serialize the NSEC/NSEC3 proof for a negative response as a list
        of ('PROOF', status, ..., children) tuples.'''

        nsec_tup = []
        if neg_status[nsec_set_info]:
            for nsec_status in neg_status[nsec_set_info]:
                # assign the "overall" status of the NSEC proof, based on both
                # the correctness of the NSEC proof as well as the
                # authentication status of the collective records comprising
                # the proof.
                #
                # if the proof is not valid, then use the validity status of
                # the proof as the overall status.
                if nsec_status.validation_status != Status.NSEC_STATUS_VALID:
                    status = Status.nsec_status_mapping[nsec_status.validation_status]
                # else (the NSEC proof is valid)
                else:
                    # if there is a component status, then set the overall
                    # status to the authentication status of collective records
                    # comprising the proof (the proof is only as good as it is
                    # authenticated).
                    if self.response_component_status is not None:
                        status = Status.rrset_status_mapping[self.response_component_status[nsec_status.nsec_set_info]]
                    # otherwise, set the overall status to insecure
                    else:
                        status = Status.rrset_status_mapping[Status.RRSET_STATUS_INSECURE]

                warnings = [w.terse_description for w in nsec_status.warnings]
                errors = [e.terse_description for e in nsec_status.errors]

                children = []
                for nsec_rrset_info in nsec_status.nsec_set_info.rrsets.values():
                    children.append(self._serialize_response_component_simple(nsec_rrset_info.rrset.rdtype, response_info, nsec_rrset_info, True))

                nsec_tup.append(('PROOF', status, [], [], [(Status.nsec_status_mapping[nsec_status.validation_status], warnings, errors, '')], children))

        return nsec_tup

    def _serialize_rrsig_simple(self, name_obj, rrset_info):
        '''Serialize the RRSIG statuses covering rrset_info as a list of
        ('RRSIG', status, ..., detail) tuples, one per (rrsig, dnskey) pair.'''

        rrsig_tup = []
        if name_obj.rrsig_status[rrset_info]:
            rrsigs = list(name_obj.rrsig_status[rrset_info].keys())
            rrsigs.sort()
            for rrsig in rrsigs:
                dnskeys = list(name_obj.rrsig_status[rrset_info][rrsig].keys())
                dnskeys.sort()
                for dnskey in dnskeys:
                    rrsig_status = name_obj.rrsig_status[rrset_info][rrsig][dnskey]

                    # assign the "overall" status of the RRSIG, based on both
                    # the validity of the RRSIG as well as the authentication
                    # status of the DNSKEY with which it is validated
                    #
                    # if the RRSIG is not valid, then use the RRSIG status as
                    # the overall status
                    if rrsig_status.validation_status != Status.RRSIG_STATUS_VALID:
                        status = Status.rrsig_status_mapping[rrsig_status.validation_status]
                    # else (the status of the RRSIG is valid)
                    else:
                        # if there is a component status, then set the overall
                        # status to that of the status of the DNSKEY (an RRSIG
                        # is only as authentic as the DNSKEY that signs it)
                        if self.response_component_status is not None:
                            status = Status.rrset_status_mapping[self.response_component_status[dnskey]]
                        # otherwise, set the overall status to insecure
                        else:
                            status = Status.rrset_status_mapping[Status.RRSET_STATUS_INSECURE]

                    warnings = [w.terse_description for w in rrsig_status.warnings]
                    errors = [e.terse_description for e in rrsig_status.errors]
                    rrsig_tup.append(('RRSIG', status, [], [], [(Status.rrsig_status_mapping[rrsig_status.validation_status], warnings, errors, '%s/%s/%s (%s - %s)' % \
                            (fmt.humanize_name(rrsig.signer), rrsig.algorithm, rrsig.key_tag, fmt.timestamp_to_str(rrsig.inception)[:10], fmt.timestamp_to_str(rrsig.expiration)[:10]))], []))
        return rrsig_tup

    def _serialize_response_component_simple(self, rdtype, response_info, info, show_neg_response, dname_status=None):
        '''Serialize one response component (an RRset, an analysis error, or a
        negative response) as a (rdtype_text, status, warnings, errors,
        rdata_tup, children) tuple, or None for suppressed negative responses.'''

        # NOTE(review): 'rdata' appears unused in this method — verify
        # against upstream before removing.
        rdata = []
        if isinstance(info, Errors.DomainNameAnalysisError):
            query = response_info.name_obj.queries[(response_info.qname, response_info.rdtype)]
            if info in response_info.name_obj.response_warnings[query]:
                status = 'WARNING'
            else:
                status = 'ERROR'
        else:
            if self.response_component_status is not None:
                status = Status.rrset_status_mapping[self.response_component_status[info]]
            else:
                status = Status.rrset_status_mapping[Status.RRSET_STATUS_INSECURE]

        rdata_tup = []
        children = []
        if isinstance(info, Response.RRsetInfo):
            # per-rdtype rendering of the rdata
            if info.rrset.rdtype == dns.rdatatype.CNAME:
                rdata_tup.append((None, [], [], 'CNAME %s' % (lb2s(info.rrset[0].target.to_text()))))
            elif rdtype == dns.rdatatype.DNSKEY:
                for d in info.rrset:
                    dnskey_meta = response_info.name_obj._dnskeys[d]
                    warnings = [w.terse_description for w in dnskey_meta.warnings]
                    errors = [e.terse_description for e in dnskey_meta.errors]
                    rdata_tup.append(('VALID', warnings, errors, '%d/%d/%d' % (d.algorithm, dnskey_meta.key_tag, d.flags)))
            elif rdtype == dns.rdatatype.DS:
                dss = list(response_info.name_obj.ds_status_by_ds[dns.rdatatype.DS].keys())
                dss.sort()
                for ds in dss:
                    # only show the DS if in the RRset in question
                    if ds not in info.rrset:
                        continue
                    dnskeys = list(response_info.name_obj.ds_status_by_ds[rdtype][ds].keys())
                    dnskeys.sort()
                    for dnskey in dnskeys:
                        ds_status = response_info.name_obj.ds_status_by_ds[rdtype][ds][dnskey]
                        warnings = [w.terse_description for w in ds_status.warnings]
                        errors = [e.terse_description for e in ds_status.errors]
                        rdata_tup.append((Status.ds_status_mapping[ds_status.validation_status], warnings, errors, '%d/%d/%d' % (ds.algorithm, ds.key_tag, ds.digest_type)))
            elif rdtype == dns.rdatatype.NSEC3:
                rdata_tup.append((None, [], [], '%s %s' % (fmt.format_nsec3_name(info.rrset.name), fmt.format_nsec3_rrset_text(info.rrset[0].to_text()))))
            elif rdtype == dns.rdatatype.NSEC:
                rdata_tup.append((None, [], [], '%s %s' % (lb2s(info.rrset.name.to_text()), info.rrset[0].to_text())))
            elif rdtype == dns.rdatatype.DNAME:
                warnings = [w.terse_description for w in dname_status.warnings]
                errors = [e.terse_description for e in dname_status.errors]
                rdata_tup.append((Status.dname_status_mapping[dname_status.validation_status], warnings, errors, info.rrset[0].to_text()))
            else:
                rdata_tup.extend([(None, [], [], r.to_text()) for r in info.rrset])

            warnings = [w.terse_description for w in response_info.name_obj.rrset_warnings[info]]
            errors = [e.terse_description for e in response_info.name_obj.rrset_errors[info]]

            children.extend(self._serialize_rrsig_simple(response_info.name_obj, info))
            for wildcard_name in info.wildcard_info:
                children.extend(self._serialize_nsec_set_simple(info.wildcard_info[wildcard_name], response_info.name_obj.wildcard_status, response_info))

            if info in response_info.name_obj.dname_status:
                for dname_status in response_info.name_obj.dname_status[info]:
                    children.append(self._serialize_response_component_simple(dns.rdatatype.DNAME, response_info, dname_status.synthesized_cname.dname_info, True, dname_status))

        elif isinstance(info, Errors.DomainNameAnalysisError):
            warnings = []
            errors = []
            rdata_tup.append((None, [], [], '%s' % (info.terse_description)))

        elif info in self.nodata_status:
            warnings = [w.terse_description for w in response_info.name_obj.nodata_warnings[info]]
            errors = [e.terse_description for e in response_info.name_obj.nodata_errors[info]]

            # never show the negative response if show_neg_response is False
            if show_neg_response is False:
                return None
            # only show the negative response if there is a corresponding
            # status or show_neg_response is True
            if not self.nodata_status[info] and not show_neg_response:
                return None
            rdata_tup.append((None, [], [], 'NODATA'))
            for soa_rrset_info in info.soa_rrset_info:
                children.append(self._serialize_response_component_simple(dns.rdatatype.SOA, response_info, soa_rrset_info, True))
            children.extend(self._serialize_nsec_set_simple(info, response_info.name_obj.nodata_status, response_info))

        elif info in self.nxdomain_status:
            warnings = [w.terse_description for w in response_info.name_obj.nxdomain_warnings[info]]
            errors = [e.terse_description for e in response_info.name_obj.nxdomain_errors[info]]

            # never show the negative response if show_neg_response is False
            if show_neg_response is False:
                return None
            # only show the negative response if there is a corresponding
            # status or show_neg_response is True
            if not self.nxdomain_status[info] and not show_neg_response:
                return None
            rdata_tup.append((None, [], [], 'NXDOMAIN'))
            for soa_rrset_info in info.soa_rrset_info:
                children.append(self._serialize_response_component_simple(dns.rdatatype.SOA, response_info, soa_rrset_info, True))
            children.extend(self._serialize_nsec_set_simple(info, response_info.name_obj.nxdomain_status, response_info))

        return (dns.rdatatype.to_text(rdtype), status, warnings, errors, rdata_tup, children)

    def _serialize_response_component_list_simple(self, rdtype, response_info, show_neg_response):
        '''Serialize every component of response_info, skipping suppressed
        negative responses.'''

        tup = []
        for info, cname_chain_info in response_info.response_info_list:
            val = self._serialize_response_component_simple(rdtype, response_info, info, 
show_neg_response) # this might not return a non-empty value for a negative response, # so we check for a non-empty value before appending it if val: tup.append(val) return tup def _serialize_status_simple(self, response_info_list, processed): tup = [] cname_info_map = OrderedDict() # just get the first one since the names are all supposed to be the # same response_info = response_info_list[0] # first build the ancestry in reverse order ancestry = [] parent_obj = response_info.zone_obj while parent_obj is not None: ancestry.insert(0, parent_obj) parent_obj = parent_obj.parent name_tup = None # now process the DS and DNSKEY for each name in the ancestry for parent_obj in ancestry: if (parent_obj.name, -1) in processed: continue processed.add((parent_obj.name, -1)) if parent_obj.stub: continue zone_status = None zone_warnings = [] zone_errors = [] delegation_status = None delegation_warnings = [] delegation_errors = [] if parent_obj.is_zone(): if self.response_component_status is not None: zone_status = Status.delegation_status_mapping[self.response_component_status[parent_obj]] else: zone_status = Status.delegation_status_mapping[Status.DELEGATION_STATUS_INSECURE] zone_warnings = [w.terse_description for w in parent_obj.zone_warnings] zone_errors = [e.terse_description for e in parent_obj.zone_errors] if parent_obj.parent is not None: delegation_status = Status.delegation_status_mapping[parent_obj.delegation_status[dns.rdatatype.DS]] delegation_warnings = [w.terse_description for w in parent_obj.delegation_warnings[dns.rdatatype.DS]] delegation_errors = [e.terse_description for e in parent_obj.delegation_errors[dns.rdatatype.DS]] if parent_obj.parent is not None: ds_response_info = parent_obj.get_response_info(parent_obj.name, dns.rdatatype.DS) else: ds_response_info = None name_tup = (fmt.humanize_name(parent_obj.name), zone_status, zone_warnings, zone_errors, delegation_status, delegation_warnings, delegation_errors, []) tup.append(name_tup) if ds_response_info is 
not None: name_tup[7].extend(parent_obj._serialize_response_component_list_simple(dns.rdatatype.DS, ds_response_info, None)) # if we only care about DS for the name itself, then don't # serialize the DNSKEY response if response_info.rdtype == dns.rdatatype.DS and parent_obj.name == response_info.qname: pass # if the servers were unresponsive, then it's possible that no # DNSKEY query was issued elif (parent_obj.name, dns.rdatatype.DNSKEY) not in parent_obj.queries: pass else: dnskey_response_info = parent_obj.get_response_info(parent_obj.name, dns.rdatatype.DNSKEY) name_tup[7].extend(parent_obj._serialize_response_component_list_simple(dns.rdatatype.DNSKEY, dnskey_response_info, False)) # handle NODATA in the zone, if there is an error/warning if not self.is_zone() and parent_obj == self.zone: has_warnings, has_errors = parent_obj.queries_with_errors_warnings(classes=Errors.ExistingCovered) has_warnings_or_errors = has_warnings.union(has_errors) qname = parent_obj.nxrrset_name rdtype = parent_obj.nxrrset_rdtype if qname is not None and (qname, rdtype) in has_warnings_or_errors: processed.add((qname, rdtype)) if (qname, rdtype) in has_warnings_or_errors: name_tup[7].extend(parent_obj._serialize_response_component_list_simple(rdtype, parent_obj.get_response_info(qname, rdtype), True)) parent_is_signed = parent_obj.signed # handle nxdomain_ancestor nxdomain_ancestor = response_info.name_obj.nxdomain_ancestor if nxdomain_ancestor is not None and \ (nxdomain_ancestor.name, -1) not in processed: processed.add((nxdomain_ancestor.name, -1)) name_tup = (fmt.humanize_name(nxdomain_ancestor.name), None, [], [], None, [], [], []) tup.append(name_tup) name_tup[7].extend(nxdomain_ancestor._serialize_response_component_list_simple(nxdomain_ancestor.referral_rdtype, nxdomain_ancestor.get_response_info(nxdomain_ancestor.name, nxdomain_ancestor.referral_rdtype), True)) # in recursive analysis, if we don't contact any servers that are # valid and responsive, then we get a zone_obj 
(and thus # parent_obj, in this case) that is None (because we couldn't # detect any NS records in the ancestry) # # in this case, or in the case where the name is not a zone (and # thus changes), we create a new tuple. if parent_obj is None or response_info.qname != parent_obj.name or name_tup is None: name_tup = (fmt.humanize_name(response_info.qname), None, [], [], None, [], [], []) tup.append(name_tup) for response_info in response_info_list: # if we've already done this one (above) then just move along. # These were only done if the name is a zone. if response_info.name_obj.is_zone() and \ response_info.rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS): continue name_tup[7].extend(response_info.name_obj._serialize_response_component_list_simple(response_info.rdtype, response_info, True)) # queue the cnames for later serialization for info, cname_info in response_info.response_info_list: if cname_info is None: continue if cname_info.qname not in cname_info_map: cname_info_map[cname_info.qname] = [] cname_info_map[cname_info.qname].append(cname_info) # now serialize the cnames for qname in cname_info_map: tup.extend(self._serialize_status_simple(cname_info_map[qname], processed)) return tup def serialize_status_simple(self, rdtypes=None, ignore_qnames_rdtypes=None, processed=None): if processed is None: processed = set() response_info_map = {} for qname, rdtype in self.queries: if rdtypes is None: # if rdtypes was not specified, then serialize all, with some exceptions if rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS, dns.rdatatype.DLV): continue if (qname, rdtype) in ignore_qnames_rdtypes: continue else: # if rdtypes was specified, then only serialize rdtypes that # were specified if qname != self.name or rdtype not in rdtypes: continue if qname not in response_info_map: response_info_map[qname] = {} response_info_map[qname][rdtype] = self.get_response_info(qname, rdtype) tuples = [] qnames = list(response_info_map.keys()) qnames.sort() for qname in 
qnames: rdtypes = list(response_info_map[qname].keys()) rdtypes.sort() response_info_list = [response_info_map[qname][r] for r in rdtypes] tuples.extend(self._serialize_status_simple(response_info_list, processed)) return tuples def _rdtypes_for_analysis_level(self, level): rdtypes = set([self.referral_rdtype, dns.rdatatype.NS]) if level == self.RDTYPES_DELEGATION: return rdtypes rdtypes.update([dns.rdatatype.DNSKEY, dns.rdatatype.DS, dns.rdatatype.DLV]) if level == self.RDTYPES_SECURE_DELEGATION: return rdtypes rdtypes.update([dns.rdatatype.A, dns.rdatatype.AAAA]) if level == self.RDTYPES_NS_TARGET: return rdtypes return None def _server_responsive_with_condition(self, server, client, tcp, response_test): for query in self.queries.values(): for query1 in query.queries.values(): try: if client is None: clients = list(query1.responses[server].keys()) else: clients = (client,) except KeyError: continue for c in clients: try: response = query1.responses[server][client] except KeyError: continue # if tcp is specified, then only follow through if the # query was ultimately issued according to that value if tcp is not None: if tcp and not response.effective_tcp: continue if not tcp and response.effective_tcp: continue if response_test(response): return True return False def server_responsive_for_action(self, server, client, tcp, action, action_arg, require_valid): '''Return True if at least one (optionally valid) response was returned by the server without the specified action. 
This action is the value of the responsive_cause_index in the response's history.''' if action == Q.RETRY_ACTION_NO_CHANGE: return True elif action == Q.RETRY_ACTION_CHANGE_SPORT: return True elif action == Q.RETRY_ACTION_SET_FLAG: return self._server_responsive_with_condition(server, client, tcp, lambda x: not (x.effective_flags & action_arg) and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_CLEAR_FLAG: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_flags & action_arg and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_DISABLE_EDNS: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ x.effective_edns_max_udp_payload > action_arg and \ x.msg_size > action_arg and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_SET_EDNS_FLAG: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ not (x.effective_edns_flags & action_arg) and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_CLEAR_EDNS_FLAG: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ x.effective_edns_flags & action_arg and \ 
((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_ADD_EDNS_OPTION: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ not [y for y in x.effective_edns_options if action_arg == y.otype] and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ [y for y in x.effective_edns_options if action_arg == y.otype] and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) elif action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION: return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns == action_arg and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) else: return False def server_responsive_with_do(self, server, client, tcp, require_valid): return self._server_responsive_with_condition(server, client, tcp, lambda x: x.effective_edns >= 0 and \ x.effective_edns_flags & dns.flags.DO and \ ((x.effective_tcp and x.tcp_responsive) or \ (not x.effective_tcp and x.udp_responsive)) and \ (not require_valid or x.is_valid_response())) def _populate_status(self, trusted_keys, supported_algs=None, supported_digest_algs=None, is_dlv=False, trace=None, follow_mx=True, ignore_rfc8624=False, ignore_rfc9276=False, trust_cdnskey_cds=False, multi_signer=False): if trace is None: trace = [] # avoid loops if self in trace: self._populate_name_status() return # if status has already been populated, then don't reevaluate if self.rrsig_status is not None: return # if we're a 
stub, there's nothing to evaluate if self.stub: return # populate status of dependencies for cname in self.cname_targets: for target, cname_obj in self.cname_targets[cname].items(): if cname_obj is not None: cname_obj._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) if follow_mx: for target, mx_obj in self.mx_targets.items(): if mx_obj is not None: mx_obj._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], follow_mx=False, ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) for signer, signer_obj in self.external_signers.items(): if signer_obj is not None: signer_obj._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) for target, ns_obj in self.ns_dependencies.items(): if ns_obj is not None: ns_obj._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) # populate status of ancestry if self.nxdomain_ancestor is not None: self.nxdomain_ancestor._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) if self.parent is not None: self.parent._populate_status(trusted_keys, supported_algs, supported_digest_algs, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) if self.dlv_parent is not None: 
self.dlv_parent._populate_status(trusted_keys, supported_algs, supported_digest_algs, is_dlv=True, trace=trace + [self], ignore_rfc8624=ignore_rfc8624, ignore_rfc9276=ignore_rfc9276, trust_cdnskey_cds=trust_cdnskey_cds, multi_signer=multi_signer) _logger.debug('Assessing status of %s...' % (fmt.humanize_name(self.name))) self._populate_name_status() self._index_dnskeys() self._populate_rrsig_status_all(supported_algs, ignore_rfc8624, ignore_rfc9276) self._populate_nodata_status(supported_algs, ignore_rfc8624, ignore_rfc9276) self._populate_nxdomain_status(supported_algs, ignore_rfc8624, ignore_rfc9276) self._populate_inconsistent_negative_dnssec_responses_all(ignore_rfc9276) self._finalize_key_roles() if not is_dlv: self._populate_delegation_status(supported_algs, supported_digest_algs, ignore_rfc8624) if self.dlv_parent is not None: self._populate_ds_status(dns.rdatatype.DLV, supported_algs, supported_digest_algs, ignore_rfc8624) self._populate_cdnskey_cds_correctness(trust_cdnskey_cds) self._populate_cdnskey_cds_consistency() self._populate_cdnskey_cds_ds_consistency() self._populate_dnskey_status(trusted_keys, multi_signer) def populate_status(self, trusted_keys, supported_algs=None, supported_digest_algs=None, is_dlv=False, follow_mx=True, ignore_rfc8624=False, ignore_rfc9276=False, trust_cdnskey_cds=False, multi_signer=False): # identify supported algorithms as intersection of explicitly supported # and software supported if supported_algs is not None: supported_algs.intersection_update(crypto._supported_algs) else: supported_algs = copy.copy(crypto._supported_algs) if supported_digest_algs is not None: supported_digest_algs.intersection_update(crypto._supported_digest_algs) else: supported_digest_algs = copy.copy(crypto._supported_digest_algs) # unless we are overriding, mark prohibited algorithms as not supported if not ignore_rfc8624: supported_algs.difference_update(Status.DNSKEY_ALGS_VALIDATION_PROHIBITED) 
    def _populate_name_status(self, trace=None):
        """Determine the status of this name (NOERROR, NXDOMAIN, or
        INDETERMINATE) and populate the yxdomain/yxrrset/nxrrset sets from
        the collected query responses.

        trace is the chain of analysis objects already visited, so the
        method can be called independently of populate_status and still
        avoid infinite recursion through CNAME targets.
        """
        # using trace allows _populate_name_status to be called independent of
        # populate_status
        if trace is None:
            trace = []

        # avoid loops
        if self in trace:
            return

        # start from a clean slate: indeterminate status and empty
        # existence/non-existence sets
        self.status = Status.NAME_STATUS_INDETERMINATE
        self.yxdomain = set()
        self.yxrrset_proper = set()
        self.yxrrset = set()
        self.nxrrset = set()

        bailiwick_map, default_bailiwick = self.get_bailiwick_mapping()

        for (qname, rdtype), query in self.queries.items():

            qname_obj = self.get_name(qname)
            # DS and DLV are served by the parent (or DLV parent) zone, so
            # shift the object whose servers we consult accordingly
            if rdtype == dns.rdatatype.DS and \
                    qname_obj.name == qname and qname_obj.is_zone():
                qname_obj = qname_obj.parent
            elif rdtype == dns.rdatatype.DLV and qname == qname_obj.dlv_name:
                qname_obj = qname_obj.dlv_parent

            for rrset_info in query.answer_info:
                self.yxdomain.add(rrset_info.rrset.name)
                # for ALL types, add the name and type to yxrrset
                self.yxrrset.add((rrset_info.rrset.name, rrset_info.rrset.rdtype))
                # for all types EXCEPT where the record is a CNAME record
                # synthesized from a DNAME record, add the name and type to
                # yxrrset_proper
                if not (rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.cname_info_from_dname):
                    self.yxrrset_proper.add((rrset_info.rrset.name, rrset_info.rrset.rdtype))
                if rrset_info.dname_info is not None:
                    self.yxrrset.add((rrset_info.dname_info.rrset.name, rrset_info.dname_info.rrset.rdtype))
                for cname_rrset_info in rrset_info.cname_info_from_dname:
                    self.yxrrset.add((cname_rrset_info.dname_info.rrset.name, cname_rrset_info.dname_info.rrset.rdtype))
                    self.yxrrset.add((cname_rrset_info.rrset.name, cname_rrset_info.rrset.rdtype))

            # NODATA answers prove the name exists (unless the response was
            # an upward referral) but the rrset does not
            for neg_response_info in query.nodata_info:
                for (server,client) in neg_response_info.servers_clients:
                    for response in neg_response_info.servers_clients[(server,client)]:
                        if neg_response_info.qname == qname or response.recursion_desired_and_available():
                            if not response.is_upward_referral(qname_obj.zone.name):
                                self.yxdomain.add(neg_response_info.qname)
                            self.nxrrset.add((neg_response_info.qname, neg_response_info.rdtype))

            for neg_response_info in query.nxdomain_info:
                for (server,client) in neg_response_info.servers_clients:
                    for response in neg_response_info.servers_clients[(server,client)]:
                        if neg_response_info.qname == qname or response.recursion_desired_and_available():
                            self.nxrrset.add((neg_response_info.qname, neg_response_info.rdtype))

            # now check referrals (if name hasn't already been identified as YXDOMAIN)
            if self.name == qname and self.name not in self.yxdomain:
                if rdtype not in (self.referral_rdtype, dns.rdatatype.NS):
                    continue
                # FoundYXDOMAIN is used purely as a multi-level loop break
                try:
                    for query1 in query.queries.values():
                        for server in query1.responses:
                            bailiwick = bailiwick_map.get(server, default_bailiwick)
                            for client in query1.responses[server]:
                                if query1.responses[server][client].is_referral(self.name, rdtype, query.rdclass, bailiwick, proper=True):
                                    self.yxdomain.add(self.name)
                                    raise FoundYXDOMAIN
                except FoundYXDOMAIN:
                    pass

        # now add the values of CNAMEs
        for cname in self.cname_targets:
            for target, cname_obj in self.cname_targets[cname].items():
                if cname_obj is self:
                    continue
                if cname_obj is None:
                    continue
                # lazily populate the target's status if it hasn't been done
                if cname_obj.yxrrset is None:
                    cname_obj._populate_name_status(trace=trace + [self])
                for name, rdtype in cname_obj.yxrrset:
                    if name == target:
                        self.yxrrset.add((cname,rdtype))

        if self.name in self.yxdomain:
            self.status = Status.NAME_STATUS_NOERROR

        # if still indeterminate, an NXDOMAIN response for any non-DS query
        # of this name settles the status as NXDOMAIN
        if self.status == Status.NAME_STATUS_INDETERMINATE:
            for (qname, rdtype), query in self.queries.items():
                if rdtype == dns.rdatatype.DS:
                    continue
                if [x for x in query.nxdomain_info if x.qname == qname]:
                    self.status = Status.NAME_STATUS_NXDOMAIN
                    break
neg_response_info.qname == qname or response.recursion_desired_and_available(): if not response.is_upward_referral(qname_obj.zone.name): self.yxdomain.add(neg_response_info.qname) self.nxrrset.add((neg_response_info.qname, neg_response_info.rdtype)) for neg_response_info in query.nxdomain_info: for (server,client) in neg_response_info.servers_clients: for response in neg_response_info.servers_clients[(server,client)]: if neg_response_info.qname == qname or response.recursion_desired_and_available(): self.nxrrset.add((neg_response_info.qname, neg_response_info.rdtype)) # now check referrals (if name hasn't already been identified as YXDOMAIN) if self.name == qname and self.name not in self.yxdomain: if rdtype not in (self.referral_rdtype, dns.rdatatype.NS): continue try: for query1 in query.queries.values(): for server in query1.responses: bailiwick = bailiwick_map.get(server, default_bailiwick) for client in query1.responses[server]: if query1.responses[server][client].is_referral(self.name, rdtype, query.rdclass, bailiwick, proper=True): self.yxdomain.add(self.name) raise FoundYXDOMAIN except FoundYXDOMAIN: pass # now add the values of CNAMEs for cname in self.cname_targets: for target, cname_obj in self.cname_targets[cname].items(): if cname_obj is self: continue if cname_obj is None: continue if cname_obj.yxrrset is None: cname_obj._populate_name_status(trace=trace + [self]) for name, rdtype in cname_obj.yxrrset: if name == target: self.yxrrset.add((cname,rdtype)) if self.name in self.yxdomain: self.status = Status.NAME_STATUS_NOERROR if self.status == Status.NAME_STATUS_INDETERMINATE: for (qname, rdtype), query in self.queries.items(): if rdtype == dns.rdatatype.DS: continue if [x for x in query.nxdomain_info if x.qname == qname]: self.status = Status.NAME_STATUS_NXDOMAIN break def _populate_responsiveness_errors(self, qname_obj, response, server, client, warnings, errors): # if we had to make some change to elicit a response, find out why that # was change_err 
= None if response.responsive_cause_index is not None: retry = response.history[response.responsive_cause_index] cause_err_class = None action_err_class = None cause_err_kwargs = { 'tcp': response.responsive_cause_index_tcp } action_err_kwargs = {} require_valid = False dnssec_downgrade_class = None #TODO - look for success ratio to servers due to timeout or network # error, for better determining if a problem is intermittent #################### # CAUSES # # Network error - kwargs: errno; don't require a valid response if retry.cause == Q.RETRY_CAUSE_NETWORK_ERROR: cause_err_class = Errors.NetworkError cause_err_kwargs['errno'] = errno.errorcode.get(retry.cause_arg, 'UNKNOWN') require_valid = False # Malformed response - kwargs: msg_size; require a valid response elif retry.cause == Q.RETRY_CAUSE_FORMERR: cause_err_class = Errors.FormError cause_err_kwargs['msg_size'] = response.msg_size require_valid = True # Timeout - kwargs: attempts; don't require a valid response elif retry.cause == Q.RETRY_CAUSE_TIMEOUT: cause_err_class = Errors.Timeout cause_err_kwargs['attempts'] = response.responsive_cause_index+1 require_valid = False # Invalid RCODE - kwargs: rcode; require a valid response elif retry.cause == Q.RETRY_CAUSE_RCODE: # If the RCODE was FORMERR, SERVFAIL, or NOTIMP, then this is a # signal to the client that the server doesn't support EDNS. # Thus, *independent of action*, we mark this as a DNSSEC # downgrade, if the zone is signed. if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and \ qname_obj is not None and qname_obj.zone.signed: dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled # if the RCODE was FORMERR, SERVFAIL, or NOTIMP, and the # corresponding action was to disable EDNS, then this was a # reasonable response from a server that doesn't support EDNS, # but it's only innocuous if the zone is not signed. 
if retry.cause_arg in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP) and \ retry.action == Q.RETRY_ACTION_DISABLE_EDNS and \ not (qname_obj is not None and qname_obj.zone.signed): pass # or if the RCODE was BADVERS, and the corresponding action was # to change EDNS version, then this was a reasonable response # from a server that doesn't support the EDNS version elif retry.cause_arg == dns.rcode.BADVERS and \ retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION: pass # or if the RCODE was SERVFAIL, and the corresponding action was # to set the CD flag, then this was a reasonable response # from a server that couldn't validate the query elif retry.cause_arg == dns.rcode.SERVFAIL and \ retry.action == Q.RETRY_ACTION_SET_FLAG and \ retry.action_arg == dns.flags.CD: pass # or if the RCODE was BADCOOKIE, and the COOKIE opt we sent # contained only a client cookie or an invalid server cookie, # then this was a reasonable response from a server that # supports cookies elif retry.cause_arg == 23 and \ response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD) and \ retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE: pass # or if the RCODE was FORMERR, and the COOKIE opt we sent # contained a malformed cookie, then this was a reasonable # response from a server that supports cookies if retry.cause_arg == dns.rcode.FORMERR and \ response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH and \ (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or \ (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)): pass # otherwise, set the error class and instantiation kwargs # appropriately else: cause_err_class = Errors.InvalidRcode cause_err_kwargs['rcode'] = dns.rcode.to_text(retry.cause_arg) require_valid = True # Other errors elif retry.cause == Q.RETRY_CAUSE_OTHER: require_valid = True # by default, use the action argument as the argument action_arg = retry.action_arg #################### # ACTIONS # # No change 
was made; a valid response was received when the query # was issued again if retry.action == Q.RETRY_ACTION_NO_CHANGE: pass # Only the source port was changed; a valid response was received # when the query was issued again elif retry.action == Q.RETRY_ACTION_CHANGE_SPORT: pass # A flag was set to elicit a response; kwargs: flag elif retry.action == Q.RETRY_ACTION_SET_FLAG: action_err_class = Errors.ResponseErrorWithoutRequestFlag action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg) if not action_err_kwargs['flag']: action_err_kwargs['flag'] = retry.action_arg # A flag was cleared to elicit a response; kwargs: flag elif retry.action == Q.RETRY_ACTION_CLEAR_FLAG: action_err_class = Errors.ResponseErrorWithRequestFlag action_err_kwargs['flag'] = dns.flags.to_text(retry.action_arg) if not action_err_kwargs['flag']: action_err_kwargs['flag'] = retry.action_arg # EDNS was disabled to elicit a response; kwargs: None elif retry.action == Q.RETRY_ACTION_DISABLE_EDNS: action_err_class = Errors.ResponseErrorWithEDNS # DNSSEC was downgraded because DO bit is no longer available dnssec_downgrade_class = Errors.DNSSECDowngradeEDNSDisabled # The EDNS UDP max payload size was changed to elicit a response; # kwargs: pmtu_lower_bound, pmtu_upper_bound elif retry.action == Q.RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD: action_err_class = Errors.PMTUExceeded #TODO need bounding here action_err_kwargs['pmtu_lower_bound'] = None action_err_kwargs['pmtu_upper_bound'] = None # An EDNS flag was set to elicit a response; kwargs: flag elif retry.action == Q.RETRY_ACTION_SET_EDNS_FLAG: action_err_class = Errors.ResponseErrorWithoutEDNSFlag action_err_kwargs['flag'] = dns.flags.edns_to_text(retry.action_arg) if not action_err_kwargs['flag']: action_err_kwargs['flag'] = retry.action_arg # An EDNS flag was cleared to elicit a response; kwargs: flag elif retry.action == Q.RETRY_ACTION_CLEAR_EDNS_FLAG: action_err_class = Errors.ResponseErrorWithEDNSFlag action_err_kwargs['flag'] = 
dns.flags.edns_to_text(retry.action_arg) if not action_err_kwargs['flag']: action_err_kwargs['flag'] = retry.action_arg # if this was the DO flag, then DNSSEC was downgraded if retry.action_arg == dns.flags.DO: dnssec_downgrade_class = Errors.DNSSECDowngradeDOBitCleared # An EDNS option was added to elicit a response; kwargs: option elif retry.action == Q.RETRY_ACTION_ADD_EDNS_OPTION: action_err_class = Errors.ResponseErrorWithoutEDNSOption #TODO convert numeric option ID to text action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg) # An EDNS option was removed to elicit a response; kwargs: option elif retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION: action_err_class = Errors.ResponseErrorWithEDNSOption #TODO convert numeric option ID to text action_err_kwargs['option'] = fmt.EDNS_OPT_DESCRIPTIONS.get(retry.action_arg, retry.action_arg) # The EDNS version was changed to elicit a response; kwargs: # edns_old, edns_new elif retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION: action_err_class = Errors.ResponseErrorWithEDNSVersion action_err_kwargs['edns_old'] = response.query.edns action_err_kwargs['edns_new'] = retry.action_arg # if this was about changing EDNS version, then use the # original version number as the argument action_arg = response.query.edns if cause_err_class is not None and action_err_class is not None: if qname_obj is not None and qname_obj.zone.server_responsive_for_action(server, client, response.responsive_cause_index_tcp, \ retry.action, action_arg, require_valid): query_specific = True else: query_specific = False cause_err = cause_err_class(**cause_err_kwargs) change_err = action_err_class(response_error=cause_err, query_specific=query_specific, **action_err_kwargs) if change_err is not None: # if the error really matters (e.g., due to DNSSEC), note an error if dnssec_downgrade_class is not None and qname_obj is not None and qname_obj.zone.signed: 
    def _populate_edns_errors(self, qname_obj, response, server, client, warnings, errors):
        """Check EDNS-related consistency between the effective request and
        the message response, inserting any issues into `warnings`.

        Covers: EDNS ignored by server (with or without evidence of EDNS
        support), BADVERS version handling, EDNS version mismatch,
        undefined EDNS flags, and gratuitous OPT records.
        """
        # if we actually got a message response (as opposed to timeout, network
        # error, form error, etc.)
        if response.message is None:
            return

        edns_errs = []

        # if the effective request used EDNS
        if response.effective_edns >= 0:
            # if the message response didn't use EDNS, then create an error
            if response.message.edns < 0:
                # if there were indicators that the server supported EDNS
                # (e.g., by RRSIGs in the answer), then report it as such
                if [x for x in response.message.answer if x.rdtype == dns.rdatatype.RRSIG]:
                    edns_errs.append(Errors.EDNSSupportNoOpt())
                # otherwise, simply report it as a server not responding
                # properly to EDNS requests
                else:
                    edns_errs.append(Errors.EDNSIgnored())
            # the message response did use EDNS
            else:
                if response.message.rcode() == dns.rcode.BADVERS:
                    # if the message response code was BADVERS, then the EDNS
                    # version in the response should have been less than
                    # that of the request
                    if response.message.edns >= response.effective_edns:
                        edns_errs.append(Errors.ImplementedEDNSVersionNotProvided(request_version=response.effective_edns, response_version=response.message.edns))
                # if the message response used a version of EDNS other than
                # that requested, then create an error (should have been
                # answered with BADVERS)
                elif response.message.edns != response.effective_edns:
                    edns_errs.append(Errors.EDNSVersionMismatch(request_version=response.effective_edns, response_version=response.message.edns))

                # check that all EDNS flags are all zero, except for DO
                undefined_edns_flags_set = (response.message.ednsflags & 0xffff) & ~EDNS_DEFINED_FLAGS
                if undefined_edns_flags_set:
                    edns_errs.append(Errors.EDNSUndefinedFlagsSet(flags=undefined_edns_flags_set))
        else:
            # if the effective request didn't use EDNS, and we got a
            # message response with an OPT record
            if response.message.edns >= 0:
                edns_errs.append(Errors.GratuitousOPT())

        for edns_err in edns_errs:
            Errors.DomainNameAnalysisError.insert_into_list(edns_err, warnings, server, client, response)
    def _populate_cookie_errors(self, qname_obj, response, server, client, warnings, errors):
        """Check DNS Cookie (RFC 7873) behavior of the server for this
        response and insert any issues into `warnings`.

        The checks follow the server-side processing scenarios of RFC 7873
        section 5.2 (no COOKIE option, malformed cookie, client-cookie-only,
        invalid server cookie, valid server cookie) and the client-side
        checks of section 5.3.
        """
        if response.message is None:
            return

        cookie_errs = []

        # option type 10 is the EDNS COOKIE option
        try:
            cookie_opt = [o for o in response.effective_edns_options if o.otype == 10][0]
        except IndexError:
            cookie_opt = None

        try:
            cookie_opt_from_server = [o for o in response.message.options if o.otype == 10][0]
        except IndexError:
            cookie_opt_from_server = None

        # supports_cookies is a boolean value that indicates whether the server
        # supports DNS cookies.  Note that we are not looking for the value of
        # the server cookie itself, only whether the server supports cookies,
        # so we don't need to use get_cookie_jar_mapping().
        supports_cookies = qname_obj is not None and server in qname_obj.cookie_jar

        # RFC 7873: 5.2.1. No OPT RR or No COOKIE Option
        if response.query.edns < 0 or cookie_opt is None:
            # response.effective_server_cookie_status == Q.DNS_COOKIE_NO_COOKIE
            if cookie_opt_from_server is not None:
                cookie_errs.append(Errors.GratuitousCookie())

        elif supports_cookies:
            # The following are scenarios for DNS cookies.

            # RFC 7873: 5.2.2. Malformed COOKIE Option
            if response.server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
                issued_formerr = False
                if response.effective_server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH:
                    if response.message.rcode() == dns.rcode.FORMERR:
                        # The query resulting in the response we got was sent
                        # with a COOKIE option with improper length, and the
                        # return code for the response was FORMERR.
                        issued_formerr = True
                elif response.responsive_cause_index is not None:
                    retry = response.history[response.responsive_cause_index]
                    # action_arg == 10 refers to removal of the COOKIE option
                    if retry.cause == Q.RETRY_CAUSE_RCODE and \
                            retry.cause_arg == dns.rcode.FORMERR and \
                            (retry.action == Q.RETRY_ACTION_DISABLE_EDNS or \
                            (retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION and retry.action_arg == 10)):
                        # We started with a COOKIE opt with improper length,
                        # and, in response to FORMERR, from the server, we
                        # changed EDNS behavior either by disabling EDNS or
                        # removing the DNS COOKIE OPT, which resulted in us
                        # getting a legitimate response.
                        issued_formerr = True
                if not issued_formerr:
                    cookie_errs.append(Errors.MalformedCookieWithoutFORMERR())

            # RFC 7873: 5.2.3. Only a Client Cookie
            # RFC 7873: 5.2.4. A Client Cookie and an Invalid Server Cookie
            if response.server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
                if response.server_cookie_status == Q.DNS_COOKIE_CLIENT_COOKIE_ONLY:
                    err_cls = Errors.NoServerCookieWithoutBADCOOKIE
                else:
                    err_cls = Errors.InvalidServerCookieWithoutBADCOOKIE
                issued_badcookie = False
                if response.effective_server_cookie_status in (Q.DNS_COOKIE_CLIENT_COOKIE_ONLY, Q.DNS_COOKIE_SERVER_COOKIE_BAD):
                    # The query resulting in the response we got was sent with
                    # a bad server cookie.
                    if cookie_opt_from_server is None:
                        cookie_errs.append(Errors.NoCookieOption())
                    elif len(cookie_opt_from_server.data) == 8:
                        # 8 bytes means only a client cookie was returned
                        cookie_errs.append(Errors.NoServerCookie())
                    # rcode 23 is BADCOOKIE
                    if response.message.rcode() == 23:
                        # The query resulting in the response we got was sent
                        # with an invalid server cookie, and the result was
                        # BADCOOKIE.
                        issued_badcookie = True
                elif response.responsive_cause_index is not None:
                    retry = response.history[response.responsive_cause_index]
                    if retry.cause == Q.RETRY_CAUSE_RCODE and \
                            retry.cause_arg == 23 and \
                            retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE:
                        # We started with a COOKIE opt with an invalid server
                        # cookie, and, in response to a BADCOOKIE response from
                        # the server, we updated to a fresh DNS server cookie,
                        # which resulted in us getting a legitimate response.
                        issued_badcookie = True
                if self._strict_cookies and not issued_badcookie:
                    cookie_errs.append(err_cls())

            # RFC 7873: 5.2.5. A Client Cookie and a Valid Server Cookie
            if response.effective_server_cookie_status == Q.DNS_COOKIE_SERVER_COOKIE_FRESH:
                # The query resulting in the response we got was sent with only
                # a client cookie.
                if cookie_opt_from_server is None:
                    cookie_errs.append(Errors.NoCookieOption())
                elif len(cookie_opt_from_server.data) == 8:
                    cookie_errs.append(Errors.NoServerCookie())

        if cookie_opt is not None and cookie_opt_from_server is not None:
            # RFC 7873: 5.3. Client cookie does not match
            if len(cookie_opt_from_server.data) >= 8 and \
                    cookie_opt_from_server.data[:8] != cookie_opt.data[:8]:
                cookie_errs.append(Errors.ClientCookieMismatch())

            # RFC 7873: 5.3. Client cookie has an invalid length
            if len(cookie_opt_from_server.data) < 8 or \
                    len(cookie_opt_from_server.data) > 40:
                cookie_errs.append(Errors.CookieInvalidLength(length=len(cookie_opt_from_server.data)))

        for cookie_err in cookie_errs:
            Errors.DomainNameAnalysisError.insert_into_list(cookie_err, warnings, server, client, response)
    def _populate_response_errors(self, qname_obj, response, server, client, warnings, errors):
        """Check response-level flag issues: non-authoritative answers from
        authoritative servers (with a carve-out for DS referrals),
        recursion unavailable from recursive servers, and NOERROR answers
        inconsistent with an NXDOMAIN ancestor.
        """
        query = response.query

        if qname_obj is not None:
            # if the response was complete (not truncated), then mark any
            # response flag issues as errors.  Otherwise, mark them as
            # warnings.
            if response.is_complete_response():
                group = errors
            else:
                group = warnings

            if qname_obj.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE:
                if not response.is_authoritative():
                    ds_referral = False
                    mygrp = group
                    if query.rdtype == dns.rdatatype.DS:
                        # if the parent zone is not signed, then only warn
                        if not qname_obj.zone.signed:
                            mygrp = warnings
                        # handle DS as a special case
                        if response.is_referral(query.qname, query.rdtype, query.rdclass, qname_obj.name):
                            ds_referral = True

                    if ds_referral:
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.ReferralForDSQuery(parent=fmt.humanize_name(qname_obj.name)), mygrp, server, client, response)
                    else:
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.NotAuthoritative(), mygrp, server, client, response)

            elif qname_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                if response.recursion_desired() and not response.recursion_available():
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.RecursionNotAvailable(), group, server, client, response)

            # check for NOERROR, inconsistent with NXDOMAIN in ancestor
            if response.is_complete_response() and response.message.rcode() == dns.rcode.NOERROR and qname_obj.nxdomain_ancestor is not None:
                Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAINAncestry(qname=fmt.humanize_name(response.query.qname), ancestor_qname=fmt.humanize_name(qname_obj.nxdomain_ancestor.name)), errors, server, client, response)
if response.is_complete_response(): group = errors else: group = warnings if qname_obj.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE: if not response.is_authoritative(): ds_referral = False mygrp = group if query.rdtype == dns.rdatatype.DS: # if the parent zone is not signed, then only warn if not qname_obj.zone.signed: mygrp = warnings # handle DS as a special case if response.is_referral(query.qname, query.rdtype, query.rdclass, qname_obj.name): ds_referral = True if ds_referral: Errors.DomainNameAnalysisError.insert_into_list(Errors.ReferralForDSQuery(parent=fmt.humanize_name(qname_obj.name)), mygrp, server, client, response) else: Errors.DomainNameAnalysisError.insert_into_list(Errors.NotAuthoritative(), mygrp, server, client, response) elif qname_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE: if response.recursion_desired() and not response.recursion_available(): Errors.DomainNameAnalysisError.insert_into_list(Errors.RecursionNotAvailable(), group, server, client, response) # check for NOERROR, inconsistent with NXDOMAIN in ancestor if response.is_complete_response() and response.message.rcode() == dns.rcode.NOERROR and qname_obj.nxdomain_ancestor is not None: Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAINAncestry(qname=fmt.humanize_name(response.query.qname), ancestor_qname=fmt.humanize_name(qname_obj.nxdomain_ancestor.name)), errors, server, client, response) def _populate_foreign_class_warnings(self, qname_obj, response, server, client, warnings, errors): query = response.query cls = query.rdclass if response.message is None: return # if there was foreign class data, then warn about it -- except for SIG # records in the additional section ans_cls = [r.rdclass for r in response.message.answer if r.rdclass != cls] auth_cls = [r.rdclass for r in response.message.authority if r.rdclass != cls] add_cls = [r.rdclass for r in response.message.additional if r.rdclass != cls and r.rdtype != dns.rdatatype.SIG] if ans_cls: 
    def _populate_case_preservation_warnings(self, qname_obj, response, server, client, warnings, errors):
        """Warn if the server did not preserve the case of the query name in
        the question section of its response.
        """
        query = response.query
        msg = response.message

        # if there was a case mismatch, then warn about it
        if msg.question and query.qname.to_text() != msg.question[0].name.to_text():
            Errors.DomainNameAnalysisError.insert_into_list(Errors.CasePreservationError(qname=fmt.humanize_name(query.qname, canonicalize=False)), warnings, server, client, response)

    def _populate_wildcard_status(self, query, rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276):
        """For each wildcard from which rrset_info was synthesized, validate
        the accompanying NSEC/NSEC3 proofs and record the resulting statuses
        in self.wildcard_status; servers that returned no NSEC proof at all
        get a MissingNSECForWildcard error.
        """
        for wildcard_name in rrset_info.wildcard_info:
            if qname_obj is None:
                zone_name = wildcard_name.parent()
            else:
                zone_name = qname_obj.zone.name

            # start by assuming every (server, client, response) is missing
            # an NSEC proof; entries are removed as proofs are found below
            servers_missing_nsec = set()
            for server, client in rrset_info.wildcard_info[wildcard_name].servers_clients:
                for response in rrset_info.wildcard_info[wildcard_name].servers_clients[(server,client)]:
                    servers_missing_nsec.add((server,client,response))

            statuses = []
            status_by_response = {}
            for nsec_set_info in rrset_info.wildcard_info[wildcard_name].nsec_set_info:
                if nsec_set_info.use_nsec3:
                    status = Status.NSEC3StatusWildcard(rrset_info.rrset.name, wildcard_name, rrset_info.rrset.rdtype, zone_name, False, nsec_set_info, ignore_rfc9276)
                else:
                    status = Status.NSECStatusWildcard(rrset_info.rrset.name, wildcard_name, rrset_info.rrset.rdtype, zone_name, False, nsec_set_info)

                # the NSEC(3) records themselves also carry RRSIGs to check
                for nsec_rrset_info in nsec_set_info.rrsets.values():
                    self._populate_rrsig_status(query, nsec_rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276)

                if status.validation_status == Status.NSEC_STATUS_VALID:
                    if status not in statuses:
                        statuses.append(status)

                for server, client in nsec_set_info.servers_clients:
                    for response in nsec_set_info.servers_clients[(server,client)]:
                        if (server,client,response) in servers_missing_nsec:
                            servers_missing_nsec.remove((server,client,response))
                        # a valid proof supersedes any previously recorded
                        # invalid status for the same response
                        if status.validation_status == Status.NSEC_STATUS_VALID:
                            if (server,client,response) in status_by_response:
                                del status_by_response[(server,client,response)]
                        else:
                            status_by_response[(server,client,response)] = status

            # keep any invalid statuses that were never superseded
            for (server,client,response), status in status_by_response.items():
                if status not in statuses:
                    statuses.append(status)

            self.wildcard_status[rrset_info.wildcard_info[wildcard_name]] = statuses

            for server, client, response in servers_missing_nsec:
                # by definition, DNSSEC was requested (otherwise we
                # wouldn't know this was a wildcard), so no need to
                # check for DO bit in request
                Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingNSECForWildcard(), self.rrset_errors[rrset_info], server, client, response)

    def _detect_cname_loop(self, name, trace=None):
        """Raise CNAMELoopDetected if following CNAME targets from `name`
        revisits a name already on the chain (tracked via `trace`).
        """
        if name not in self.cname_targets:
            return
        if trace is None:
            trace = []
        if name in trace:
            raise CNAMELoopDetected()
        for target, cname_obj in self.cname_targets[name].items():
            if cname_obj is not None:
                cname_obj._detect_cname_loop(target, trace=trace + [name])

    def _populate_cname_status(self, rrset_info):
        """For a CNAME rrset: warn if the owner name also has other (proper)
        rrset data, and record an error if the CNAME chain loops.
        """
        if rrset_info.rrset.rdtype == dns.rdatatype.CNAME:
            rdtypes = [r for (n, r) in self.yxrrset_proper if n == rrset_info.rrset.name and r != dns.rdatatype.CNAME]
            if rdtypes:
                Errors.DomainNameAnalysisError.insert_into_list(Errors.CNAMEWithOtherData(name=fmt.humanize_name(rrset_info.rrset.name)), self.rrset_warnings[rrset_info], None, None, None)
            try:
                self._detect_cname_loop(rrset_info.rrset.name)
            except CNAMELoopDetected:
                Errors.DomainNameAnalysisError.insert_into_list(Errors.CNAMELoop(), self.rrset_errors[rrset_info], None, None, None)
    def _initialize_rrset_status(self, rrset_info):
        """Reset the per-rrset warning/error lists and RRSIG status map."""
        self.rrset_warnings[rrset_info] = []
        self.rrset_errors[rrset_info] = []
        self.rrsig_status[rrset_info] = {}

    def _populate_rrsig_status(self, query, rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276, populate_response_errors=True):
        """Validate the RRSIGs covering rrset_info against the signer's
        DNSKEYs, recording per-(rrsig, dnskey) statuses in self.rrsig_status
        and errors for responses whose RRSIG coverage is missing or does not
        account for every algorithm advertised in DNSKEY/DS/DLV.

        Also populates wildcard and CNAME statuses for the rrset and,
        unless populate_response_errors is False, the per-response
        responsiveness/EDNS/cookie/class/case checks.
        """
        self._initialize_rrset_status(rrset_info)

        if qname_obj is None:
            zone_name = None
        else:
            zone_name = qname_obj.zone.name

        # the algorithms expected to sign this rrset, per the zone's
        # DNSKEY, DS, and DLV rrsets (empty when unknown)
        if qname_obj is None:
            dnssec_algorithms_in_dnskey = set()
            dnssec_algorithms_in_ds = set()
            dnssec_algorithms_in_dlv = set()
        else:
            dnssec_algorithms_in_dnskey = qname_obj.zone.dnssec_algorithms_in_dnskey
            if query.rdtype == dns.rdatatype.DLV:
                dnssec_algorithms_in_ds = set()
                dnssec_algorithms_in_dlv = set()
            else:
                dnssec_algorithms_in_ds = qname_obj.zone.dnssec_algorithms_in_ds
                dnssec_algorithms_in_dlv = qname_obj.zone.dnssec_algorithms_in_dlv

        # handle DNAMEs
        has_dname = set()
        if rrset_info.rrset.rdtype == dns.rdatatype.CNAME:
            if rrset_info.dname_info is not None:
                dname_info_list = [rrset_info.dname_info]
                dname_status = Status.CNAMEFromDNAMEStatus(rrset_info, None)
            elif rrset_info.cname_info_from_dname:
                dname_info_list = [c.dname_info for c in rrset_info.cname_info_from_dname]
                dname_status = Status.CNAMEFromDNAMEStatus(rrset_info.cname_info_from_dname[0], rrset_info)
            else:
                dname_info_list = []
                dname_status = None

            if dname_info_list:
                for dname_info in dname_info_list:
                    for server, client in dname_info.servers_clients:
                        has_dname.update([(server,client,response) for response in dname_info.servers_clients[(server,client)]])

                if rrset_info not in self.dname_status:
                    self.dname_status[rrset_info] = []
                self.dname_status[rrset_info].append(dname_status)

        # track, per (server, client, response), which algorithms have
        # signed the rrset so far; an entry is deleted once all expected
        # algorithms are accounted for, so what remains is the deficit
        algs_signing_rrset = {}
        if dnssec_algorithms_in_dnskey or dnssec_algorithms_in_ds or dnssec_algorithms_in_dlv:
            for server, client in rrset_info.servers_clients:
                for response in rrset_info.servers_clients[(server, client)]:
                    if (server, client, response) not in has_dname:
                        algs_signing_rrset[(server, client, response)] = set()

        for rrsig in rrset_info.rrsig_info:
            self.rrsig_status[rrset_info][rrsig] = {}

            signer = self.get_name(rrsig.signer)
            #XXX
            if signer is not None:

                if signer.stub:
                    continue

                for server, client in rrset_info.rrsig_info[rrsig].servers_clients:
                    for response in rrset_info.rrsig_info[rrsig].servers_clients[(server,client)]:
                        if (server,client,response) not in algs_signing_rrset:
                            continue
                        algs_signing_rrset[(server,client,response)].add(rrsig.algorithm)
                        if not dnssec_algorithms_in_dnskey.difference(algs_signing_rrset[(server,client,response)]) and \
                                not dnssec_algorithms_in_ds.difference(algs_signing_rrset[(server,client,response)]) and \
                                not dnssec_algorithms_in_dlv.difference(algs_signing_rrset[(server,client,response)]):
                            del algs_signing_rrset[(server,client,response)]

                # define self-signature
                self_sig = rrset_info.rrset.rdtype == dns.rdatatype.DNSKEY and rrsig.signer == rrset_info.rrset.name

                checked_keys = set()
                for dnskey_set, dnskey_meta in signer.get_dnskey_sets():
                    validation_status_mapping = { True: set(), False: set(), None: set() }
                    for dnskey in dnskey_set:
                        # if we've already checked this key (i.e., in
                        # another DNSKEY RRset) then continue
                        if dnskey in checked_keys:
                            continue
                        # if this is a RRSIG over DNSKEY RRset, then make sure we're validating
                        # with a DNSKEY that is actually in the set
                        if self_sig and dnskey.rdata not in rrset_info.rrset:
                            continue
                        checked_keys.add(dnskey)
                        # only attempt validation with keys that plausibly
                        # match the RRSIG (protocol, key tag, algorithm)
                        if not (dnskey.rdata.protocol == 3 and \
                                rrsig.key_tag in (dnskey.key_tag, dnskey.key_tag_no_revoke) and \
                                rrsig.algorithm == dnskey.rdata.algorithm):
                            continue
                        rrsig_status = Status.RRSIGStatus(rrset_info, rrsig, dnskey, zone_name, fmt.datetime_to_timestamp(self.analysis_end), supported_algs, ignore_rfc8624)
                        validation_status_mapping[rrsig_status.signature_valid].add(rrsig_status)

                    # if we got results for multiple keys, then just select the one that validates
                    for status in True, False, None:
                        if validation_status_mapping[status]:
                            for rrsig_status in validation_status_mapping[status]:
                                self.rrsig_status[rrsig_status.rrset][rrsig_status.rrsig][rrsig_status.dnskey] = rrsig_status

                                # classify the signing key as KSK or ZSK based
                                # on what it signs (at the zone apex only)
                                if self.is_zone() and rrset_info.rrset.name == self.name and \
                                        rrset_info.rrset.rdtype != dns.rdatatype.DS and \
                                        rrsig_status.dnskey is not None:
                                    if rrset_info.rrset.rdtype == dns.rdatatype.DNSKEY:
                                        if self.ksks is not None:
                                            self.ksks.add(rrsig_status.dnskey)
                                    # Per RFC 7344, CDNSKEY and CDS RRsets are
                                    # signed by DNSKEYs that correspond to DS
                                    # records.  This almost certainly
                                    # corresponds to KSKs.  Thus, we should not
                                    # be calling the DNSKEY a ZSK simply
                                    # because it signs a CDNSKEY or CDS RRset.
                                    elif rrset_info.rrset.rdtype in (dns.rdatatype.CDNSKEY, dns.rdatatype.CDS):
                                        pass
                                    else:
                                        if self.zsks is not None:
                                            self.zsks.add(rrsig_status.dnskey)

                                key = rrsig_status.rrset, rrsig_status.rrsig
                            break

            # no corresponding DNSKEY
            if not self.rrsig_status[rrset_info][rrsig]:
                rrsig_status = Status.RRSIGStatus(rrset_info, rrsig, None, zone_name, fmt.datetime_to_timestamp(self.analysis_end), supported_algs, ignore_rfc8624)
                self.rrsig_status[rrsig_status.rrset][rrsig_status.rrsig][None] = rrsig_status

        # list errors for rrsets with which no RRSIGs were returned or not all algorithms were accounted for
        for server,client,response in algs_signing_rrset:
            # if DNSSEC was not requested (e.g., for diagnostics purposes),
            # then don't report an issue
            if not (response.query.edns >= 0 and response.query.edns_flags & dns.flags.DO):
                continue
            errors = self.rrset_errors[rrset_info]
            # report an error if all RRSIGs are missing
            if not algs_signing_rrset[(server,client,response)]:
                if response.dnssec_requested():
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIG(), errors, server, client, response)
                elif qname_obj is not None and qname_obj.zone.server_responsive_with_do(server,client,response.effective_tcp,True):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.UnableToRetrieveDNSSECRecords(), errors, server, client, response)
            else:
                # report an error if RRSIGs for one or more algorithms are
                # missing
                for alg in dnssec_algorithms_in_dnskey.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDNSKEY(algorithm=alg), errors, server, client, response)
                for alg in dnssec_algorithms_in_ds.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDS(algorithm=alg), errors, server, client, response)
                for alg in dnssec_algorithms_in_dlv.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDLV(algorithm=alg), errors, server, client, response)

        self._populate_wildcard_status(query, rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276)
        self._populate_cname_status(rrset_info)

        if populate_response_errors:
            for server,client in rrset_info.servers_clients:
                for response in rrset_info.servers_clients[(server,client)]:
                    self._populate_responsiveness_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_response_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_edns_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_cookie_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_foreign_class_warnings(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_case_preservation_warnings(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                for alg in dnssec_algorithms_in_dnskey.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDNSKEY(algorithm=alg), errors, server, client, response)
                for alg in dnssec_algorithms_in_ds.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDS(algorithm=alg), errors, server, client, response)
                for alg in dnssec_algorithms_in_dlv.difference(algs_signing_rrset[(server,client,response)]):
                    Errors.DomainNameAnalysisError.insert_into_list(Errors.MissingRRSIGForAlgDLV(algorithm=alg), errors, server, client, response)

        self._populate_wildcard_status(query, rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276)
        self._populate_cname_status(rrset_info)

        if populate_response_errors:
            for server,client in rrset_info.servers_clients:
                for response in rrset_info.servers_clients[(server,client)]:
                    self._populate_responsiveness_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_response_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_edns_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_cookie_errors(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_foreign_class_warnings(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])
                    self._populate_case_preservation_warnings(qname_obj, response, server, client, self.rrset_warnings[rrset_info], self.rrset_errors[rrset_info])

    def _populate_invalid_response_status(self, query):
        '''Populate self.response_errors[query] with one error object per
        (server, client, response) that returned an invalid response to
        query, classified by the error code recorded during analysis.'''

        self.response_errors[query] = []
        for error_info in query.error_info:
            for server, client in error_info.servers_clients:
                for response in error_info.servers_clients[(server, client)]:
                    if error_info.code == Q.RESPONSE_ERROR_NETWORK_ERROR:
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.NetworkError(tcp=response.effective_tcp, errno=errno.errorcode.get(error_info.arg, 'UNKNOWN')), self.response_errors[query], server, client, response)
                    # NOTE(review): this is "if", not "elif", so a NETWORK_ERROR
                    # response also falls through to this test -- confirm
                    # intentional
                    if error_info.code == Q.RESPONSE_ERROR_FORMERR:
                        #TODO determine if this was related to truncation;
                        #TODO add EDNS opt missing error, as appropriate
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.FormError(tcp=response.effective_tcp, msg_size=response.msg_size), self.response_errors[query], server, client, response)
                    elif error_info.code == Q.RESPONSE_ERROR_TIMEOUT:
                        # count the attempts made with the final transport by
                        # walking the retry history backwards until the last
                        # transport switch
                        attempts = 1
                        for i in range(len(response.history) - 1, -1, -1):
                            if response.history[i].action in (Q.RETRY_ACTION_USE_TCP, Q.RETRY_ACTION_USE_UDP):
                                break
                            attempts += 1
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.Timeout(tcp=response.effective_tcp, attempts=attempts), self.response_errors[query], server, client, response)
                    elif error_info.code == Q.RESPONSE_ERROR_INVALID_RCODE:
                        # if we used EDNS, the response did not, and the RCODE
                        # was FORMERR, SERVFAIL, or NOTIMP, then this is a
                        # legitimate reason for the RCODE
                        if response.effective_edns >= 0 and response.message.edns < 0 and \
                                response.message.rcode() in (dns.rcode.FORMERR, dns.rcode.SERVFAIL, dns.rcode.NOTIMP):
                            pass
                        # if we used EDNS, the response also used EDNS, and the
                        # RCODE was BADVERS, then this is a legitimate reason
                        # for the RCODE
                        elif response.effective_edns >= 0 and response.message.edns >= 0 and \
                                response.message.rcode() == dns.rcode.BADVERS:
                            pass
                        else:
                            Errors.DomainNameAnalysisError.insert_into_list(Errors.InvalidRcode(tcp=response.effective_tcp, rcode=dns.rcode.to_text(response.message.rcode())), self.response_errors[query], server, client, response)
                    elif error_info.code == Q.RESPONSE_ERROR_OTHER:
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.UnknownResponseError(tcp=response.effective_tcp), self.response_errors[query], server, client, response)
        self.response_warnings[query] = []
        for referral_info in query.referral_info:
            for server, client in referral_info.servers_clients:
                for response in referral_info.servers_clients[(server, client)]:
                    if response.is_authoritative():
                        Errors.DomainNameAnalysisError.insert_into_list(Errors.AuthoritativeReferral(), self.response_warnings[query], server, client, response)

        for truncated_info in query.truncated_info:
            for server, client in truncated_info.servers_clients:
                for response in truncated_info.servers_clients[(server, client)]:
                    # NOTE(review): self is passed as the qname_obj argument
                    # here (unlike the rrset path, which resolves a qname_obj);
                    # presumably intentional since truncated responses are for
                    # this name -- confirm
                    self._populate_responsiveness_errors(self, response, server, client, self.response_warnings[query], self.response_errors[query])
                    self._populate_response_errors(self, response, server, client, self.response_warnings[query], self.response_errors[query])
                    self._populate_edns_errors(self, response, server, client, self.response_warnings[query], self.response_errors[query])
                    self._populate_cookie_errors(self, response, server, client, self.response_warnings[query], self.response_errors[query])
                    self._populate_foreign_class_warnings(self, response, server, client, self.response_warnings[query], self.response_errors[query])
                    self._populate_case_preservation_warnings(self, response, server, client, self.response_warnings[query], self.response_errors[query])

    def _populate_rrsig_status_all(self, supported_algs, ignore_rfc8624, ignore_rfc9276):
        '''Reset all RRSIG-related status structures, then assess RRSIG status
        for every answer RRset (including DNAME/CNAME synthesis chains) of
        every query made for this name.'''

        self.rrset_warnings = {}
        self.rrset_errors = {}
        self.rrsig_status = {}
        self.dname_status = {}
        self.wildcard_status = {}
        self.response_errors = {}
        self.response_warnings = {}

        # KSK/ZSK sets are only meaningful if a DNSKEY query was made
        if (self.name, dns.rdatatype.DNSKEY) in self.queries:
            self.zsks = set()
            self.ksks = set()

        _logger.debug('Assessing RRSIG status of %s...' % (fmt.humanize_name(self.name)))
        for (qname, rdtype), query in self.queries.items():

            items_to_validate = []
            for rrset_info in query.answer_info:
                items_to_validate.append(rrset_info)
                if rrset_info.dname_info is not None:
                    items_to_validate.append(rrset_info.dname_info)
                for cname_rrset_info in rrset_info.cname_info_from_dname:
                    items_to_validate.append(cname_rrset_info.dname_info)
                    items_to_validate.append(cname_rrset_info)

            for rrset_info in items_to_validate:
                qname_obj = self.get_name(rrset_info.rrset.name)
                # DS RRsets are signed by the parent zone; DLV by the DLV parent
                if rdtype == dns.rdatatype.DS and \
                        qname_obj.name == rrset_info.rrset.name and qname_obj.is_zone():
                    qname_obj = qname_obj.parent
                elif rdtype == dns.rdatatype.DLV:
                    qname_obj = qname_obj.dlv_parent

                self._populate_rrsig_status(query, rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276)

            self._populate_invalid_response_status(query)

    def _finalize_key_roles(self):
        '''Derive published (neither KSK nor ZSK) and revoked key sets once
        all RRSIG statuses have been assessed.'''

        if (self.name, dns.rdatatype.DNSKEY) in self.queries:
            self.published_keys = set(self.get_dnskeys()).difference(self.zsks.union(self.ksks))
            self.revoked_keys = set([x for x in self.get_dnskeys() if x.rdata.flags & fmt.DNSKEY_FLAGS['revoke']])

    def _populate_ns_status(self, warn_no_ipv4=True, warn_no_ipv6=False):
        '''Compare the NS RRsets (and glue) from the parent and child sides of
        the delegation and record warnings/errors for mismatches, missing
        glue, unresolvable NS names, and private/zero addresses.'''

        if not self.is_zone():
            return
        if self.parent is None:
            return
        if self.analysis_type != ANALYSIS_TYPE_AUTHORITATIVE:
            return
        if self.explicit_delegation:
            return

        all_names = self.get_ns_names()
        names_from_child = self.get_ns_names_in_child()
        names_from_parent = self.get_ns_names_in_parent()

        auth_ns_response = self.queries[(self.name, dns.rdatatype.NS)].is_valid_complete_authoritative_response_any()

        glue_mapping = self.get_glue_ip_mapping()
        auth_mapping = self.get_auth_ns_ip_mapping()

        ns_names_not_in_child = []
        ns_names_not_in_parent = []
        names_error_resolving = []
        names_with_glue_mismatch_ipv4 = []
        names_with_glue_mismatch_ipv6 = []
        names_with_no_glue_ipv4 = []
        names_with_no_glue_ipv6 = []
        names_with_no_auth_ipv4 = []
        names_with_no_auth_ipv6 = []
        names_missing_glue = []
        names_missing_auth = []
        names_auth_private = set()
        names_auth_zero = set()
        names_glue_private = set()
        names_glue_zero = set()

        for name in all_names:
            # if name resolution resulted in an error (other than NXDOMAIN)
            if name not in auth_mapping:
                auth_addrs = set()
                names_error_resolving.append(name)
            else:
                auth_addrs = auth_mapping[name]
                # if name resolution completed successfully, but the response was
                # negative for both A and AAAA (NXDOMAIN or NODATA)
                if not auth_mapping[name]:
                    names_missing_auth.append(name)

            for addr in auth_addrs:
                if LOOPBACK_IPV4_RE.match(addr) or addr == LOOPBACK_IPV6 or \
                        RFC_1918_RE.match(addr) or LINK_LOCAL_RE.match(addr) or UNIQ_LOCAL_RE.match(addr):
                    names_auth_private.add(name)
                if ZERO_SLASH8_RE.search(addr):
                    names_auth_zero.add(name)

            if names_from_parent:
                name_in_parent = name in names_from_parent
            elif self.delegation_status == Status.DELEGATION_STATUS_INCOMPLETE:
                name_in_parent = False
            else:
                name_in_parent = None

            if name_in_parent:
                # if glue is required and not supplied
                if name.is_subdomain(self.name) and not glue_mapping[name]:
                    names_missing_glue.append(name)

                for addr in glue_mapping[name]:
                    if LOOPBACK_IPV4_RE.match(addr) or addr == LOOPBACK_IPV6 or \
                            RFC_1918_RE.match(addr) or LINK_LOCAL_RE.match(addr) or UNIQ_LOCAL_RE.match(addr):
                        names_glue_private.add(name)
                    if ZERO_SLASH8_RE.search(addr):
                        names_glue_zero.add(name)

                # if there are both glue and authoritative addresses supplied, check that it matches the authoritative response
                if glue_mapping[name] and auth_addrs:
                    # there are authoritative address records either of type A
                    # or AAAA and also glue records of either type A or AAAA
                    glue_addrs_ipv4 = set([x for x in glue_mapping[name] if x.version == 4])
                    glue_addrs_ipv6 = set([x for x in glue_mapping[name] if x.version == 6])
                    auth_addrs_ipv4 = set([x for x in auth_addrs if x.version == 4])
                    auth_addrs_ipv6 = set([x for x in auth_addrs if x.version == 6])

                    if auth_addrs_ipv4:
                        # there are authoritative A records for the name...
if not glue_addrs_ipv4: # ...but no A glue names_with_no_glue_ipv4.append(name) elif glue_addrs_ipv4 != auth_addrs_ipv4: # ...but the A glue does not match names_with_glue_mismatch_ipv4.append((name, glue_addrs_ipv4, auth_addrs_ipv4)) elif glue_addrs_ipv4: # there are A glue records for the name # but no authoritative A records. names_with_no_auth_ipv4.append(name) if auth_addrs_ipv6: # there are authoritative AAAA records for the name if not glue_addrs_ipv6: # ...but no AAAA glue names_with_no_glue_ipv6.append(name) elif glue_addrs_ipv6 != auth_addrs_ipv6: # ...but the AAAA glue does not match names_with_glue_mismatch_ipv6.append((name, glue_addrs_ipv6, auth_addrs_ipv6)) elif glue_addrs_ipv6: # there are AAAA glue records for the name # but no authoritative AAAA records. names_with_no_auth_ipv6.append(name) elif name_in_parent is False: ns_names_not_in_parent.append(name) if name not in names_from_child and auth_ns_response: ns_names_not_in_child.append(name) if ns_names_not_in_child: ns_names_not_in_child.sort() self.delegation_warnings[dns.rdatatype.DS].append(Errors.NSNameNotInChild(names=[fmt.humanize_name(x) for x in ns_names_not_in_child], parent=fmt.humanize_name(self.parent_name()))) if ns_names_not_in_parent: ns_names_not_in_child.sort() self.delegation_warnings[dns.rdatatype.DS].append(Errors.NSNameNotInParent(names=[fmt.humanize_name(x) for x in ns_names_not_in_parent], parent=fmt.humanize_name(self.parent_name()))) if names_error_resolving: names_error_resolving.sort() self.zone_errors.append(Errors.ErrorResolvingNSName(names=[fmt.humanize_name(x) for x in names_error_resolving])) if not self._allow_private: if names_auth_private: names_auth_private = list(names_auth_private) names_auth_private.sort() self.zone_errors.append(Errors.NSNameResolvesToPrivateIP(names=[fmt.humanize_name(x) for x in names_auth_private])) if names_glue_private: names_glue_private = list(names_glue_private) names_glue_private.sort() 
                self.delegation_errors[dns.rdatatype.DS].append(Errors.GlueReferencesPrivateIP(names=[fmt.humanize_name(x) for x in names_glue_private]))

        # glue present on one side only, or mismatched, per address family
        if names_with_no_glue_ipv4:
            names_with_no_glue_ipv4.sort()
            for name in names_with_no_glue_ipv4:
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.MissingGlueIPv4(name=fmt.humanize_name(name)))

        if names_with_no_glue_ipv6:
            names_with_no_glue_ipv6.sort()
            for name in names_with_no_glue_ipv6:
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.MissingGlueIPv6(name=fmt.humanize_name(name)))

        if names_with_no_auth_ipv4:
            names_with_no_auth_ipv4.sort()
            for name in names_with_no_auth_ipv4:
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.ExtraGlueIPv4(name=fmt.humanize_name(name)))

        if names_with_no_auth_ipv6:
            names_with_no_auth_ipv6.sort()
            for name in names_with_no_auth_ipv6:
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.ExtraGlueIPv6(name=fmt.humanize_name(name)))

        if names_with_glue_mismatch_ipv4:
            names_with_glue_mismatch_ipv4.sort()
            for name, glue_addrs, auth_addrs in names_with_glue_mismatch_ipv4:
                glue_addrs = list(glue_addrs)
                glue_addrs.sort()
                auth_addrs = list(auth_addrs)
                auth_addrs.sort()
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.GlueMismatchError(name=fmt.humanize_name(name), glue_addresses=glue_addrs, auth_addresses=auth_addrs))

        if names_with_glue_mismatch_ipv6:
            names_with_glue_mismatch_ipv6.sort()
            for name, glue_addrs, auth_addrs in names_with_glue_mismatch_ipv6:
                glue_addrs = list(glue_addrs)
                glue_addrs.sort()
                auth_addrs = list(auth_addrs)
                auth_addrs.sort()
                self.delegation_warnings[dns.rdatatype.DS].append(Errors.GlueMismatchError(name=fmt.humanize_name(name), glue_addresses=glue_addrs, auth_addresses=auth_addrs))

        if names_missing_glue:
            names_missing_glue.sort()
            self.delegation_warnings[dns.rdatatype.DS].append(Errors.MissingGlueForNSName(names=[fmt.humanize_name(x) for x in names_missing_glue]))

        if names_missing_auth:
            names_missing_auth.sort()
            self.zone_errors.append(Errors.NoAddressForNSName(names=[fmt.humanize_name(x) for x in names_missing_auth]))

        ips_from_parent = self.get_servers_in_parent()
        ips_from_parent_ipv4 = [x for x in ips_from_parent if x.version == 4]
        ips_from_parent_ipv6 = [x for x in ips_from_parent if x.version == 6]

        ips_from_child = self.get_servers_in_child()
        ips_from_child_ipv4 = [x for x in ips_from_child if x.version == 4]
        ips_from_child_ipv6 = [x for x in ips_from_child if x.version == 6]

        # NOTE(review): the outer condition requires BOTH lists to be empty,
        # so the first two branches below are unreachable and reference is
        # always 'parent or child'; the condition may have been intended as
        # "not (parent and child)" -- confirm before changing behavior.
        if not (ips_from_parent_ipv4 or ips_from_child_ipv4) and warn_no_ipv4:
            if ips_from_parent_ipv4:
                reference = 'child'
            elif ips_from_child_ipv4:
                reference = 'parent'
            else:
                reference = 'parent or child'
            self.zone_warnings.append(Errors.NoNSAddressesForIPv4(reference=reference))

        if not (ips_from_parent_ipv6 or ips_from_child_ipv6) and warn_no_ipv6:
            if ips_from_parent_ipv6:
                reference = 'child'
            elif ips_from_child_ipv6:
                reference = 'parent'
            else:
                reference = 'parent or child'
            self.zone_warnings.append(Errors.NoNSAddressesForIPv6(reference=reference))

    def _populate_delegation_status(self, supported_algs, supported_digest_algs, ignore_rfc8624):
        '''Reset delegation-related status structures and assess DS, CDS,
        CDNSKEY (and, if configured, DLV) status, then NS and server status.'''

        self.ds_status_by_ds = {}
        self.ds_status_by_dnskey = {}
        self.zone_errors = []
        self.zone_warnings = []
        self.zone_status = []
        self.delegation_errors = {}
        self.delegation_warnings = {}
        self.delegation_status = {}
        self.dnskey_with_ds = {}

        self._populate_ds_status(dns.rdatatype.DS, supported_algs, supported_digest_algs, ignore_rfc8624)
        self._populate_ds_status(dns.rdatatype.CDS, supported_algs, supported_digest_algs, ignore_rfc8624)
        self._populate_ds_status(dns.rdatatype.CDNSKEY, supported_algs, supported_digest_algs, ignore_rfc8624)
        if self.dlv_parent is not None:
            self._populate_ds_status(dns.rdatatype.DLV, supported_algs, supported_digest_algs, ignore_rfc8624)
        self._populate_ns_status()
        self._populate_server_status()

    def _populate_ds_status(self, rdtype, supported_algs, supported_digest_algs, ignore_rfc8624):
        '''Assess the status of DS-like records (DS/CDS/CDNSKEY/DLV) against
        the zone's DNSKEYs and derive the delegation status for rdtype.

        Raises ValueError for any other rdtype.'''

        if rdtype not in (dns.rdatatype.DS, dns.rdatatype.CDS,
                dns.rdatatype.CDNSKEY, dns.rdatatype.DLV):
            raise ValueError('Type can only be DS, CDS, CDNSKEY, or DLV.')
        if self.parent is None:
            return
        if rdtype == dns.rdatatype.DLV:
            name = self.dlv_name
            if name is None:
                raise ValueError('No DLV specified for DomainNameAnalysis object.')
        else:
            name = self.name

        _logger.debug('Assessing delegation status of %s...' % (fmt.humanize_name(self.name)))
        self.ds_status_by_ds[rdtype] = {}
        self.ds_status_by_dnskey[rdtype] = {}
        self.delegation_warnings[rdtype] = []
        self.delegation_errors[rdtype] = []
        self.delegation_status[rdtype] = None
        self.dnskey_with_ds[rdtype] = set()

        try:
            ds_rrset_answer_info = self.queries[(name, rdtype)].answer_info
        except KeyError:
            # zones should have DS queries, even if there
            # aren't any for CDS or CDNSKEY.
            if rdtype == dns.rdatatype.DS and self.is_zone():
                raise
            else:
                return

        ds_rrset_exists = False
        secure_path = False

        bailiwick_map, default_bailiwick = self.get_bailiwick_mapping()

        # fall back to an empty multiquery object when no DNSKEY query was made
        if (self.name, dns.rdatatype.DNSKEY) in self.queries:
            dnskey_multiquery = self.queries[(self.name, dns.rdatatype.DNSKEY)]
        else:
            dnskey_multiquery = self._query_cls(self.name, dns.rdatatype.DNSKEY, dns.rdataclass.IN)

        # populate all the servers queried for DNSKEYs to determine
        # what problems there were with regard to DS records and if
        # there is at least one match
        dnskey_server_client_responses = set()
        for dnskey_query in dnskey_multiquery.queries.values():
            # for responsive servers consider only those designated as
            # authoritative
            for server in set(dnskey_query.responses).intersection(self.zone.get_auth_or_designated_servers()):
                bailiwick = bailiwick_map.get(server, default_bailiwick)
                for client in dnskey_query.responses[server]:
                    response = dnskey_query.responses[server][client]
                    if response.is_valid_response() and response.is_complete_response() and not response.is_referral(self.name, dns.rdatatype.DNSKEY, dnskey_query.rdclass, bailiwick):
                        dnskey_server_client_responses.add((server,client,response))

        # Use digest alg 2 for generating CDS from CDNSKEY
        digest_algs_for_cds = (2,)

        for ds_rrset_info in ds_rrset_answer_info:
            # there are CNAMEs that show up here...
            if not (ds_rrset_info.rrset.name == name and ds_rrset_info.rrset.rdtype == rdtype):
                continue

            ds_rrset_exists = True

            # XXX This is a bit of a hack.  Basically, if CDNSKEY is used, then
            # ds_rrset_info actually contains information on CDNSKEY answers.
            # We need to convert them to CDS responses for them to work with
            # the rest of the logic here.  An more elegant solution is desirable.
            orig_ds_rrset_info = ds_rrset_info
            if rdtype == dns.rdatatype.CDNSKEY:
                ds_rrset_info = Response.dnskey_rrset_to_ds_rrset(ds_rrset_info, {}, digest_algs_for_cds)

            # for each set of DS records provided by one or more servers,
            # identify the set of DNSSEC algorithms and the set of digest
            # algorithms per algorithm/key tag combination
            ds_algs = set()
            supported_ds_algs = set()
            for ds_rdata in ds_rrset_info.rrset:
                if ds_rdata.algorithm in supported_algs and ds_rdata.digest_type in supported_digest_algs:
                    supported_ds_algs.add(ds_rdata.algorithm)
                ds_algs.add(ds_rdata.algorithm)

            if supported_ds_algs:
                secure_path = True

            # per-(server,client,response) sets of algorithms for which a
            # self-signing (and separately, a validating self-signing) DNSKEY
            # was observed; entries are deleted once all ds_algs are covered
            algs_signing_sep = {}
            algs_validating_sep = {}
            for server,client,response in dnskey_server_client_responses:
                algs_signing_sep[(server,client,response)] = set()
                algs_validating_sep[(server,client,response)] = set()

            for ds_rdata in ds_rrset_info.rrset:
                self.ds_status_by_ds[rdtype][ds_rdata] = {}

                for dnskey_info in dnskey_multiquery.answer_info:
                    # there are CNAMEs that show up here...
                    if not (dnskey_info.rrset.name == self.name and dnskey_info.rrset.rdtype == dns.rdatatype.DNSKEY):
                        continue

                    validation_status_mapping = { True: set(), False: set(), None: set() }
                    for dnskey_rdata in dnskey_info.rrset:
                        dnskey = self._dnskeys[dnskey_rdata]

                        if dnskey not in self.ds_status_by_dnskey[rdtype]:
                            self.ds_status_by_dnskey[rdtype][dnskey] = {}

                        # if the key tag doesn't match, then go any farther
                        if not (ds_rdata.key_tag in (dnskey.key_tag, dnskey.key_tag_no_revoke) and \
                                ds_rdata.algorithm == dnskey.rdata.algorithm):
                            continue

                        # check if the digest is a match
                        ds_status = Status.DSStatus(ds_rdata, ds_rrset_info, dnskey, supported_digest_algs, ignore_rfc8624)
                        validation_status_mapping[ds_status.digest_valid].add(ds_status)

                        # if dnskey exists, then add to dnskey_with_ds
                        if ds_status.validation_status not in \
                                (Status.DS_STATUS_INDETERMINATE_NO_DNSKEY, Status.DS_STATUS_INDETERMINATE_MATCH_PRE_REVOKE):
                            self.dnskey_with_ds[rdtype].add(dnskey)

                        for rrsig in dnskey_info.rrsig_info:
                            # move along if DNSKEY is not self-signing
                            if dnskey not in self.rrsig_status[dnskey_info][rrsig]:
                                continue

                            # move along if key tag is not the same (i.e., revoke)
                            if dnskey.key_tag != rrsig.key_tag:
                                continue

                            for (server,client) in dnskey_info.rrsig_info[rrsig].servers_clients:
                                for response in dnskey_info.rrsig_info[rrsig].servers_clients[(server,client)]:
                                    if (server,client,response) in algs_signing_sep:
                                        # note that this algorithm is part of a self-signing DNSKEY
                                        algs_signing_sep[(server,client,response)].add(rrsig.algorithm)
                                        if not ds_algs.difference(algs_signing_sep[(server,client,response)]):
                                            del algs_signing_sep[(server,client,response)]

                                    if (server,client,response) in algs_validating_sep:
                                        # retrieve the status of the DNSKEY RRSIG
                                        rrsig_status = self.rrsig_status[dnskey_info][rrsig][dnskey]

                                        # if the DS digest and the RRSIG are both valid, and the digest algorithm
                                        # is not deprecated then mark it as a SEP
                                        if ds_status.validation_status == Status.DS_STATUS_VALID and \
                                                rrsig_status.validation_status == Status.RRSIG_STATUS_VALID:
                                            # note that this algorithm is part of a successful self-signing DNSKEY
                                            algs_validating_sep[(server,client,response)].add(rrsig.algorithm)
                                            if not ds_algs.difference(algs_validating_sep[(server,client,response)]):
                                                del algs_validating_sep[(server,client,response)]

                    # if we got results for multiple keys, then just select the one that validates
                    for status in True, False, None:
                        if validation_status_mapping[status]:
                            for ds_status in validation_status_mapping[status]:
                                self.ds_status_by_ds[rdtype][ds_status.ds][ds_status.dnskey] = ds_status
                                self.ds_status_by_dnskey[rdtype][ds_status.dnskey][ds_status.ds] = ds_status
                            break

                # no corresponding DNSKEY
                if not self.ds_status_by_ds[rdtype][ds_rdata]:
                    ds_status = Status.DSStatus(ds_rdata, ds_rrset_info, None, supported_digest_algs, ignore_rfc8624)
                    self.ds_status_by_ds[rdtype][ds_rdata][None] = ds_status
                    if None not in self.ds_status_by_dnskey[rdtype]:
                        self.ds_status_by_dnskey[rdtype][None] = {}
                    self.ds_status_by_dnskey[rdtype][None][ds_rdata] = ds_status

            if dnskey_server_client_responses:
                if not algs_validating_sep:
                    self.delegation_status[rdtype] = Status.DELEGATION_STATUS_SECURE
                else:
                    for server,client,response in dnskey_server_client_responses:
                        if (server,client,response) not in algs_validating_sep or \
                                supported_ds_algs.intersection(algs_validating_sep[(server,client,response)]):
                            self.delegation_status[rdtype] = Status.DELEGATION_STATUS_SECURE
                        elif supported_ds_algs:
                            if rdtype in (dns.rdatatype.CDS, dns.rdatatype.CDNSKEY):
                                # Associate with the RRSet
                                err = Errors.NoSEPCDNSKEY(source=dns.rdatatype.to_text(rdtype))
                                Errors.DomainNameAnalysisError.insert_into_list(err, self.rrset_errors[orig_ds_rrset_info], server, client, response)
                            else:
                                # Associate with the delegation
                                err = Errors.NoSEP(source=dns.rdatatype.to_text(rdtype))
                                Errors.DomainNameAnalysisError.insert_into_list(err, self.delegation_errors[rdtype], server, client, response)

                # report an error if one or more algorithms are incorrectly validated
                for (server,client,response) in algs_signing_sep:
                    for alg in ds_algs.difference(algs_signing_sep[(server,client,response)]):
                        if rdtype in (dns.rdatatype.CDS, dns.rdatatype.CDNSKEY):
                            # the "delete" algorithm is a legitimate CDS/CDNSKEY
                            # value, not a missing SEP
                            if alg == DNSSEC_DELETE_ALG:
                                continue
                            # Associate with the RRSet
                            err = Errors.MissingSEPForAlgCDNSKEY(algorithm=alg, source=dns.rdatatype.to_text(rdtype))
                            Errors.DomainNameAnalysisError.insert_into_list(err, self.rrset_errors[orig_ds_rrset_info], server, client, response)
                        else:
                            # Associate with the delegation
                            err = Errors.MissingSEPForAlg(algorithm=alg, source=dns.rdatatype.to_text(rdtype))
                            Errors.DomainNameAnalysisError.insert_into_list(err, self.delegation_errors[rdtype], server, client, response)
            else:
                if rdtype in (dns.rdatatype.CDS, dns.rdatatype.CDNSKEY):
                    # Associate with the RRSet
                    err = Errors.NoSEPCDNSKEY(source=dns.rdatatype.to_text(rdtype))
                    Errors.DomainNameAnalysisError.insert_into_list(err, self.rrset_errors[orig_ds_rrset_info], None, None, None)
                else:
                    # Associate with the delegation
                    err = Errors.NoSEP(source=dns.rdatatype.to_text(rdtype))
                    Errors.DomainNameAnalysisError.insert_into_list(err, self.delegation_errors[rdtype], None, None, None)

        # derive a delegation status if the loop above did not already mark it
        # SECURE
        if self.delegation_status[rdtype] is None:
            if ds_rrset_answer_info:
                if ds_rrset_exists:
                    # DS RRs exist
                    if secure_path:
                        # If any DNSSEC algorithms are supported, then status
                        # is bogus because there should have been matching KSK.
                        self.delegation_status[rdtype] = Status.DELEGATION_STATUS_BOGUS
                    else:
                        # If no algorithms are supported, then this is a
                        # provably insecure delegation.
                        self.delegation_status[rdtype] = Status.DELEGATION_STATUS_INSECURE
                else:
                    # Only CNAME returned for DS query.  With no DS records and
                    # no valid non-existence proof, the delegation is bogus.
                    self.delegation_status[rdtype] = Status.DELEGATION_STATUS_BOGUS
            elif self.parent.signed:
                # no DS answer from a signed parent: bogus unless a valid
                # NSEC/NSEC3 proof of non-existence downgrades it to insecure
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_BOGUS
                for nsec_status_list in [self.nxdomain_status[n] for n in self.nxdomain_status if n.qname == name and n.rdtype == dns.rdatatype.DS] + \
                        [self.nodata_status[n] for n in self.nodata_status if n.qname == name and n.rdtype == dns.rdatatype.DS]:
                    for nsec_status in nsec_status_list:
                        if nsec_status.validation_status == Status.NSEC_STATUS_VALID:
                            self.delegation_status[rdtype] = Status.DELEGATION_STATUS_INSECURE
                            break
            else:
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_INSECURE

        # if no servers (designated or stealth authoritative) respond or none
        # respond authoritatively, then make the delegation as lame
        if not self.get_auth_or_designated_servers():
            if self.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_LAME
        elif not self.get_responsive_auth_or_designated_servers():
            if self.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_LAME
        elif not self.get_valid_auth_or_designated_servers():
            if self.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_LAME
        elif self.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE and not self._auth_servers_clients:
            if self.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                self.delegation_status[rdtype] = Status.DELEGATION_STATUS_LAME

        if rdtype == dns.rdatatype.DS:
            try:
                ds_nxdomain_info = [x for x in self.queries[(name, rdtype)].nxdomain_info if x.qname == name and x.rdtype == dns.rdatatype.DS][0]
            except IndexError:
                pass
            else:
                if self.referral_rdtype is not None:
                    # now check if there is a parent server that is providing an
                    # NXDOMAIN for the referral.  If so, this is due to the
                    # delegation not being found on all servers.
                    try:
                        delegation_nxdomain_info = [x for x in self.queries[(name, self.referral_rdtype)].nxdomain_info if x.qname == name and x.rdtype == self.referral_rdtype][0]
                    except IndexError:
                        # if there were not NXDOMAINs received in response to the
                        # referral query, then use all the servers/clients
                        servers_clients = ds_nxdomain_info.servers_clients
                    else:
                        # if there were NXDOMAINs received in response to the
                        # referral query, then filter those out
                        servers_clients = set(ds_nxdomain_info.servers_clients).difference(delegation_nxdomain_info.servers_clients)
                else:
                    # if there was no referral query, then use all the
                    # servers/clients
                    servers_clients = ds_nxdomain_info.servers_clients

                # if there were any remaining NXDOMAIN responses, then add the
                # error
                if servers_clients:
                    err = Errors.NoNSInParentNXDOMAIN(parent=fmt.humanize_name(self.parent_name()))
                    for server, client in servers_clients:
                        for response in ds_nxdomain_info.servers_clients[(server, client)]:
                            err.add_server_client(server, client, response)
                    self.delegation_errors[rdtype].append(err)
                    if self.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                        self.delegation_status[rdtype] = Status.DELEGATION_STATUS_INCOMPLETE

            try:
                ds_nodata_info = [x for x in self.queries[(name, rdtype)].nodata_info if x.qname == name and x.rdtype == dns.rdatatype.DS][0]
            except IndexError:
                pass
            else:
                # look for an NSEC(3)-based indication that the parent served a
                # referral without the NS bit set
                ns_bit_error = None
                for status in self.nodata_status[ds_nodata_info]:
                    try:
                        ns_bit_error = [x for x in status.errors if isinstance(x, Errors.ReferralWithoutNSBitNSEC)][0]
                    except IndexError:
                        pass
                    else:
                        break
                if ns_bit_error is not None:
                    err = Errors.NoNSInParentNoData(parent=fmt.humanize_name(self.parent_name()))
                    for server, client in status.nsec_set_info.servers_clients:
                        for response in status.nsec_set_info.servers_clients[(server, client)]:
                            err.add_server_client(server, client, response)
                    self.delegation_errors[rdtype].append(err)

    def _populate_server_status(self):
        '''Record zone-level errors for designated servers that were
        unresponsive, returned invalid responses, or (for authoritative
        analysis) answered non-authoritatively, over UDP and TCP.'''

        if not self.is_zone():
            return
        if self.parent is None:
            return

        designated_servers = self.get_designated_servers()
        servers_queried_udp = set([x for x in self._all_servers_clients_queried if x[0] in designated_servers])
        servers_queried_tcp = set([x for x in self._all_servers_clients_queried_tcp if x[0] in designated_servers])
        servers_queried = servers_queried_udp.union(servers_queried_tcp)

        unresponsive_udp = servers_queried_udp.difference(self._responsive_servers_clients_udp)
        unresponsive_tcp = servers_queried_tcp.difference(self._responsive_servers_clients_tcp)
        invalid_response_udp = servers_queried.intersection(self._responsive_servers_clients_udp).difference(self._valid_servers_clients_udp)
        invalid_response_tcp = servers_queried.intersection(self._responsive_servers_clients_tcp).difference(self._valid_servers_clients_tcp)
        not_authoritative = servers_queried.intersection(self._valid_servers_clients_udp.union(self._valid_servers_clients_tcp)).difference(self._auth_servers_clients)

        if unresponsive_udp:
            err = Errors.ServerUnresponsiveUDP()
            for server, client in unresponsive_udp:
                err.add_server_client(server, client, None)
            self.zone_errors.append(err)

        if unresponsive_tcp:
            err = Errors.ServerUnresponsiveTCP()
            for server, client in unresponsive_tcp:
                err.add_server_client(server, client, None)
            self.zone_errors.append(err)

        if invalid_response_udp:
            err = Errors.ServerInvalidResponseUDP()
            for server, client in invalid_response_udp:
                err.add_server_client(server, client, None)
            self.zone_errors.append(err)

        if invalid_response_tcp:
            err = Errors.ServerInvalidResponseTCP()
            for server, client in invalid_response_tcp:
                err.add_server_client(server, client, None)
            self.zone_errors.append(err)

        if self.analysis_type == ANALYSIS_TYPE_AUTHORITATIVE:
            if not_authoritative:
                err = Errors.ServerNotAuthoritative()
                for server, client in not_authoritative:
                    err.add_server_client(server, client, None)
                self.zone_errors.append(err)

    def _populate_negative_response_status(self, query, neg_response_info, \
            bad_soa_error_cls, missing_soa_error_cls, upward_referral_error_cls,
            missing_nsec_error_cls, \
            nsec_status_cls, nsec3_status_cls, warnings, errors, supported_algs, ignore_rfc8624, ignore_rfc9276):
        # Assess a negative (NXDOMAIN/NODATA) response: check SOA presence and
        # owner name, upward referrals, and NSEC/NSEC3 proofs; returns the list
        # of NSEC(3) status objects derived from the responses.

        qname_obj = self.get_name(neg_response_info.qname)
        is_zone = qname_obj.name == neg_response_info.qname and qname_obj.is_zone()
        # a negative DS answer is proven by the parent zone
        if query.rdtype == dns.rdatatype.DS and is_zone:
            qname_obj = qname_obj.parent

        soa_owner_name_for_servers = {}
        servers_without_soa = set()
        servers_missing_nsec = set()

        #TODO Handle the case where a parent server sends NXDOMAIN for a
        # delegated child, even when other parent servers, send a proper
        # referral.

        # populate NXDOMAIN status for only those responses that are from
        # servers authoritative or designated as such
        auth_servers = qname_obj.zone.get_auth_or_designated_servers()
        for server, client in neg_response_info.servers_clients:
            if server not in auth_servers:
                continue
            for response in neg_response_info.servers_clients[(server, client)]:
                servers_without_soa.add((server, client, response))
                servers_missing_nsec.add((server, client, response))

                self._populate_responsiveness_errors(qname_obj, response, server, client, warnings, errors)
                self._populate_response_errors(qname_obj, response, server, client, warnings, errors)
                self._populate_edns_errors(qname_obj, response, server, client, warnings, errors)
                self._populate_cookie_errors(qname_obj, response, server, client, warnings, errors)
                self._populate_foreign_class_warnings(qname_obj, response, server, client, warnings, errors)
                self._populate_case_preservation_warnings(qname_obj, response, server, client, warnings, errors)

        for soa_rrset_info in neg_response_info.soa_rrset_info:
            soa_owner_name = soa_rrset_info.rrset.name

            self._populate_rrsig_status(query, soa_rrset_info, self.get_name(soa_owner_name), supported_algs, ignore_rfc8624, ignore_rfc9276, populate_response_errors=False)

            # make sure this query was made to a server designated as
            # authoritative
            if not set([s for (s,c) in soa_rrset_info.servers_clients]).intersection(auth_servers):
                continue

            if soa_owner_name != qname_obj.zone.name:
                err = Errors.DomainNameAnalysisError.insert_into_list(bad_soa_error_cls(soa_owner_name=fmt.humanize_name(soa_owner_name), zone_name=fmt.humanize_name(qname_obj.zone.name)), errors, None, None, None)
            else:
                err = None

            for server, client in soa_rrset_info.servers_clients:
                if server not in auth_servers:
                    continue
                for response in soa_rrset_info.servers_clients[(server, client)]:
                    servers_without_soa.remove((server, client, response))
                    soa_owner_name_for_servers[(server,client,response)] = soa_owner_name

                    if err is not None:
                        if neg_response_info.qname == query.qname or response.recursion_desired_and_available():
                            err.add_server_client(server, client, response)

        for server,client,response in servers_without_soa:
            if neg_response_info.qname == query.qname or response.recursion_desired_and_available():
                # check for an upward referral
                if upward_referral_error_cls is not None and response.is_upward_referral(qname_obj.zone.name):
                    Errors.DomainNameAnalysisError.insert_into_list(upward_referral_error_cls(), errors, server, client, response)
                else:
                    ds_referral = False
                    if query.rdtype == dns.rdatatype.DS:
                        # handle DS as a special case
                        if response.is_referral(query.qname, query.rdtype, query.rdclass, qname_obj.name):
                            ds_referral = True

                    if not ds_referral:
                        Errors.DomainNameAnalysisError.insert_into_list(missing_soa_error_cls(), errors, server, client, response)

        if upward_referral_error_cls is not None:
            try:
                index = errors.index(upward_referral_error_cls())
            except ValueError:
                pass
            else:
                # NotAuthoritative notices are redundant for servers already
                # flagged with an upward referral; strip those server/client
                # pairs (and the notice itself, if emptied)
                upward_referral_error = errors[index]
                for notices in errors, warnings:
                    not_auth_notices = [x for x in notices if isinstance(x, Errors.NotAuthoritative)]
                    for notice in not_auth_notices:
                        for server, client in upward_referral_error.servers_clients:
                            for response in upward_referral_error.servers_clients[(server, client)]:
                                notice.remove_server_client(server, client, response)
                        if not notice.servers_clients:
                            notices.remove(notice)

        statuses = []
        status_by_response = {}
        for nsec_set_info in neg_response_info.nsec_set_info:
            status_by_soa_name = {}

            for nsec_rrset_info in nsec_set_info.rrsets.values():
                self._populate_rrsig_status(query, nsec_rrset_info, qname_obj, supported_algs, ignore_rfc8624, ignore_rfc9276, populate_response_errors=False)

            for server, client in nsec_set_info.servers_clients:
                if server not in auth_servers:
                    continue
                for response in nsec_set_info.servers_clients[(server,client)]:
                    soa_owner_name = soa_owner_name_for_servers.get((server,client,response), qname_obj.zone.name)
                    if soa_owner_name not in status_by_soa_name:
                        if nsec_set_info.use_nsec3:
                            status = nsec3_status_cls(neg_response_info.qname, query.rdtype, \
                                    soa_owner_name, is_zone, nsec_set_info, ignore_rfc9276)
                        else:
                            status = nsec_status_cls(neg_response_info.qname, query.rdtype, \
                                    soa_owner_name, is_zone, nsec_set_info)
                        if status.validation_status == Status.NSEC_STATUS_VALID:
                            if status not in statuses:
                                statuses.append(status)
                        status_by_soa_name[soa_owner_name] = status
                    status = status_by_soa_name[soa_owner_name]

                    if (server,client,response) in servers_missing_nsec:
                        servers_missing_nsec.remove((server,client,response))

                    # a valid status supersedes any invalid one previously
                    # recorded for this (server,client,response)
                    if status.validation_status == Status.NSEC_STATUS_VALID:
                        if (server,client,response) in status_by_response:
                            del status_by_response[(server,client,response)]
                    elif neg_response_info.qname == query.qname or response.recursion_desired_and_available():
                        status_by_response[(server,client,response)] = status

        for (server,client,response), status in status_by_response.items():
            if status not in statuses:
                statuses.append(status)

        for server, client, response in servers_missing_nsec:
            # if DNSSEC was not requested (e.g., for diagnostics purposes),
            # then don't report an issue
            if not (response.query.edns >= 0 and response.query.edns_flags & dns.flags.DO):
                continue

            # report that no NSEC(3) records were returned
            if qname_obj.zone.signed and (neg_response_info.qname == query.qname or response.recursion_desired_and_available()):
                if response.dnssec_requested():
    def _populate_nxdomain_status(self, supported_algs, ignore_rfc8624, ignore_rfc9276):
        """Assess all NXDOMAIN responses for this name.

        Populates self.nxdomain_status/_warnings/_errors (keyed by negative
        response info) via _populate_negative_response_status(), then flags
        servers that answered NXDOMAIN for a name even though they also
        returned a positive (NOERROR) answer for the same name elsewhere.
        """
        self.nxdomain_status = {}
        self.nxdomain_warnings = {}
        self.nxdomain_errors = {}

        _logger.debug('Assessing NXDOMAIN response status of %s...' % (fmt.humanize_name(self.name)))
        for (qname, rdtype), query in self.queries.items():
            for neg_response_info in query.nxdomain_info:
                self.nxdomain_warnings[neg_response_info] = []
                self.nxdomain_errors[neg_response_info] = []
                # delegate the generic negative-response analysis, passing the
                # NXDOMAIN-specific error and status classes
                self.nxdomain_status[neg_response_info] = \
                        self._populate_negative_response_status(query, neg_response_info, \
                                Errors.SOAOwnerNotZoneForNXDOMAIN, Errors.MissingSOAForNXDOMAIN, None, \
                                Errors.MissingNSECForNXDOMAIN, Status.NSECStatusNXDOMAIN, Status.NSEC3StatusNXDOMAIN, \
                                self.nxdomain_warnings[neg_response_info], self.nxdomain_errors[neg_response_info], \
                                supported_algs, ignore_rfc8624, ignore_rfc9276)

                # check for NOERROR/NXDOMAIN inconsistencies
                if neg_response_info.qname in self.yxdomain and rdtype not in (dns.rdatatype.DS, dns.rdatatype.DLV):
                    for (qname2, rdtype2), query2 in self.queries.items():
                        if rdtype2 in (dns.rdatatype.DS, dns.rdatatype.DLV):
                            continue

                        # positive answers for the same qname from the same
                        # server/client pairs contradict the NXDOMAIN
                        for rrset_info in [x for x in query2.answer_info if x.rrset.name == neg_response_info.qname]:
                            shared_servers_clients = set(rrset_info.servers_clients).intersection(neg_response_info.servers_clients)
                            if shared_servers_clients:
                                # record the inconsistency on both the negative
                                # response and the positive RRset
                                err1 = Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAIN(qname=fmt.humanize_name(neg_response_info.qname), rdtype_nxdomain=dns.rdatatype.to_text(rdtype), rdtype_noerror=dns.rdatatype.to_text(query2.rdtype)), self.nxdomain_warnings[neg_response_info], None, None, None)
                                err2 = Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAIN(qname=fmt.humanize_name(neg_response_info.qname), rdtype_nxdomain=dns.rdatatype.to_text(rdtype), rdtype_noerror=dns.rdatatype.to_text(query2.rdtype)), self.rrset_warnings[rrset_info], None, None, None)
                                for server, client in shared_servers_clients:
                                    for response in neg_response_info.servers_clients[(server, client)]:
                                        err1.add_server_client(server, client, response)
                                        err2.add_server_client(server, client, response)

                        # NODATA responses for the same qname likewise
                        # contradict the NXDOMAIN
                        for neg_response_info2 in [x for x in query2.nodata_info if x.qname == neg_response_info.qname]:
                            shared_servers_clients = set(neg_response_info2.servers_clients).intersection(neg_response_info.servers_clients)
                            if shared_servers_clients:
                                err1 = Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAIN(qname=fmt.humanize_name(neg_response_info.qname), rdtype_nxdomain=dns.rdatatype.to_text(rdtype), rdtype_noerror=dns.rdatatype.to_text(query2.rdtype)), self.nxdomain_warnings[neg_response_info], None, None, None)
                                err2 = Errors.DomainNameAnalysisError.insert_into_list(Errors.InconsistentNXDOMAIN(qname=fmt.humanize_name(neg_response_info.qname), rdtype_nxdomain=dns.rdatatype.to_text(rdtype), rdtype_noerror=dns.rdatatype.to_text(query2.rdtype)), self.nodata_warnings[neg_response_info2], None, None, None)
                                for server, client in shared_servers_clients:
                                    for response in neg_response_info.servers_clients[(server, client)]:
                                        err1.add_server_client(server, client, response)
                                        err2.add_server_client(server, client, response)
    def _populate_nodata_status(self, supported_algs, ignore_rfc8624, ignore_rfc9276):
        """Assess all NODATA responses for this name.

        Populates self.nodata_status/_warnings/_errors (keyed by negative
        response info) by delegating to _populate_negative_response_status()
        with the NODATA-specific error and status classes.
        """
        self.nodata_status = {}
        self.nodata_warnings = {}
        self.nodata_errors = {}

        _logger.debug('Assessing NODATA response status of %s...' % (fmt.humanize_name(self.name)))
        for (qname, rdtype), query in self.queries.items():
            for neg_response_info in query.nodata_info:
                self.nodata_warnings[neg_response_info] = []
                self.nodata_errors[neg_response_info] = []
                # unlike NXDOMAIN, NODATA analysis also passes
                # Errors.UpwardReferral as the upward-referral error class
                self.nodata_status[neg_response_info] = \
                        self._populate_negative_response_status(query, neg_response_info, \
                                Errors.SOAOwnerNotZoneForNODATA, Errors.MissingSOAForNODATA, Errors.UpwardReferral, \
                                Errors.MissingNSECForNODATA, Status.NSECStatusNODATA, Status.NSEC3StatusNODATA, \
                                self.nodata_warnings[neg_response_info], self.nodata_errors[neg_response_info], \
                                supported_algs, ignore_rfc8624, ignore_rfc9276)
    def _populate_inconsistent_negative_dnssec_responses(self, neg_response_info, neg_status, ignore_rfc9276):
        """Check NSEC/NSEC3 proofs in a negative response against known positive answers.

        For each NSEC(3) status proving the negative response, test every
        (qname, rdtype) in self.yxrrset_proper (names/types known to exist):
        if the same NSEC(3) set would "prove" that an existing name is covered
        (NXDOMAIN-style) or that an existing type is absent from the bitmap
        (NODATA-style), record the corresponding error on the NSEC status.
        """
        for nsec_status in neg_status[neg_response_info]:
            # collect contradicting (qname, rdtype) pairs per error class so a
            # single consolidated error is emitted for each class
            queries_by_error = {
                    Errors.ExistingTypeNotInBitmapNSEC3: [],
                    Errors.ExistingTypeNotInBitmapNSEC: [],
                    Errors.ExistingCoveredNSEC3: [],
                    Errors.ExistingCoveredNSEC: [],
            }
            nsec_set_info = nsec_status.nsec_set_info
            for (qname, rdtype) in self.yxrrset_proper:
                if rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV):
                    continue
                # would this NSEC(3) set (incorrectly) prove NXDOMAIN for an
                # existing name?
                if nsec_set_info.use_nsec3:
                    status = Status.NSEC3StatusNXDOMAIN(qname, rdtype, nsec_status.origin, nsec_status.is_zone, nsec_set_info, ignore_rfc9276)
                    err_cls = Errors.ExistingCoveredNSEC3
                else:
                    status = Status.NSECStatusNXDOMAIN(qname, rdtype, nsec_status.origin, nsec_status.is_zone, nsec_set_info)
                    err_cls = Errors.ExistingCoveredNSEC
                if status.validation_status == Status.NSEC_STATUS_VALID and not status.opt_out:
                    queries_by_error[err_cls].append((qname, rdtype))

                # would this NSEC(3) set (incorrectly) prove NODATA for an
                # existing type?
                if nsec_set_info.use_nsec3:
                    status = Status.NSEC3StatusNODATA(qname, rdtype, nsec_status.origin, nsec_status.is_zone, nsec_set_info, ignore_rfc9276)
                    err_cls = Errors.ExistingTypeNotInBitmapNSEC3
                else:
                    status = Status.NSECStatusNODATA(qname, rdtype, nsec_status.origin, nsec_status.is_zone, nsec_set_info, sname_must_match=True)
                    err_cls = Errors.ExistingTypeNotInBitmapNSEC
                if status.validation_status == Status.NSEC_STATUS_VALID and not status.opt_out:
                    queries_by_error[err_cls].append((qname, rdtype))

            for err_cls in queries_by_error:
                if not queries_by_error[err_cls]:
                    continue
                queries = [(fmt.humanize_name(qname), dns.rdatatype.to_text(rdtype)) for qname, rdtype in queries_by_error[err_cls]]
                err = Errors.DomainNameAnalysisError.insert_into_list(err_cls(queries=queries), nsec_status.errors, None, None, None)
    def _populate_inconsistent_negative_dnssec_responses_all(self, ignore_rfc9276):
        """Run the negative-vs-positive DNSSEC contradiction check over all queries.

        Applies _populate_inconsistent_negative_dnssec_responses() to every
        NODATA and NXDOMAIN response for this name and, for non-zone names,
        for its zone as well.  DS/DLV queries are excluded.
        """
        _logger.debug('Looking for negative responses that contradict positive responses (%s)...' % (fmt.humanize_name(self.name)))
        for (qname, rdtype), query in self.queries.items():
            if rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV):
                continue
            for neg_response_info in query.nodata_info:
                self._populate_inconsistent_negative_dnssec_responses(neg_response_info, self.nodata_status, ignore_rfc9276)
            for neg_response_info in query.nxdomain_info:
                self._populate_inconsistent_negative_dnssec_responses(neg_response_info, self.nxdomain_status, ignore_rfc9276)

        # for a non-zone name, also check the negative responses collected at
        # the enclosing zone
        if self.is_zone() or self.zone is None:
            return
        for (qname, rdtype), query in self.zone.queries.items():
            if rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV):
                continue
            for neg_response_info in query.nodata_info:
                self._populate_inconsistent_negative_dnssec_responses(neg_response_info, self.zone.nodata_status, ignore_rfc9276)
            for neg_response_info in query.nxdomain_info:
                self._populate_inconsistent_negative_dnssec_responses(neg_response_info, self.zone.nxdomain_status, ignore_rfc9276)
    def _populate_cdnskey_cds_correctness(self, trust_cdnskey_cds):
        """Validate CDS/CDNSKEY RRsets for this name (RFC 7344 / RFC 8078).

        Checks that each CDS/CDNSKEY RRset is signed by a key represented in
        both the DNSKEY set and the DS set (unless trust_cdnskey_cds is True),
        and that DNSSEC-deletion records (algorithm 0) appear alone and carry
        the exact values mandated by RFC 8078.  Errors are appended to
        self.rrset_errors for the affected RRset.
        """
        for rdtype in (dns.rdatatype.CDS, dns.rdatatype.CDNSKEY):
            if (self.name, rdtype) not in self.queries:
                continue

            if rdtype == dns.rdatatype.CDNSKEY:
                err_cls = Errors.MultipleCDNSKEY
            else:
                err_cls = Errors.MultipleCDS

            rrset_answer_info = self.queries[(self.name, rdtype)].answer_info
            if len(rrset_answer_info) > 1:
                # more than one answer!
                for rrset_info in rrset_answer_info:
                    self.rrset_errors[rrset_info].append(err_cls())

            for rrset_info in rrset_answer_info:
                # there are CNAMEs that show up here...
                if not (rrset_info.rrset.name == self.name and rrset_info.rrset.rdtype == rdtype):
                    continue

                # Check that the RRset is signed by a DNSKEY that is
                # represented in both the current DNSKEY set and the current DS
                # set.  The exception is if trust_cdnskey_cds is True, which
                # means that "the Parent validates the CDS/CDNSKEY through some
                # other means" (RFC 7344).
                if not trust_cdnskey_cds:
                    signer_in_dnskey_and_ds = False
                    for rrsig in self.rrsig_status[rrset_info]:
                        for dnskey in self.rrsig_status[rrset_info][rrsig]:
                            rrsig_status = self.rrsig_status[rrset_info][rrsig][dnskey]
                            if rrsig_status.dnskey is not None and \
                                    dns.rdatatype.DS in self.dnskey_with_ds and \
                                    rrsig_status.dnskey in self.dnskey_with_ds[dns.rdatatype.DS]:
                                signer_in_dnskey_and_ds = True
                                break

                    if not signer_in_dnskey_and_ds:
                        if rdtype == dns.rdatatype.CDNSKEY:
                            err_cls = Errors.CDNSKEYSignerInvalid
                        else:
                            err_cls = Errors.CDSSignerInvalid
                        self.rrset_errors[rrset_info].append(err_cls())

                # From this point on, consider only records associated with
                # DNSSEC deletion (CDS or CDNSKEY with algorithm 0)
                rdata_with_alg_0 = [r for r in rrset_info.rrset if r.algorithm == DNSSEC_DELETE_ALG]
                if not rdata_with_alg_0:
                    continue

                # Add an error if there is more than one record in the CDS /
                # CDNSKEY RRset.
                if len(rrset_info.rrset) > 1:
                    if rdtype == dns.rdatatype.CDNSKEY:
                        err_cls = Errors.CDNSKEYDeleteMultipleRecords
                    else:
                        err_cls = Errors.CDSDeleteMultipleRecords
                    self.rrset_errors[rrset_info].append(err_cls())

                # Add an error if the values for CDS / CDNSKEY record are
                # incorrect.  RFC 8078 requires CDNSKEY "0 3 0 AA==" and
                # CDS "0 0 0 00" for the delete operation.
                for rdata in rdata_with_alg_0:
                    if rdtype == dns.rdatatype.CDNSKEY and \
                            (rdata.protocol, rdata.flags, rdata.key) != (3, 0, b'\x00'):
                        self.rrset_errors[rrset_info].append(Errors.CDNSKEYRecordIncorrectDeleteValues())
                    elif rdtype == dns.rdatatype.CDS and \
                            (rdata.key_tag, rdata.digest_type, rdata.digest) != (0, 0, b'\x00'):
                        self.rrset_errors[rrset_info].append(Errors.CDSRecordIncorrectDeleteValues())
    def _populate_cdnskey_cds_ds_consistency(self):
        """Warn when CDS or CDNSKEY RRsets disagree with the published DS RRset.

        Compares each CDS RRset directly against the DS RRset, and each
        CDNSKEY RRset against the DS RRset after converting the CDNSKEY to DS
        form with digest algorithms matching those of the DS set.  A mismatch
        appends a warning to self.rrset_warnings (at most one per RRset).
        """
        if (self.name, dns.rdatatype.DS) not in self.queries:
            return

        ds_rrset_answer_info = self.queries[(self.name, dns.rdatatype.DS)].answer_info
        cds_has_error = set()
        for ds_rrset_info in ds_rrset_answer_info:
            # there are CNAMEs that show up here...
            if not (ds_rrset_info.rrset.name == self.name and \
                    ds_rrset_info.rrset.rdtype == dns.rdatatype.DS):
                continue

            digest_alg_map = {}
            # Create a list of digest algs for every algorithm/key tag.  We use
            # this for creating a set of DS from the CDNSKEY with digest
            # algorithms matching those of the DS set.
            for ds_rdata in ds_rrset_info.rrset:
                if (ds_rdata.algorithm, ds_rdata.key_tag) not in digest_alg_map:
                    digest_alg_map[(ds_rdata.algorithm, ds_rdata.key_tag)] = set()
                digest_alg_map[(ds_rdata.algorithm, ds_rdata.key_tag)].add(ds_rdata.digest_type)

            # Create a canonicalized list of rdata for comparing DS to CDS
            # (We cannot just use straight-up set comparison because they are
            # different types.)
            ds_rr_list = Response.RRsetInfo.sorted_rdata_list(ds_rrset_info.rrset)

            if (self.name, dns.rdatatype.CDS) in self.queries:
                rrset_answer_info = self.queries[(self.name, dns.rdatatype.CDS)].answer_info
                for rrset_info in rrset_answer_info:
                    # there are CNAMEs that show up here...
                    if not (rrset_info.rrset.name == self.name and \
                            rrset_info.rrset.rdtype == dns.rdatatype.CDS):
                        continue

                    # Created a sorted list of rdata for comparison
                    cds_rr_list = Response.RRsetInfo.sorted_rdata_list(rrset_info.rrset)

                    if ds_rr_list != cds_rr_list:
                        # If any CDS RRset has contents different than those of
                        # the DS RRset, then warn.
                        if rrset_info not in cds_has_error:
                            self.rrset_warnings[rrset_info].append(Errors.CDSInconsistentWithDS())
                            cds_has_error.add(rrset_info)

            if (self.name, dns.rdatatype.CDNSKEY) in self.queries:
                rrset_answer_info = self.queries[(self.name, dns.rdatatype.CDNSKEY)].answer_info
                for rrset_info in rrset_answer_info:
                    # there are CNAMEs that show up here...
                    if not (rrset_info.rrset.name == self.name and \
                            rrset_info.rrset.rdtype == dns.rdatatype.CDNSKEY):
                        continue

                    # Create a DS RRset from the CDNSKEY RRset
                    cds_rrset_info = Response.dnskey_rrset_to_ds_rrset(rrset_info, digest_alg_map, (2,))

                    # Created a sorted list of rdata for comparison
                    cds_rr_list = Response.RRsetInfo.sorted_rdata_list(cds_rrset_info.rrset)

                    if ds_rr_list != cds_rr_list:
                        # If any CDNSKEY RRset has contents different than
                        # those of the DS RRset, then warn.
                        if rrset_info not in cds_has_error:
                            self.rrset_warnings[rrset_info].append(Errors.CDNSKEYInconsistentWithDS())
                            cds_has_error.add(rrset_info)
    def _populate_cdnskey_cds_consistency(self):
        """Flag CDNSKEY RRsets that disagree with the CDS RRsets for this name.

        Converts each CDNSKEY RRset to DS form (using digest algorithms drawn
        from the CDS set) and compares it against the canonicalized CDS rdata.
        On mismatch an error is appended to self.rrset_errors for both the
        CDNSKEY RRset and the CDS RRset (at most once per RRset).
        """
        if (self.name, dns.rdatatype.CDS) not in self.queries or \
                (self.name, dns.rdatatype.CDNSKEY) not in self.queries:
            return

        # NOTE: ds_* names below actually refer to the CDS RRset; the code is
        # parallel to _populate_cdnskey_cds_ds_consistency(), with CDS in
        # place of DS.
        ds_rrset_answer_info = self.queries[(self.name, dns.rdatatype.CDS)].answer_info
        cds_has_error = set()
        for ds_rrset_info in ds_rrset_answer_info:
            # there are CNAMEs that show up here...
            if not (ds_rrset_info.rrset.name == self.name and \
                    ds_rrset_info.rrset.rdtype == dns.rdatatype.CDS):
                continue

            digest_alg_map = {}
            # Create a list of digest algs for every algorithm/key tag.  We use
            # this for creating a set of DS from the CDNSKEY with digest
            # algorithms matching those of the CDS set.
            for ds_rdata in ds_rrset_info.rrset:
                if (ds_rdata.algorithm, ds_rdata.key_tag) not in digest_alg_map:
                    digest_alg_map[(ds_rdata.algorithm, ds_rdata.key_tag)] = set()
                digest_alg_map[(ds_rdata.algorithm, ds_rdata.key_tag)].add(ds_rdata.digest_type)

            # Create a canonicalized list of rdata for comparing DS to CDS
            # (We cannot just use straight-up set comparison because they are
            # different types.)
            ds_rr_list = Response.RRsetInfo.sorted_rdata_list(ds_rrset_info.rrset)

            rrset_answer_info = self.queries[(self.name, dns.rdatatype.CDNSKEY)].answer_info
            for rrset_info in rrset_answer_info:
                # there are CNAMEs that show up here...
                if not (rrset_info.rrset.name == self.name and \
                        rrset_info.rrset.rdtype == dns.rdatatype.CDNSKEY):
                    continue

                # Create a DS RRset from the CDNSKEY RRset
                cds_rrset_info = Response.dnskey_rrset_to_ds_rrset(rrset_info, digest_alg_map, (2,))

                # Created a sorted list of rdata for comparison
                cds_rr_list = Response.RRsetInfo.sorted_rdata_list(cds_rrset_info.rrset)

                if ds_rr_list != cds_rr_list:
                    # If any CDNSKEY RRset has contents different than
                    # those of the CDS RRset, then error on both sides.
                    if rrset_info not in cds_has_error:
                        self.rrset_errors[rrset_info].append(Errors.CDNSKEYInconsistentWithCDS())
                        cds_has_error.add(rrset_info)
                    if ds_rrset_info not in cds_has_error:
                        self.rrset_errors[ds_rrset_info].append(Errors.CDNSKEYInconsistentWithCDS())
                        cds_has_error.add(ds_rrset_info)
    def _populate_dnskey_status(self, trusted_keys, multi_signer):
        """Attach per-key errors/warnings to each DNSKEY observed for this name.

        Checks include: revoked keys that no longer sign the DNSKEY RRset;
        DNSKEYs appearing below a zone apex; keys missing from some
        authoritative servers (with RFC 8901 multi-signer allowances when
        multi_signer is True); zero-length keys; algorithm-mandated key
        lengths; and a zone-level error if a configured trust anchor never
        self-signs.
        """
        if (self.name, dns.rdatatype.DNSKEY) not in self.queries:
            return

        trusted_keys_rdata = set([k for z, k in trusted_keys if z == self.name])
        trusted_keys_self_signing = set()

        # build a list of responsive servers
        bailiwick_map, default_bailiwick = self.get_bailiwick_mapping()
        servers_responsive = set()
        servers_authoritative = self.zone.get_auth_or_designated_servers()
        # only consider those servers that are supposed to answer authoritatively
        for query in self.queries[(self.name, dns.rdatatype.DNSKEY)].queries.values():
            servers_responsive.update([(server,client,query.responses[server][client]) for (server,client) in query.servers_with_valid_complete_response(bailiwick_map, default_bailiwick) if server in servers_authoritative])

        # any errors point to their own servers_clients value
        for dnskey in self.get_dnskeys():
            if dnskey.rdata in trusted_keys_rdata and dnskey in self.ksks:
                trusted_keys_self_signing.add(dnskey)
            if dnskey in self.revoked_keys and dnskey not in self.ksks:
                err = Errors.RevokedNotSigning()
                err.servers_clients = dnskey.servers_clients
                dnskey.errors.append(err)

            if not self.is_zone():
                err = Errors.DNSKEYNotAtZoneApex(zone=fmt.humanize_name(self.zone.name), name=fmt.humanize_name(self.name))
                err.servers_clients = dnskey.servers_clients
                dnskey.errors.append(err)

            # if there were servers responsive for the query but that didn't return the dnskey
            servers_with_dnskey = set()
            for (server,client) in dnskey.servers_clients:
                for response in dnskey.servers_clients[(server,client)]:
                    servers_with_dnskey.add((server,client,response))
            servers_clients_without = servers_responsive.difference(servers_with_dnskey)
            if servers_clients_without:
                err = Errors.DNSKEYMissingFromServers()
                # If the key is observed to be signing RRsets other than the
                # DNSKEY RRset, then it is inferred that this DNSKEY should be
                # available to validate those RRsets.  Thus, if the DNSKEY is
                # not returned by all servers, then this could lead to
                # validation failures (noting that the same RRsets might be
                # covered by RRSIGs associated with other DNSKEYs, which might
                # still make them validatable.)  This includes the multi-signer
                # approach (RFC 8901) in which every DNSKEY RRset returned
                # should include every ZSK, per both Model 1 and Model 2.
                #
                # Additionally, if the DNSKEY corresponds to a DS record (or
                # trust anchor), then it is inferred that this DNSKEY is used
                # for associating trust to the child zone.  Thus, if the DNSKEY
                # is not returned by all servers, then this could lead to
                # validation failures, associated with bogus delegation in the
                # case of the DS record (noting that there might be other
                # DNSKEY records that create a proper link between parent and
                # child, which might keep the delegation secure.)
                #
                # The reason that the DNSKEY RRset (with no DS or trust anchor)
                # is treated differently is that it is always validated using
                # the DNSKEY(s) and RRSIG(s) that are included in a single
                # response.  Thus, a missing key might still be indicative of a
                # problem, but the problem is not immediate.
                if dnskey in self.zsks or dnskey.rdata in trusted_keys_rdata:
                    dnskey.errors.append(err)
                elif (dns.rdatatype.DS in self.dnskey_with_ds and dnskey in self.dnskey_with_ds[dns.rdatatype.DS]):
                    # If multi_signer was specified, then the user has
                    # indicated that the "missing" KSK is simply an artifact of
                    # their use of RFC 8901, using Model 2.  With Model 2,
                    # different providers will serve different DNSKEY RRsets,
                    # each including (and signed by) their own KSK, which
                    # corresponds to a DS record (section 2.1).  Thus, if the
                    # DNSKEY is a KSK (i.e., signs the DNSKEY RRset), and the
                    # DNSKEY corresponds to a DS record, then it might be a
                    # multi-signer environment.  If multi_signer was
                    # specified, then we don't issue an error.
                    if dnskey in self.ksks and multi_signer:
                        pass
                    else:
                        dnskey.errors.append(err)
                else:
                    dnskey.warnings.append(err)
                for (server,client,response) in servers_clients_without:
                    err.add_server_client(server, client, response)

            if not dnskey.rdata.key:
                dnskey.errors.append(Errors.DNSKEYZeroLength())
            elif dnskey.rdata.algorithm in DNSSEC_KEY_LENGTHS_BY_ALGORITHM and \
                    dnskey.key_len != DNSSEC_KEY_LENGTHS_BY_ALGORITHM[dnskey.rdata.algorithm]:
                dnskey.errors.append(DNSSEC_KEY_LENGTH_ERRORS[dnskey.rdata.algorithm](length=dnskey.key_len))

        # a trust anchor was configured but no corresponding key signs the
        # DNSKEY RRset
        if trusted_keys_rdata and not trusted_keys_self_signing:
            self.zone_errors.append(Errors.NoTrustAnchorSigning(zone=fmt.humanize_name(self.zone.name)))
    def populate_response_component_status(self, G):
        """Derive per-object DNSSEC status from the authentication graph G.

        Walks G's node-to-object reverse mapping and records a status for each
        DNSKEY, RRset, NSEC set, negative response, and analysis object, with
        special handling for DNSKEY RRsets, individual NSEC records, and
        negative responses (DS insecurity proofs and SOA-backed proofs).  The
        result is propagated through dependencies via
        _set_response_component_status().
        """
        response_component_status = {}
        for obj in G.node_reverse_mapping:
            if isinstance(obj, (Response.DNSKEYMeta, Response.RRsetInfo, Response.NSECSet, Response.NegativeResponseInfo, self.__class__)):
                node_str = G.node_reverse_mapping[obj]
                status = G.status_for_node(node_str)
                response_component_status[obj] = status

                if isinstance(obj, Response.DNSKEYMeta):
                    # a DNSKEY's RRsets inherit its status unless the graph
                    # marked them independently secure
                    for rrset_info in obj.rrset_info:
                        if rrset_info in G.secure_dnskey_rrsets:
                            response_component_status[rrset_info] = Status.RRSET_STATUS_SECURE
                        else:
                            response_component_status[rrset_info] = status

                # Mark each individual NSEC in the set
                elif isinstance(obj, Response.NSECSet):
                    for nsec_name in obj.rrsets:
                        nsec_name_str = lb2s(nsec_name.canonicalize().to_text()).replace(r'"', r'\"')
                        response_component_status[obj.rrsets[nsec_name]] = G.status_for_node(node_str, nsec_name_str)

                elif isinstance(obj, Response.NegativeResponseInfo):
                    # the following two cases are only for zones
                    if G.is_invis(node_str):
                        # A negative response info for a DS query points to the
                        # "top node" of a zone in the graph.  If this "top node" is
                        # colored "insecure", then it indicates that the negative
                        # response has been authenticated.  To reflect this
                        # properly, we change the status to "secure".
                        if obj.rdtype == dns.rdatatype.DS:
                            if status == Status.RRSET_STATUS_INSECURE:
                                if G.secure_nsec_nodes_covering_node(node_str):
                                    response_component_status[obj] = Status.RRSET_STATUS_SECURE

                        # for non-DNSKEY responses, verify that the negative
                        # response is secure by checking that the SOA is also
                        # secure (the fact that it is marked "secure" indicates
                        # that the NSEC proof was already authenticated)
                        if obj.rdtype != dns.rdatatype.DNSKEY:
                            # check for secure opt out
                            opt_out_secure = bool(G.secure_nsec3_optout_nodes_covering_node(node_str))
                            if status == Status.RRSET_STATUS_SECURE or \
                                    (status == Status.RRSET_STATUS_INSECURE and opt_out_secure):
                                soa_secure = False
                                for soa_rrset in obj.soa_rrset_info:
                                    if G.status_for_node(G.node_reverse_mapping[soa_rrset]) == Status.RRSET_STATUS_SECURE:
                                        soa_secure = True
                                if not soa_secure:
                                    response_component_status[obj] = Status.RRSET_STATUS_BOGUS

        self._set_response_component_status(response_component_status)
    def _set_response_component_status(self, response_component_status, is_dlv=False, trace=None, follow_mx=True):
        """Recursively assign response_component_status to this analysis and its graph.

        Propagates the shared status mapping to CNAME/MX targets, external
        signers, NS dependencies, the NXDOMAIN ancestor, the parent, and the
        DLV parent, guarding against cycles with the trace list.  MX targets
        are followed only one level deep (follow_mx=False on recursion).
        """
        if trace is None:
            trace = []

        # avoid loops
        if self in trace:
            return

        # populate status of dependencies
        for cname in self.cname_targets:
            for target, cname_obj in self.cname_targets[cname].items():
                if cname_obj is not None:
                    cname_obj._set_response_component_status(response_component_status, trace=trace + [self])
        if follow_mx:
            for target, mx_obj in self.mx_targets.items():
                if mx_obj is not None:
                    mx_obj._set_response_component_status(response_component_status, trace=trace + [self], follow_mx=False)
        for signer, signer_obj in self.external_signers.items():
            if signer_obj is not None:
                signer_obj._set_response_component_status(response_component_status, trace=trace + [self])
        for target, ns_obj in self.ns_dependencies.items():
            if ns_obj is not None:
                ns_obj._set_response_component_status(response_component_status, trace=trace + [self])

        # populate status of ancestry
        if self.nxdomain_ancestor is not None:
            self.nxdomain_ancestor._set_response_component_status(response_component_status, trace=trace + [self])
        if self.parent is not None:
            self.parent._set_response_component_status(response_component_status, trace=trace + [self])
        if self.dlv_parent is not None:
            self.dlv_parent._set_response_component_status(response_component_status, is_dlv=True, trace=trace + [self])

        self.response_component_status = response_component_status
    def _serialize_rrset_info(self, rrset_info, consolidate_clients=False, show_servers=True, show_server_meta=True, loglevel=logging.DEBUG, html_format=False):
        """Serialize one RRset (with its RRSIGs, DNAMEs, and wildcard proofs) to an OrderedDict.

        Content included is gated by loglevel: the id and server details
        appear at INFO and below; the full description and rdata at DEBUG;
        warnings/errors at their respective levels.  Returns an empty dict
        when nothing qualifies for inclusion.
        """
        d = OrderedDict()

        rrsig_list = []
        if self.rrsig_status[rrset_info]:
            rrsigs = list(self.rrsig_status[rrset_info].keys())
            rrsigs.sort()
            for rrsig in rrsigs:
                dnskeys = list(self.rrsig_status[rrset_info][rrsig].keys())
                dnskeys.sort()
                for dnskey in dnskeys:
                    rrsig_status = self.rrsig_status[rrset_info][rrsig][dnskey]
                    rrsig_serialized = rrsig_status.serialize(consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip)
                    if rrsig_serialized:
                        rrsig_list.append(rrsig_serialized)

        dname_list = []
        if rrset_info in self.dname_status:
            for dname_status in self.dname_status[rrset_info]:
                dname_serialized = dname_status.serialize(self._serialize_rrset_info, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip)
                if dname_serialized:
                    dname_list.append(dname_serialized)

        wildcard_proof_list = OrderedDict()
        if rrset_info.wildcard_info:
            wildcard_names = list(rrset_info.wildcard_info.keys())
            wildcard_names.sort()
            for wildcard_name in wildcard_names:
                wildcard_name_str = lb2s(wildcard_name.canonicalize().to_text())
                wildcard_proof_list[wildcard_name_str] = []
                for nsec_status in self.wildcard_status[rrset_info.wildcard_info[wildcard_name]]:
                    nsec_serialized = nsec_status.serialize(self._serialize_rrset_info, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip)
                    if nsec_serialized:
                        wildcard_proof_list[wildcard_name_str].append(nsec_serialized)
                if not wildcard_proof_list[wildcard_name_str]:
                    del wildcard_proof_list[wildcard_name_str]

        # include the id line if the loglevel warrants it or any child
        # content was serialized
        show_id = loglevel <= logging.INFO or \
                (self.rrset_warnings[rrset_info] and loglevel <= logging.WARNING) or \
                (self.rrset_errors[rrset_info] and loglevel <= logging.ERROR) or \
                (rrsig_list or dname_list or wildcard_proof_list)

        if show_id:
            if rrset_info.rrset.rdtype == dns.rdatatype.NSEC3:
                d['id'] = '%s/%s/%s' % (fmt.format_nsec3_name(rrset_info.rrset.name), dns.rdataclass.to_text(rrset_info.rrset.rdclass), dns.rdatatype.to_text(rrset_info.rrset.rdtype))
            else:
                d['id'] = '%s/%s/%s' % (lb2s(rrset_info.rrset.name.canonicalize().to_text()), dns.rdataclass.to_text(rrset_info.rrset.rdclass), dns.rdatatype.to_text(rrset_info.rrset.rdtype))

        if loglevel <= logging.DEBUG:
            d['description'] = str(rrset_info)
            d.update(rrset_info.serialize(consolidate_clients=consolidate_clients, show_servers=False, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip))

        if rrsig_list:
            d['rrsig'] = rrsig_list

        if dname_list:
            d['dname'] = dname_list

        if wildcard_proof_list:
            d['wildcard_proof'] = wildcard_proof_list

        if loglevel <= logging.INFO and self.response_component_status is not None:
            d['status'] = Status.rrset_status_mapping[self.response_component_status[rrset_info]]

        if loglevel <= logging.INFO and show_servers:
            servers = tuple_to_dict(rrset_info.servers_clients)
            server_list = list(servers)
            server_list.sort()
            if consolidate_clients:
                servers = server_list
            d['servers'] = servers

            try:
                ns_names = list(set([lb2s(self.zone.get_ns_name_for_ip(s)[0][0].canonicalize().to_text()) for s in servers]))
            except IndexError:
                # no NS name known for some server IP
                ns_names = []
            ns_names.sort()
            d['ns_names'] = ns_names

            if show_server_meta:
                tags = set()
                nsids = set()
                cookie_tags = {}
                for server,client in rrset_info.servers_clients:
                    for response in rrset_info.servers_clients[(server,client)]:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
                        cookie_tags[server] = OrderedDict((
                            ('request', response.request_cookie_tag()),
                            ('response', response.response_cookie_tag()),
                        ))
                if nsids:
                    d['nsid_values'] = list(nsids)
                    d['nsid_values'].sort()
                d['query_options'] = list(tags)
                d['query_options'].sort()
                cookie_tag_mapping = OrderedDict()
                for server in server_list:
                    cookie_tag_mapping[server] = cookie_tags[server]
                d['cookie_status'] = cookie_tag_mapping

        if self.rrset_warnings[rrset_info] and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.rrset_warnings[rrset_info]]

        if self.rrset_errors[rrset_info] and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.rrset_errors[rrset_info]]

        return d
    def _serialize_negative_response_info(self, neg_response_info, neg_status, warnings, errors, consolidate_clients=False, loglevel=logging.DEBUG, html_format=False):
        """Serialize one negative (NODATA/NXDOMAIN) response to an OrderedDict.

        Includes the NSEC(3) proof and SOA RRsets, and — at INFO level and
        below — the overall status, server/client details, NSID values, query
        options, and cookie status.  Warnings and errors are appended at
        their respective loglevels.  Returns an empty dict when nothing
        qualifies.
        """
        d = OrderedDict()

        proof_list = []
        for nsec_status in neg_status[neg_response_info]:
            nsec_serialized = nsec_status.serialize(self._serialize_rrset_info, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip)
            if nsec_serialized:
                proof_list.append(nsec_serialized)

        soa_list = []
        for soa_rrset_info in neg_response_info.soa_rrset_info:
            rrset_serialized = self._serialize_rrset_info(soa_rrset_info, consolidate_clients=consolidate_clients, show_server_meta=False, loglevel=loglevel, html_format=html_format)
            if rrset_serialized:
                soa_list.append(rrset_serialized)

        # include the id line if the loglevel warrants it or any child
        # content was serialized
        show_id = loglevel <= logging.INFO or \
                (warnings[neg_response_info] and loglevel <= logging.WARNING) or \
                (errors[neg_response_info] and loglevel <= logging.ERROR) or \
                (proof_list or soa_list)

        if show_id:
            d['id'] = '%s/%s/%s' % (lb2s(neg_response_info.qname.canonicalize().to_text()), 'IN', dns.rdatatype.to_text(neg_response_info.rdtype))

        if proof_list:
            d['proof'] = proof_list

        if soa_list:
            d['soa'] = soa_list

        if loglevel <= logging.INFO and self.response_component_status is not None:
            d['status'] = Status.rrset_status_mapping[self.response_component_status[neg_response_info]]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(neg_response_info.servers_clients)
            server_list = list(servers)
            server_list.sort()
            if consolidate_clients:
                servers = server_list
            d['servers'] = servers

            try:
                ns_names = list(set([lb2s(self.zone.get_ns_name_for_ip(s)[0][0].canonicalize().to_text()) for s in servers]))
            except IndexError:
                # no NS name known for some server IP
                ns_names = []
            ns_names.sort()
            d['ns_names'] = ns_names

            tags = set()
            nsids = set()
            cookie_tags = {}
            for server,client in neg_response_info.servers_clients:
                for response in neg_response_info.servers_clients[(server,client)]:
                    tags.add(response.effective_query_tag())
                    nsid = response.nsid_val()
                    if nsid is not None:
                        nsids.add(nsid)
                    cookie_tags[server] = OrderedDict((
                        ('request', response.request_cookie_tag()),
                        ('response', response.response_cookie_tag()),
                    ))
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()
            cookie_tag_mapping = OrderedDict()
            for server in server_list:
                cookie_tag_mapping[server] = cookie_tags[server]
            d['cookie_status'] = cookie_tag_mapping

        if warnings[neg_response_info] and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in warnings[neg_response_info]]

        if errors[neg_response_info] and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in errors[neg_response_info]]

        return d
    def _serialize_query_status(self, query, consolidate_clients=False, loglevel=logging.DEBUG, html_format=False):
        """Serialize the answers and negative responses for one query.

        Returns an OrderedDict with 'answer', 'nxdomain', 'nodata', 'error',
        and 'warning' lists; any list that remains empty is removed before
        returning.  Only responses for the query qname are included, unless
        this is a recursive analysis.
        """
        d = OrderedDict()
        d['answer'] = []
        d['nxdomain'] = []
        d['nodata'] = []
        d['error'] = []
        d['warning'] = []

        for rrset_info in query.answer_info:
            if rrset_info.rrset.name == query.qname or self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                rrset_serialized = self._serialize_rrset_info(rrset_info, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format)
                if rrset_serialized:
                    d['answer'].append(rrset_serialized)

        for neg_response_info in query.nxdomain_info:
            # make sure this query was made to a server designated as
            # authoritative
            if not set([s for (s,c) in neg_response_info.servers_clients]).intersection(self.zone.get_auth_or_designated_servers()):
                continue
            # only look at qname
            if neg_response_info.qname == query.qname or self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                neg_response_serialized = self._serialize_negative_response_info(neg_response_info, self.nxdomain_status, self.nxdomain_warnings, self.nxdomain_errors, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format)
                if neg_response_serialized:
                    d['nxdomain'].append(neg_response_serialized)

        for neg_response_info in query.nodata_info:
            # only look at qname
            if neg_response_info.qname == query.qname or self.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                neg_response_serialized = self._serialize_negative_response_info(neg_response_info, self.nodata_status, self.nodata_warnings, self.nodata_errors, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format)
                if neg_response_serialized:
                    d['nodata'].append(neg_response_serialized)

        if loglevel <= logging.WARNING:
            for warning in self.response_warnings[query]:
                warning_serialized = warning.serialize(consolidate_clients=consolidate_clients, html_format=html_format)
                if warning_serialized:
                    d['warning'].append(warning_serialized)

            for error in self.response_errors[query]:
                error_serialized = error.serialize(consolidate_clients=consolidate_clients, html_format=html_format)
                if error_serialized:
                    d['error'].append(error_serialized)

        # drop empty categories
        if not d['answer']:
            del d['answer']
        if not d['nxdomain']:
            del d['nxdomain']
        if not d['nodata']:
            del d['nodata']
        if not d['error']:
            del d['error']
        if not d['warning']:
            del d['warning']

        return d
error.serialize(consolidate_clients=consolidate_clients, html_format=html_format) if error_serialized: d['error'].append(error_serialized) if not d['answer']: del d['answer'] if not d['nxdomain']: del d['nxdomain'] if not d['nodata']: del d['nodata'] if not d['error']: del d['error'] if not d['warning']: del d['warning'] return d def _serialize_dnskey_status(self, consolidate_clients=False, loglevel=logging.DEBUG, html_format=False): d = [] for dnskey in self.get_dnskeys(): dnskey_serialized = dnskey.serialize(consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip) if dnskey_serialized: if loglevel <= logging.INFO and self.response_component_status is not None: dnskey_serialized['status'] = Status.rrset_status_mapping[self.response_component_status[dnskey]] d.append(dnskey_serialized) return d def _serialize_delegation_status(self, rdtype, consolidate_clients=False, loglevel=logging.DEBUG, html_format=False): d = OrderedDict() dss = list(self.ds_status_by_ds[rdtype].keys()) d['ds'] = [] dss.sort() for ds in dss: dnskeys = list(self.ds_status_by_ds[rdtype][ds].keys()) dnskeys.sort() for dnskey in dnskeys: ds_status = self.ds_status_by_ds[rdtype][ds][dnskey] ds_serialized = ds_status.serialize(consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip) if ds_serialized: d['ds'].append(ds_serialized) if not d['ds']: del d['ds'] try: neg_response_info = [x for x in self.nodata_status if x.qname == self.name and x.rdtype == rdtype][0] status = self.nodata_status except IndexError: try: neg_response_info = [x for x in self.nxdomain_status if x.qname == self.name and x.rdtype == rdtype][0] status = self.nxdomain_status except IndexError: neg_response_info = None if neg_response_info is not None: d['insecurity_proof'] = [] for nsec_status in status[neg_response_info]: nsec_serialized = 
nsec_status.serialize(self._serialize_rrset_info, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=self.zone.get_ns_name_for_ip) if nsec_serialized: d['insecurity_proof'].append(nsec_serialized) if not d['insecurity_proof']: del d['insecurity_proof'] erroneous_status = self.delegation_status[rdtype] not in (Status.DELEGATION_STATUS_SECURE, Status.DELEGATION_STATUS_INSECURE) if loglevel <= logging.INFO or erroneous_status: d['status'] = Status.delegation_status_mapping[self.delegation_status[rdtype]] if self.delegation_warnings[rdtype] and loglevel <= logging.WARNING: d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.delegation_warnings[rdtype]] if self.delegation_errors[rdtype] and loglevel <= logging.ERROR: d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.delegation_errors[rdtype]] return d def _serialize_zone_status(self, consolidate_clients=False, loglevel=logging.DEBUG, html_format=False): d = OrderedDict() if loglevel <= logging.DEBUG: glue_ip_mapping = self.get_glue_ip_mapping() auth_ns_ip_mapping = self.get_auth_ns_ip_mapping() d['servers'] = OrderedDict() names = list(self.get_ns_names()) names.sort() for name in names: name_str = lb2s(name.canonicalize().to_text()) d['servers'][name_str] = OrderedDict() if name in glue_ip_mapping and glue_ip_mapping[name]: servers = list(glue_ip_mapping[name]) servers.sort() d['servers'][name_str]['glue'] = servers if name in auth_ns_ip_mapping and auth_ns_ip_mapping[name]: servers = list(auth_ns_ip_mapping[name]) servers.sort() d['servers'][name_str]['auth'] = servers if not d['servers']: del d['servers'] stealth_servers = self.get_stealth_servers() if stealth_servers: stealth_mapping = {} for server in stealth_servers: names, ancestor_name = self.get_ns_name_for_ip(server) for name in names: if name not in stealth_mapping: stealth_mapping[name] = [] 
stealth_mapping[name].append(server) names = list(stealth_mapping) names.sort() for name in names: name_str = lb2s(name.canonicalize().to_text()) servers = stealth_mapping[name] servers.sort() d['servers'][name_str] = OrderedDict(( ('stealth', servers), )) if loglevel <= logging.INFO and self.response_component_status is not None: d['status'] = Status.delegation_status_mapping[self.response_component_status[self]] if self.zone_warnings and loglevel <= logging.WARNING: d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.zone_warnings] if self.zone_errors and loglevel <= logging.ERROR: d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.zone_errors] return d def serialize_status(self, d=None, is_dlv=False, loglevel=logging.DEBUG, ancestry_only=False, level=RDTYPES_ALL, trace=None, follow_mx=True, html_format=False): if d is None: d = OrderedDict() if trace is None: trace = [] # avoid loops if self in trace: return d # if we're a stub, there's no status to serialize if self.stub: return d name_str = lb2s(self.name.canonicalize().to_text()) if name_str in d: return d cname_ancestry_only = self.analysis_type == ANALYSIS_TYPE_RECURSIVE # serialize status of dependencies first because their version of the # analysis might be the most complete (considering re-dos) if level <= self.RDTYPES_NS_TARGET: for cname in self.cname_targets: for target, cname_obj in self.cname_targets[cname].items(): if cname_obj is not None: cname_obj.serialize_status(d, loglevel=loglevel, ancestry_only=cname_ancestry_only, level=max(self.RDTYPES_ALL_SAME_NAME, level), trace=trace + [self], html_format=html_format) if follow_mx: for target, mx_obj in self.mx_targets.items(): if mx_obj is not None: mx_obj.serialize_status(d, loglevel=loglevel, level=max(self.RDTYPES_ALL_SAME_NAME, level), trace=trace + [self], follow_mx=False, html_format=html_format) if level <= 
self.RDTYPES_SECURE_DELEGATION: for signer, signer_obj in self.external_signers.items(): signer_obj.serialize_status(d, loglevel=loglevel, level=self.RDTYPES_SECURE_DELEGATION, trace=trace + [self], html_format=html_format) for target, ns_obj in self.ns_dependencies.items(): if ns_obj is not None: ns_obj.serialize_status(d, loglevel=loglevel, level=self.RDTYPES_NS_TARGET, trace=trace + [self], html_format=html_format) # serialize status of ancestry if level <= self.RDTYPES_SECURE_DELEGATION: if self.nxdomain_ancestor is not None: self.nxdomain_ancestor.serialize_status(d, loglevel=loglevel, level=self.RDTYPES_ALL_SAME_NAME, trace=trace + [self], html_format=html_format) if self.parent is not None: self.parent.serialize_status(d, loglevel=loglevel, level=self.RDTYPES_SECURE_DELEGATION, trace=trace + [self], html_format=html_format) if self.dlv_parent is not None: self.dlv_parent.serialize_status(d, is_dlv=True, loglevel=loglevel, level=self.RDTYPES_SECURE_DELEGATION, trace=trace + [self], html_format=html_format) # if we're only looking for the secure ancestry of a name, and not the # name itself (i.e., because this is a subsequent name in a CNAME # chain) if ancestry_only: # only proceed if the name is a zone (and thus as DNSKEY, DS, etc.) if not self.is_zone(): return d # explicitly set the level to self.RDTYPES_SECURE_DELEGATION, so # the other query types aren't retrieved. 
level = self.RDTYPES_SECURE_DELEGATION consolidate_clients = self.single_client() erroneous_status = self.status not in (Status.NAME_STATUS_NOERROR, Status.NAME_STATUS_NXDOMAIN) d[name_str] = OrderedDict() if loglevel <= logging.INFO or erroneous_status: d[name_str]['status'] = Status.name_status_mapping[self.status] d[name_str]['queries'] = OrderedDict() query_keys = list(self.queries.keys()) query_keys.sort() required_rdtypes = self._rdtypes_for_analysis_level(level) # don't serialize NS data in names for which delegation-only # information is required if level >= self.RDTYPES_SECURE_DELEGATION: required_rdtypes.difference_update([self.referral_rdtype, dns.rdatatype.NS]) for (qname, rdtype) in query_keys: if level > self.RDTYPES_ALL and qname not in (self.name, self.dlv_name): continue if required_rdtypes is not None and rdtype not in required_rdtypes: continue query_serialized = self._serialize_query_status(self.queries[(qname, rdtype)], consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if query_serialized: qname_type_str = '%s/%s/%s' % (lb2s(qname.canonicalize().to_text()), dns.rdataclass.to_text(dns.rdataclass.IN), dns.rdatatype.to_text(rdtype)) d[name_str]['queries'][qname_type_str] = query_serialized # handle NODATA in the zone or the original parent, if there is an error/warning if trace and not trace[0].is_zone() and self == trace[0].zone: has_warnings, has_errors = self.queries_with_errors_warnings(classes=Errors.ExistingCovered) has_warnings_or_errors = has_warnings.union(has_errors) qname = self.nxrrset_name rdtype = self.nxrrset_rdtype if qname is not None and (qname, rdtype) in has_warnings_or_errors: query_serialized = self._serialize_query_status(self.queries[(qname, rdtype)], consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if query_serialized: qname_type_str = '%s/%s/%s' % (lb2s(qname.canonicalize().to_text()), dns.rdataclass.to_text(dns.rdataclass.IN), 
dns.rdatatype.to_text(rdtype)) d[name_str]['queries'][qname_type_str] = query_serialized if not d[name_str]['queries']: del d[name_str]['queries'] if level <= self.RDTYPES_SECURE_DELEGATION and (self.name, dns.rdatatype.DNSKEY) in self.queries: dnskey_serialized = self._serialize_dnskey_status(consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if dnskey_serialized: d[name_str]['dnskey'] = dnskey_serialized if self.is_zone(): zone_serialized = self._serialize_zone_status(consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if zone_serialized: d[name_str]['zone'] = zone_serialized if self.parent is not None and not is_dlv: delegation_serialized = self._serialize_delegation_status(dns.rdatatype.DS, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if delegation_serialized: d[name_str]['delegation'] = delegation_serialized if self.dlv_parent is not None: if (self.dlv_name, dns.rdatatype.DLV) in self.queries: delegation_serialized = self._serialize_delegation_status(dns.rdatatype.DLV, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format) if delegation_serialized: d[name_str]['dlv'] = delegation_serialized if not d[name_str]: del d[name_str] return d def queries_with_errors_warnings(self, classes=None): has_warnings = set() has_errors = set() for rrset_info in self.rrset_warnings: if not self.rrset_warnings[rrset_info]: continue if classes is not None and \ not [e for e in self.rrset_warnings[rrset_info] if isinstance(e, classes)]: continue for (server, client), responses in rrset_info.servers_clients.items(): for response in responses: has_warnings.add((response.query.qname, response.query.rdtype)) for rrset_info in self.rrset_errors: if not self.rrset_errors[rrset_info]: continue if classes is not None and \ not [e for e in self.rrset_errors[rrset_info] if isinstance(e, classes)]: continue for (server, client), responses in 
rrset_info.servers_clients.items(): for response in responses: has_errors.add((response.query.qname, response.query.rdtype)) for rrset_info in self.rrsig_status: for rrsig in self.rrsig_status[rrset_info]: for dnskey in self.rrsig_status[rrset_info][rrsig]: rrsig_status = self.rrsig_status[rrset_info][rrsig][dnskey] for (server, client), responses in rrset_info.servers_clients.items(): for response in responses: if rrsig_status.warnings: if classes is None or \ [e for e in rrsig_status.warnings if isinstance(e, classes)]: has_warnings.add((response.query.qname, response.query.rdtype)) if rrsig_status.errors: if classes is None or \ [e for e in rrsig_status.errors if isinstance(e, classes)]: has_errors.add((response.query.qname, response.query.rdtype)) for wildcard_info, statuses in self.wildcard_status.items(): for status in statuses: for (server, client), responses in wildcard_info.servers_clients.items(): for response in responses: if status.warnings: if classes is None or \ [e for e in status.warnings if isinstance(e, classes)]: has_warnings.add((response.query.qname, response.query.rdtype)) if status.errors: if classes is None or \ [e for e in status.errors if isinstance(e, classes)]: has_errors.add((response.query.qname, response.query.rdtype)) for dname_info, statuses in self.dname_status.items(): for status in statuses: for (server, client), responses in dname_info.servers_clients.items(): for response in responses: if status.warnings: if classes is None or \ [e for e in status.warnings if isinstance(e, classes)]: has_warnings.add((response.query.qname, response.query.rdtype)) if status.errors: if classes is None or \ [e for e in status.errors if isinstance(e, classes)]: has_errors.add((response.query.qname, response.query.rdtype)) for neg_response_info in self.nxdomain_status: for status in self.nxdomain_status[neg_response_info]: if status.warnings: if classes is None or \ [e for e in status.warnings if isinstance(e, classes)]: 
has_warnings.add((neg_response_info.qname, neg_response_info.rdtype)) if status.errors: if classes is None or \ [e for e in status.errors if isinstance(e, classes)]: has_errors.add((neg_response_info.qname, neg_response_info.rdtype)) for neg_response_info in self.nxdomain_warnings: if not self.nxdomain_warnings[neg_response_info]: continue if classes is not None and \ not [e for e in self.nxdomain_warnings[neg_response_info] if isinstance(e, classes)]: continue for (server, client), responses in neg_response_info.servers_clients.items(): for response in responses: has_warnings.add((response.query.qname, response.query.rdtype)) for neg_response_info in self.nxdomain_errors: if not self.nxdomain_errors[neg_response_info]: continue if classes is not None and \ not [e for e in self.nxdomain_errors[neg_response_info] if isinstance(e, classes)]: continue for (server, client), responses in neg_response_info.servers_clients.items(): for response in responses: has_errors.add((response.query.qname, response.query.rdtype)) for neg_response_info in self.nodata_warnings: if not self.nodata_warnings[neg_response_info]: continue if classes is not None and \ not [e for e in self.nodata_warnings[neg_response_info] if isinstance(e, classes)]: continue for (server, client), responses in neg_response_info.servers_clients.items(): for response in responses: has_warnings.add((response.query.qname, response.query.rdtype)) for neg_response_info in self.nodata_errors: if not self.nodata_errors[neg_response_info]: continue if classes is not None and \ not [e for e in self.nodata_errors[neg_response_info] if isinstance(e, classes)]: continue for (server, client), responses in neg_response_info.servers_clients.items(): for response in responses: has_errors.add((response.query.qname, response.query.rdtype)) for neg_response_info in self.nodata_status: for status in self.nodata_status[neg_response_info]: if status.warnings: if classes is None or \ [e for e in status.warnings if isinstance(e, 
classes)]: has_warnings.add((neg_response_info.qname, neg_response_info.rdtype)) if status.errors: if classes is None or \ [e for e in status.errors if isinstance(e, classes)]: has_errors.add((neg_response_info.qname, neg_response_info.rdtype)) for query in self.response_warnings: if self.response_warnings[query]: if classes is None or \ [e for e in self.response_warnings[query] if isinstance(e, classes)]: has_warnings.add((query.qname, query.rdtype)) for query in self.response_errors: if self.response_errors[query]: if classes is None or \ [e for e in self.response_errors[query] if isinstance(e, classes)]: has_errors.add((query.qname, query.rdtype)) return has_warnings, has_errors class TTLAgnosticOfflineDomainNameAnalysis(OfflineDomainNameAnalysis): QUERY_CLASS = Q.MultiQueryAggregateDNSResponse def deserialize(name, d1, cache=None, **kwargs): name_str = lb2s(name.canonicalize().to_text()) d = d1[name_str] analysis_type = analysis_type_codes[d['type']] if analysis_type == ANALYSIS_TYPE_RECURSIVE: cls = TTLAgnosticOfflineDomainNameAnalysis elif analysis_type == ANALYSIS_TYPE_AUTHORITATIVE: cls = OfflineDomainNameAnalysis else: raise ValueError('Unsupported analysis type: %s' % (d['type'])) return cls.deserialize(name, d1, cache=cache, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/analysis/online.py0000644000175000017500000034422515001472663017155 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. 
# # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import binascii import datetime import logging import random import re import socket import sys import threading import time import uuid # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict import dns.flags, dns.name, dns.rdataclass, dns.rdatatype, dns.resolver import dnsviz.format as fmt from dnsviz.ipaddr import * import dnsviz.query as Q import dnsviz.resolver as Resolver from dnsviz import transport from dnsviz import util lb2s = fmt.latin1_binary_to_string _logger = logging.getLogger(__name__) DNS_RAW_VERSION = 1.2 class NetworkConnectivityException(Exception): pass class IPv4ConnectivityException(NetworkConnectivityException): pass class IPv6ConnectivityException(NetworkConnectivityException): pass class NoNameservers(NetworkConnectivityException): pass ARPA_NAME = dns.name.from_text('arpa') IP6_ARPA_NAME = dns.name.from_text('ip6', ARPA_NAME) INADDR_ARPA_NAME = dns.name.from_text('in-addr', ARPA_NAME) E164_ARPA_NAME = dns.name.from_text('e164', ARPA_NAME) DANE_PORT_RE = re.compile(r'^_(\d+)$') SRV_PORT_RE = re.compile(r'^_.*[^\d].*$') PROTO_LABEL_RE = re.compile(r'^_(tcp|udp|sctp)$') WILDCARD_EXPLICIT_DELEGATION = dns.name.from_text('*') COOKIE_STANDIN = binascii.unhexlify('cccccccccccccccc') COOKIE_BAD = 
COOKIE_BAD = binascii.unhexlify('bbbbbbbbbbbbbbbb')

# Analysis type codes and their bidirectional string mappings.
ANALYSIS_TYPE_AUTHORITATIVE = 0
ANALYSIS_TYPE_RECURSIVE = 1
ANALYSIS_TYPE_CACHE = 2

analysis_types = {
    ANALYSIS_TYPE_AUTHORITATIVE: 'authoritative',
    ANALYSIS_TYPE_RECURSIVE: 'recursive',
    ANALYSIS_TYPE_CACHE: 'cache',
}
analysis_type_codes = {
    'authoritative': ANALYSIS_TYPE_AUTHORITATIVE,
    'recursive': ANALYSIS_TYPE_RECURSIVE,
    'cache': ANALYSIS_TYPE_CACHE,
}

class OnlineDomainNameAnalysis(object):
    QUERY_CLASS = Q.MultiQuery

    def __init__(self, name, stub=False, analysis_type=ANALYSIS_TYPE_AUTHORITATIVE, cookie_standin=None, cookie_bad=None):
        ##################################################
        # General attributes
        ##################################################

        # The name that is the focus of the analysis (serialized).
        self.name = name
        self.analysis_type = analysis_type
        self.stub = stub

        # Attributes related to DNS cookies; fall back to the module-level
        # stand-in values when none are supplied.
        if cookie_standin is None:
            cookie_standin = COOKIE_STANDIN
        self.cookie_standin = cookie_standin
        if cookie_bad is None:
            cookie_bad = COOKIE_BAD
        self.cookie_bad = cookie_bad

        # a class for constructing the queries
        self._query_cls = self.QUERY_CLASS

        # A unique identifier for the analysis
        self.uuid = uuid.uuid4()

        # Analysis start and end (serialized).
        self.analysis_start = None
        self.analysis_end = None

        # The record types queried with the name when eliciting a referral,
        # eliciting authority section NS records, and eliciting DNS cookies
        # (serialized).
        self.referral_rdtype = None
        self.auth_rdtype = None
        self.cookie_rdtype = None

        # Whether or not the delegation was specified explicitly or learned
        # by delegation.  This is for informational purposes more than
        # functional purposes (serialized).
        self.explicit_delegation = False

        # The queries issued to and corresponding responses received from the
        # servers (serialized).
        self.queries = {}

        # A reference to the analysis of the parent authority (and that of the
        # DLV parent, if any).
        self.parent = None
        self._dlv_parent = None
        self._dlv_name = None

        # A reference to the highest ancestor for which NXDOMAIN was received
        # (serialized).
        self.nxdomain_ancestor = None

        # The clients used for queries (serialized - for convenience)
        self.clients_ipv4 = set()
        self.clients_ipv6 = set()

        # Meta information associated with the domain name.  These are
        # set when responses are processed.
        self.has_soa = False
        self.has_ns = False
        self.cname_targets = {}
        self.ns_dependencies = {}
        self.mx_targets = {}
        self.external_signers = {}

        ##################################################
        # Zone-specific attributes
        ##################################################

        # The DNS names and record types queried to analyze negative responses
        # of different types (serialized).
        self.nxdomain_name = None
        self.nxdomain_rdtype = None
        self.nxrrset_name = None
        self.nxrrset_rdtype = None

        # A mapping of names of authoritative servers to IP addresses returned
        # in authoritative responses (serialized).
        self._auth_ns_ip_mapping = {}

        # These are populated as responses are added.
        self._glue_ip_mapping = {}
        self._ns_names_in_child = set()
        self._all_servers_queried = set()
        self._all_servers_clients_queried = set()
        self._all_servers_clients_queried_tcp = set()
        self._responsive_servers_clients_udp = set()
        self._responsive_servers_clients_tcp = set()
        self._auth_servers_clients = set()
        self._valid_servers_clients_udp = set()
        self._valid_servers_clients_tcp = set()

        # A mapping of server to server-provided DNS cookie
        self.cookie_jar = {}

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.__str__())

    def __str__(self):
        return fmt.humanize_name(self.name, True)

    def __eq__(self, other):
        # analyses are equal when they concern the same name
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)

    def parent_name(self):
        '''Return the name of the parent analysis, or None.'''
        if self.parent is not None:
            return self.parent.name
        return None

    def dlv_parent_name(self):
        '''Return the name of the DLV parent analysis, or None.'''
        if self.dlv_parent is not None:
            return self.dlv_parent.name
        return None

    def nxdomain_ancestor_name(self):
        '''Return the name of the NXDOMAIN ancestor analysis, or None.'''
        if self.nxdomain_ancestor is not None:
            return self.nxdomain_ancestor.name
        return None

    def _set_dlv_parent(self, dlv_parent):
        # derive the DLV name by grafting this name (minus the root label)
        # onto the DLV parent's name; abandon DLV if the result is too long
        self._dlv_parent = dlv_parent
        if dlv_parent is None:
            self._dlv_name = None
        else:
            try:
                self._dlv_name = dns.name.Name(self.name.labels[:-1] + dlv_parent.name.labels)
            except dns.name.NameTooLong:
                self._dlv_parent = None
                self._dlv_name = None

    def _get_dlv_parent(self):
        return self._dlv_parent
    dlv_parent = property(_get_dlv_parent, _set_dlv_parent)

    def _get_dlv_name(self):
        return self._dlv_name
    dlv_name = property(_get_dlv_name)

    def is_zone(self):
        '''Return True if this name is a zone apex (has NS records, is the
        root, or has designated authoritative servers).'''
        return bool(self.has_ns or self.name == dns.name.root or self._auth_ns_ip_mapping)

    def _get_zone(self):
        if self.is_zone():
            return self
        return self.parent
    zone = property(_get_zone)

    def single_client(self, exclude_loopback=True, exclude_ipv4_mapped=True):
        '''Return True if at most one IPv4 and at most one IPv6 client were
        used for queries, optionally ignoring loopback and IPv4-mapped
        addresses.'''
        clients_ipv4 = [x for x in self.clients_ipv4
                if not exclude_loopback or LOOPBACK_IPV4_RE.match(x) is None]
        clients_ipv6 = [x for x in self.clients_ipv6
                if (not exclude_loopback or x != LOOPBACK_IPV6) and
                        (not exclude_ipv4_mapped or IPV4_MAPPED_IPV6_RE.match(x) is None)]
        return len(clients_ipv4) <= 1 and len(clients_ipv6) <= 1
IPV4_MAPPED_IPV6_RE.match(x) is None)] return len(clients_ipv4) <= 1 and len(clients_ipv6) <= 1 def get_name(self, name, trace=None): #XXX this whole method is a hack if trace is None: trace = [] if self in trace: return None if name in (self.name, self.nxdomain_name, self.nxrrset_name, self.dlv_name): return self for cname in self.cname_targets: for target, cname_obj in self.cname_targets[cname].items(): #XXX it is possible for cname_obj to be None where # this name was populated with level RDTYPES_SECURE_DELEGATION. # when this method is refactored appropriately, this check won't # be necessary. if cname_obj is None: continue ref = cname_obj.get_name(name, trace=trace + [self]) if ref is not None: return ref if name in self.external_signers: return self.external_signers[name] if name in self.ns_dependencies and self.ns_dependencies[name] is not None: return self.ns_dependencies[name] if name in self.mx_targets and self.mx_targets[name] is not None: return self.mx_targets[name] if self.name.is_subdomain(name) and self.parent is not None: return self.parent.get_name(name, trace=trace + [self]) elif name == self.dlv_parent_name(): return self.dlv_parent elif name == self.nxdomain_ancestor_name(): return self.nxdomain_ancestor return None def get_bailiwick_mapping(self): if not hasattr(self, '_bailiwick_mapping') or self._bailiwick_mapping is None: if self.parent is None: self._bailiwick_mapping = {}, self.name else: self._bailiwick_mapping = dict([(s,self.parent_name()) for s in self.parent.get_auth_or_designated_servers()]), self.name return self._bailiwick_mapping def get_cookie_jar_mapping(self): if not hasattr(self, '_cookie_jar_mapping') or self._cookie_jar_mapping is None: if self.parent is None: self._cookie_jar_mapping = {}, self.cookie_jar else: self._cookie_jar_mapping = dict([(s,self.parent.cookie_jar) for s in self.parent.get_auth_or_designated_servers()]), self.cookie_jar return self._cookie_jar_mapping def _add_glue_ip_mapping(self, response): 
'''Extract a mapping of NS targets to IP addresses from A and AAAA records in the additional section of a referral.''' ip_mapping = response.ns_ip_mapping_from_additional(self.name, self.parent_name()) for name, ip_set in ip_mapping.items(): if name not in self._glue_ip_mapping: self._glue_ip_mapping[name] = set() self._glue_ip_mapping[name].update(ip_set) # this includes both out-of-bailiwick names (because # ns_ip_mapping_from_additional() is called with # self.parent_name()) and those that have no IPs # in the additional section. if not ip_set: self.ns_dependencies[name] = None def _handle_mx_response(self, rrset): '''Save the targets from an MX RRset with the name which is the subject of this analysis.''' for mx in rrset: self.mx_targets[mx.exchange] = None def _handle_cname_response(self, rrset): '''Save the targets from a CNAME RRset with the name which is the subject of this analysis.''' if rrset.name not in self.cname_targets: self.cname_targets[rrset.name] = {} self.cname_targets[rrset.name][rrset[0].target] = None def _handle_ns_response(self, rrset, update_ns_names): '''Indicate that there exist NS records for the name which is the subject of this analysis, and, if authoritative, save the NS targets.''' self.has_ns = True if update_ns_names: for ns in rrset: self._ns_names_in_child.add(ns.target) def set_ns_dependencies(self): # the following check includes explicit delegations if self.parent is None: return for ns in self.get_ns_names_in_child().difference(self.get_ns_names_in_parent()): self.ns_dependencies[ns] = None def _set_server_cookies(self, response, server): server_cookie = response.get_server_cookie() if server_cookie is not None and server not in self.cookie_jar: self.cookie_jar[server] = server_cookie def _process_response_answer_rrset(self, rrset, query, response): if query.qname in (self.name, self.dlv_name): if rrset.rdtype == dns.rdatatype.MX: self._handle_mx_response(rrset) elif rrset.rdtype == dns.rdatatype.NS: 
self._handle_ns_response(rrset, not self.explicit_delegation) # check whether it is signed and whether the signer matches try: rrsig_rrset = response.message.find_rrset(response.message.answer, query.qname, query.rdclass, dns.rdatatype.RRSIG, rrset.rdtype) for rrsig in rrsig_rrset: if rrsig_rrset.covers == dns.rdatatype.DS and self.parent is None: pass elif rrsig_rrset.covers == dns.rdatatype.DS and rrsig.signer == self.parent_name(): pass elif rrsig_rrset.covers == dns.rdatatype.DLV and rrsig.signer == self.dlv_parent_name(): pass elif rrsig.signer == self.zone.name: pass else: self.external_signers[rrsig.signer] = None except KeyError: pass if rrset.rdtype == dns.rdatatype.CNAME: self._handle_cname_response(rrset) def _process_response(self, response, server, client, query, bailiwick, detect_ns, detect_cookies): '''Process a DNS response from a query, setting and updating instance variables appropriately, and calling helper methods as necessary.''' if response.message is None: return if detect_cookies: self._set_server_cookies(response, server) is_authoritative = response.is_authoritative() if response.is_valid_response(): if response.effective_tcp: self._valid_servers_clients_tcp.add((server, client)) else: self._valid_servers_clients_udp.add((server, client)) if is_authoritative: # we're marking servers as authoritative for the zone. if the query # type is DS or DLV, then query is to the parent zone, so its # authoritativeness means nothing to us. if query.rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV): pass # If this response comes from a parent server, and the response was # NXDOMAIN, then don't count it as authoritative. 
Either it was # the child of a zone, in which case _auth_servers_clients doesn't # matter (since _auth_servers_clients is only used for zones), or # it was a zone itself, and not all servers in the parent carry the # delegation elif response.message.rcode() == dns.rcode.NXDOMAIN and bailiwick != self.name: pass else: self._auth_servers_clients.add((server, client)) if not response.is_complete_response(): return # retrieve the corresponding RRset in the answer section rrset = None try: rrset = response.message.find_rrset(response.message.answer, query.qname, query.rdclass, query.rdtype) except KeyError: try: rrset = response.message.find_rrset(response.message.answer, query.qname, query.rdclass, dns.rdatatype.CNAME) except KeyError: pass # in the case where a corresponding RRset is found, analyze it here if rrset is not None: self._process_response_answer_rrset(rrset, query, response) # look for SOA in authority section, in the case of negative responses try: soa_rrset = [x for x in response.message.authority if x.rdtype == dns.rdatatype.SOA and x.rdclass == query.rdclass][0] if soa_rrset.name == self.name: self.has_soa = True except IndexError: pass if query.qname == self.name and detect_ns: # if this is a referral, also grab the referral information, if it # pertains to this name (could alternatively be a parent) if response.is_referral(query.qname, query.rdtype, query.rdclass, bailiwick): try: rrset = response.message.find_rrset(response.message.authority, self.name, query.rdclass, dns.rdatatype.NS) except KeyError: pass else: self._add_glue_ip_mapping(response) self._handle_ns_response(rrset, False) # if it is an (authoritative) answer that has authority information, then add it else: try: rrset = response.message.find_rrset(response.message.authority, query.qname, query.rdclass, dns.rdatatype.NS) self._handle_ns_response(rrset, is_authoritative and not self.explicit_delegation) except KeyError: pass def add_auth_ns_ip_mappings(self, *mappings): '''Add one or 
more mappings from NS targets to IPv4 or IPv6 addresses, as resolved by querying authoritative sources. Arguments are 2-tuples of the form (DNS name, address).''' for name, ip in mappings: if name not in self._auth_ns_ip_mapping: self._auth_ns_ip_mapping[name] = set() if ip is not None: self._auth_ns_ip_mapping[name].add(ip) def add_query(self, query, detect_ns, detect_cookies): '''Process a DNS query and its responses, setting and updating instance variables appropriately, and calling helper methods as necessary.''' bailiwick_map, default_bailiwick = self.get_bailiwick_mapping() key = (query.qname, query.rdtype) if key not in self.queries: self.queries[key] = self._query_cls(query.qname, query.rdtype, query.rdclass) self.queries[key].add_query(query, bailiwick_map, default_bailiwick) for server in query.responses: bailiwick = bailiwick_map.get(server, default_bailiwick) # note the fact that the server was queried self._all_servers_queried.add(server) for client in query.responses[server]: response = query.responses[server][client] # note clients used if client.version == 6: self.clients_ipv6.add(client) else: self.clients_ipv4.add(client) # note server responsiveness if response.udp_attempted: self._all_servers_clients_queried.add((server, client)) if response.tcp_attempted: self._all_servers_clients_queried_tcp.add((server, client)) if response.udp_responsive: self._responsive_servers_clients_udp.add((server, client)) if response.tcp_responsive: self._responsive_servers_clients_tcp.add((server, client)) self._process_response(query.responses[server][client], server, client, query, bailiwick, detect_ns, detect_cookies) def get_glue_ip_mapping(self): '''Return a reference to the mapping of targets of delegation records (i.e., NS records in the parent zone) and their corresponding IPv4 or IPv6 glue, if any.''' return self._glue_ip_mapping def get_root_hint_mapping(self): servers = {} hints = util.get_root_hints() for rdata in hints[(dns.name.root, 
dns.rdatatype.NS)]: servers[rdata.target] = set() for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): if (rdata.target, rdtype) in hints: servers[rdata.target].update([IPAddr(r.address) for r in hints[(rdata.target, rdtype)]]) for name, server in util.HISTORICAL_ROOT_IPS: if name not in servers: servers[name] = set() servers[name].add(server) return servers def _get_servers_from_hints(self, name, hints): servers = set() server_mapping = self._get_server_ip_mapping_from_hints(name, hints) for ns_name in server_mapping: servers.update(server_mapping[ns_name]) return servers def get_auth_ns_ip_mapping(self): '''Return a reference to the mapping of NS targets from delegation or authoritative source to their authoritative IPv4 and IPv6 addresses.''' return self._auth_ns_ip_mapping def get_ns_names_in_parent(self): '''Return the set of names corresponding to targets of delegation records.''' return set(self.get_glue_ip_mapping()) def get_ns_names_in_child(self): '''Return the set of names corresponding to targets of authoritative NS records.''' return self._ns_names_in_child def get_ns_names(self): '''Return the comprehensive set of names corresponding to NS targets.''' return self.get_ns_names_in_parent().union(self.get_ns_names_in_child()) def get_servers_in_parent(self): '''Return the IP addresses of servers corresponding to names in the delegation records. If the name is a subset of the name being queried, then glue is required, and the glue is used exclusively. If the name is in-bailiwick and there is glue in the referral, then the glue records alone are used; otherwise the authoritative IPs are used. 
If the name is out-of-bailiwick, then only the authoritative IPs are used.''' if not hasattr(self, '_servers_in_parent') or self._servers_in_parent is None: servers = set() if self.parent is None: return servers glue_ips = self.get_glue_ip_mapping() auth_ips = self.get_auth_ns_ip_mapping() for name in glue_ips: in_bailiwick = name.is_subdomain(self.parent_name()) glue_required = name.is_subdomain(self.name) if glue_required: servers.update(glue_ips[name]) elif in_bailiwick: if glue_ips[name]: servers.update(glue_ips[name]) elif name in auth_ips: servers.update(auth_ips[name]) elif name in auth_ips: servers.update(auth_ips[name]) self._servers_in_parent = servers return self._servers_in_parent def get_servers_in_child(self): '''Return the authoritative IP addresses of servers corresponding to names in the authoritative NS records.''' if not hasattr(self, '_servers_in_child') or self._servers_in_child is None: servers = set() auth_ips = self.get_auth_ns_ip_mapping() for name in self.get_ns_names_in_child(): if name in auth_ips: servers.update(auth_ips[name]) self._servers_in_child = servers return self._servers_in_child def get_designated_servers(self, no_cache=False): '''Return the set of glue or authoritative IP addresses of servers corresponding to names in the delegation or authoritative NS records.''' if not hasattr(self, '_designated_servers') or self._designated_servers is None: servers = set() glue_ips = self.get_glue_ip_mapping() auth_ips = self.get_auth_ns_ip_mapping() for name in glue_ips: servers.update(glue_ips[name]) for name in auth_ips: servers.update(auth_ips[name]) if no_cache: return servers self._designated_servers = servers return self._designated_servers def get_valid_servers_udp(self, proto=None): '''Return the set of servers that responded with a valid (rcode of NOERROR or NXDOMAIN) response.''' valid_servers = set([x[0] for x in self._valid_servers_clients_udp]) if proto is not None: return set([x for x in valid_servers if x.version == 
proto]) else: return valid_servers def get_valid_servers_tcp(self, proto=None): '''Return the set of servers that responded with a valid (rcode of NOERROR or NXDOMAIN) response.''' valid_servers = set([x[0] for x in self._valid_servers_clients_tcp]) if proto is not None: return set([x for x in valid_servers if x.version == proto]) else: return valid_servers def get_responsive_servers_udp(self, proto=None): '''Return the set of servers for which some type of response was received from any client over UDP.''' responsive_servers = set([x[0] for x in self._responsive_servers_clients_udp]) if proto is not None: return set([x for x in responsive_servers if x.version == proto]) else: return responsive_servers def get_responsive_servers_tcp(self, proto=None): '''Return the set of servers for which some type of response was received from any client over TCP.''' responsive_servers = set([x[0] for x in self._responsive_servers_clients_tcp]) if proto is not None: return set([x for x in responsive_servers if x.version == proto]) else: return responsive_servers def get_auth_or_designated_servers(self, proto=None, no_cache=False): '''Return the set of servers that either answered authoritatively or were explicitly designated by NS and glue or authoritative IP.''' if not hasattr(self, '_auth_or_designated_servers') or self._auth_or_designated_servers is None: servers = set([x[0] for x in self._auth_servers_clients]).union(self.get_designated_servers(no_cache)) if not no_cache: self._auth_or_designated_servers = servers else: servers = self._auth_or_designated_servers if proto is not None: return set([x for x in servers if x.version == proto]) else: return servers def get_responsive_auth_or_designated_servers(self, proto=None, no_cache=False): '''Return the set of servers that either answered authoritatively or were explicitly designated by NS and glue or authoritative IP and were responsive to queries.''' return self.get_auth_or_designated_servers(proto, 
no_cache).intersection(self.get_responsive_servers_udp(proto)) def get_valid_auth_or_designated_servers(self, proto=None, no_cache=False): '''Return the set of servers that either answered authoritatively or were explicitly designated by NS and glue or authoritative IP and returned a valid (rcode of NOERROR or NXDOMAIN) response.''' return self.get_auth_or_designated_servers(proto, no_cache).intersection(self.get_valid_servers_udp(proto)) def get_stealth_servers(self): '''Return the set of servers that authoritatively but weren't explicitly designated by NS and glue or authoritative IP.''' if not hasattr(self, '_stealth_auth_servers') or self._stealth_auth_servers is None: servers = self.get_auth_or_designated_servers().difference(self.get_designated_servers()) self._stealth_auth_servers = servers return self._stealth_auth_servers def get_ip_ns_name_mapping(self): '''Return a mapping of each designated server to the NS target name that it resolves to. The result for each IP is a list of names in which names that resolve to it authoritatively appear before names that map to it in glue.''' if not hasattr(self, '_ip_ns_name_mapping') or self._ip_ns_name_mapping is None: self._ip_ns_name_mapping = {} if self.name == dns.name.root: glue_ips = self.get_root_hint_mapping() else: glue_ips = self.get_glue_ip_mapping() auth_ips = self.get_auth_ns_ip_mapping() if self.stub: auth_names = set(auth_ips) else: auth_names = self.get_ns_names() # if there are no names from glue or from authoritative responses, # then use the authoritative IP. 
Such is the case with explicit # delegation if not auth_names: auth_names = auth_ips for name in auth_names: if name in auth_ips: for ip in auth_ips[name]: if ip not in self._ip_ns_name_mapping: self._ip_ns_name_mapping[ip] = [] self._ip_ns_name_mapping[ip].append(name) for name in glue_ips: for ip in glue_ips[name]: if ip not in self._ip_ns_name_mapping: self._ip_ns_name_mapping[ip] = [name] elif name not in self._ip_ns_name_mapping[ip]: self._ip_ns_name_mapping[ip].append(name) return self._ip_ns_name_mapping def get_ns_name_for_ip(self, ip): '''Return the NS target name(s) that resolve to the given IP, either authoritatively or using glue.''' ip_name_mapping = self.get_ip_ns_name_mapping() try: return ip_name_mapping[ip], self.name except KeyError: pass if self.parent is None: return [], None return self.parent.get_ns_name_for_ip(ip) def serialize(self, d=None, meta_only=False, trace=None): if d is None: d = OrderedDict() if trace is None: trace = [] if self in trace: return name_str = lb2s(self.name.canonicalize().to_text()) if name_str in d: return # serialize dependencies first because their version of the analysis # might be the most complete (considering re-dos) self._serialize_dependencies(d, meta_only, trace) if self.parent is not None: self.parent.serialize(d, meta_only, trace + [self]) if self.dlv_parent is not None: self.dlv_parent.serialize(d, meta_only, trace + [self]) if self.nxdomain_ancestor is not None: self.nxdomain_ancestor.serialize(d, meta_only, trace + [self]) clients_ipv4 = list(self.clients_ipv4) clients_ipv4.sort() clients_ipv6 = list(self.clients_ipv6) clients_ipv6.sort() d[name_str] = OrderedDict() d[name_str]['type'] = analysis_types[self.analysis_type] d[name_str]['stub'] = self.stub if self.cookie_standin is not None: d[name_str]['cookie_standin'] = lb2s(binascii.hexlify(self.cookie_standin)) if self.cookie_bad is not None: d[name_str]['cookie_bad'] = lb2s(binascii.hexlify(self.cookie_bad)) d[name_str]['analysis_start'] = 
fmt.datetime_to_str(self.analysis_start) d[name_str]['analysis_end'] = fmt.datetime_to_str(self.analysis_end) if not self.stub: d[name_str]['clients_ipv4'] = clients_ipv4 d[name_str]['clients_ipv6'] = clients_ipv6 if self.parent is not None: d[name_str]['parent'] = lb2s(self.parent_name().canonicalize().to_text()) if self.dlv_parent is not None: d[name_str]['dlv_parent'] = lb2s(self.dlv_parent_name().canonicalize().to_text()) if self.nxdomain_ancestor is not None: d[name_str]['nxdomain_ancestor'] = lb2s(self.nxdomain_ancestor_name().canonicalize().to_text()) if self.referral_rdtype is not None: d[name_str]['referral_rdtype'] = dns.rdatatype.to_text(self.referral_rdtype) if self.auth_rdtype is not None: d[name_str]['auth_rdtype'] = dns.rdatatype.to_text(self.auth_rdtype) if self.cookie_rdtype is not None: d[name_str]['cookie_rdtype'] = dns.rdatatype.to_text(self.cookie_rdtype) d[name_str]['explicit_delegation'] = self.explicit_delegation if self.nxdomain_name is not None: d[name_str]['nxdomain_name'] = lb2s(self.nxdomain_name.to_text()) d[name_str]['nxdomain_rdtype'] = dns.rdatatype.to_text(self.nxdomain_rdtype) if self.nxrrset_name is not None: d[name_str]['nxrrset_name'] = lb2s(self.nxrrset_name.to_text()) d[name_str]['nxrrset_rdtype'] = dns.rdatatype.to_text(self.nxrrset_rdtype) self._serialize_related(d[name_str], meta_only) def _serialize_related(self, d, meta_only): if self._auth_ns_ip_mapping: d['auth_ns_ip_mapping'] = OrderedDict() ns_names = list(self._auth_ns_ip_mapping.keys()) ns_names.sort() for name in ns_names: addrs = list(self._auth_ns_ip_mapping[name]) addrs.sort() d['auth_ns_ip_mapping'][lb2s(name.canonicalize().to_text())] = addrs if self.stub: return d['queries'] = [] query_keys = list(self.queries.keys()) query_keys.sort() for (qname, rdtype) in query_keys: for query in self.queries[(qname, rdtype)].queries.values(): d['queries'].append(query.serialize(meta_only)) def _serialize_dependencies(self, d, meta_only, trace): if self.stub: return for 
cname in self.cname_targets: for target, cname_obj in self.cname_targets[cname].items(): if cname_obj is not None: cname_obj.serialize(d, meta_only, trace=trace + [self]) for signer, signer_obj in self.external_signers.items(): if signer_obj is not None: signer_obj.serialize(d, meta_only, trace=trace + [self]) for target, ns_obj in self.ns_dependencies.items(): if ns_obj is not None: ns_obj.serialize(d, meta_only, trace=trace + [self]) for target, mx_obj in self.mx_targets.items(): if mx_obj is not None: mx_obj.serialize(d, meta_only, trace=trace + [self]) @classmethod def deserialize(cls, name, d1, cache=None, **kwargs): if cache is None: cache = {} if name in cache: return cache[name] name_str = lb2s(name.canonicalize().to_text()) d = d1[name_str] analysis_type = analysis_type_codes[d['type']] stub = d['stub'] if 'parent' in d: parent_name = dns.name.from_text(d['parent']) parent = cls.deserialize(parent_name, d1, cache=cache, **kwargs) else: parent = None if name != dns.name.root and 'dlv_parent' in d: dlv_parent_name = dns.name.from_text(d['dlv_parent']) dlv_parent = cls.deserialize(dlv_parent_name, d1, cache=cache, **kwargs) else: dlv_parent_name = None dlv_parent = None if 'nxdomain_ancestor' in d: nxdomain_ancestor_name = dns.name.from_text(d['nxdomain_ancestor']) nxdomain_ancestor = cls.deserialize(nxdomain_ancestor_name, d1, cache=cache, **kwargs) else: nxdomain_ancestor_name = None nxdomain_ancestor = None if 'cookie_standin' in d: cookie_standin = binascii.unhexlify(d['cookie_standin']) else: cookie_standin = None if 'cookie_bad' in d: cookie_bad = binascii.unhexlify(d['cookie_bad']) else: cookie_bad = None _logger.info('Loading %s' % fmt.humanize_name(name)) cache[name] = a = cls(name, stub=stub, analysis_type=analysis_type, cookie_standin=cookie_standin, cookie_bad=cookie_bad, **kwargs) a.parent = parent if dlv_parent is not None: a.dlv_parent = dlv_parent if nxdomain_ancestor is not None: a.nxdomain_ancestor = nxdomain_ancestor a.analysis_start = 
fmt.str_to_datetime(d['analysis_start']) a.analysis_end = fmt.str_to_datetime(d['analysis_end']) if not a.stub: if 'referral_rdtype' in d: a.referral_rdtype = dns.rdatatype.from_text(d['referral_rdtype']) if 'auth_rdtype' in d: a.auth_rdtype = dns.rdatatype.from_text(d['auth_rdtype']) if 'cookie_rdtype' in d: a.cookie_rdtype = dns.rdatatype.from_text(d['cookie_rdtype']) a.explicit_delegation = d['explicit_delegation'] if 'nxdomain_name' in d: a.nxdomain_name = dns.name.from_text(d['nxdomain_name']) a.nxdomain_rdtype = dns.rdatatype.from_text(d['nxdomain_rdtype']) if 'nxrrset_name' in d: a.nxrrset_name = dns.name.from_text(d['nxrrset_name']) a.nxrrset_rdtype = dns.rdatatype.from_text(d['nxrrset_rdtype']) a._deserialize_related(d) a._deserialize_dependencies(d1, cache) return a def _deserialize_related(self, d): if 'auth_ns_ip_mapping' in d: for target in d['auth_ns_ip_mapping']: self.add_auth_ns_ip_mappings((dns.name.from_text(target), None)) for addr in d['auth_ns_ip_mapping'][target]: self.add_auth_ns_ip_mappings((dns.name.from_text(target), IPAddr(addr))) if self.stub: return bailiwick_map, default_bailiwick = self.get_bailiwick_mapping() cookie_jar_map, default_cookie_jar = self.get_cookie_jar_mapping() cookie_standin = self.cookie_standin cookie_bad = self.cookie_bad query_map = {} #XXX backwards compatibility with previous version if isinstance(d['queries'], list): for query in d['queries']: key = (dns.name.from_text(query['qname']), dns.rdatatype.from_text(query['qtype'])) if key not in query_map: query_map[key] = [] query_map[key].append(query) else: for query_str in d['queries']: vals = query_str.split('/') qname = dns.name.from_text('/'.join(vals[:-2])) rdtype = dns.rdatatype.from_text(vals[-1]) key = (qname, rdtype) if key not in query_map: query_map[key] = [] for query in d['queries'][query_str]: query_map[key].append(query) # Import the following first, in this order: # - Queries used to detect delegation (NS and referral_rdtype) # - Queries used to 
detect NS records from authority section (auth_rdtype) # - Queries used to detect server cookies (cookie_rdtype) delegation_types = OrderedDict(((dns.rdatatype.NS, None),)) if self.referral_rdtype is not None: delegation_types[self.referral_rdtype] = None if self.auth_rdtype is not None: delegation_types[self.auth_rdtype] = None if self.cookie_rdtype is not None: delegation_types[self.cookie_rdtype] = None for rdtype in delegation_types: # if the query has already been imported, then # don't re-import if (self.name, rdtype) in self.queries: continue key = (self.name, rdtype) if key in query_map: _logger.debug('Importing %s/%s...' % (fmt.humanize_name(self.name), dns.rdatatype.to_text(rdtype))) for query in query_map[key]: detect_ns = rdtype in (dns.rdatatype.NS, self.referral_rdtype, self.auth_rdtype) detect_cookies = rdtype == self.cookie_rdtype self.add_query(Q.DNSQuery.deserialize(query, bailiwick_map, default_bailiwick, cookie_jar_map, default_cookie_jar, cookie_standin, cookie_bad), detect_ns, detect_cookies) # set the NS dependencies for the name if self.is_zone(): self.set_ns_dependencies() for key in query_map: qname, rdtype = key # if the query has already been imported, then # don't re-import if (qname, rdtype) in self.queries: continue if qname == self.name and rdtype in delegation_types: continue if (qname, rdtype) == (self.nxdomain_name, self.nxdomain_rdtype): extra = ' (NXDOMAIN)' elif (qname, rdtype) == (self.nxrrset_name, self.nxrrset_rdtype): extra = ' (NODATA)' else: extra = '' _logger.debug('Importing %s/%s%s...' 
% (fmt.humanize_name(qname), dns.rdatatype.to_text(rdtype), extra)) for query in query_map[key]: self.add_query(Q.DNSQuery.deserialize(query, bailiwick_map, default_bailiwick, cookie_jar_map, default_cookie_jar, cookie_standin, cookie_bad), False, False) def _deserialize_dependencies(self, d, cache): if self.stub: return for cname in self.cname_targets: for target in self.cname_targets[cname]: self.cname_targets[cname][target] = self.__class__.deserialize(target, d, cache=cache) # these are optional for signer in self.external_signers: if lb2s(signer.canonicalize().to_text()) in d: self.external_signers[signer] = self.__class__.deserialize(signer, d, cache=cache) for target in self.ns_dependencies: if lb2s(target.canonicalize().to_text()) in d: self.ns_dependencies[target] = self.__class__.deserialize(target, d, cache=cache) for target in self.mx_targets: if lb2s(target.canonicalize().to_text()) in d: self.mx_targets[target] = self.__class__.deserialize(target, d, cache=cache) class ActiveDomainNameAnalysis(OnlineDomainNameAnalysis): def __init__(self, *args, **kwargs): super(ActiveDomainNameAnalysis, self).__init__(*args, **kwargs) self.complete = threading.Event() class Analyst(object): analysis_model = ActiveDomainNameAnalysis _simple_query = Q.SimpleDNSQuery _quick_query = Q.QuickDNSSECQuery _diagnostic_query = Q.DiagnosticQuery _tcp_diagnostic_query = Q.TCPDiagnosticQuery _pmtu_diagnostic_query = Q.PMTUDiagnosticQuery _truncation_diagnostic_query = Q.TruncationDiagnosticQuery _edns_version_diagnostic_query = Q.EDNSVersionDiagnosticQuery _edns_flag_diagnostic_query = Q.EDNSFlagDiagnosticQuery _edns_opt_diagnostic_query = Q.EDNSOptDiagnosticQuery default_th_factory = transport.DNSQueryTransportHandlerDNSFactory() qname_only = True analysis_type = ANALYSIS_TYPE_AUTHORITATIVE clone_attrnames = ['rdclass', 'dlv_domain', 'try_ipv4', 'try_ipv6', 'client_ipv4', 'client_ipv6', 'query_class_mixin', 'logger', 'ceiling', 'edns_diagnostics', 'follow_ns', 
'explicit_delegations', 'stop_at_explicit', 'odd_ports', 'analysis_cache', 'cache_level', 'analysis_cache_lock', 'transport_manager', 'th_factories', 'resolver'] def __init__(self, name, rdclass=dns.rdataclass.IN, dlv_domain=None, try_ipv4=True, try_ipv6=True, client_ipv4=None, client_ipv6=None, query_class_mixin=None, logger=_logger, ceiling=None, edns_diagnostics=False, follow_ns=False, follow_mx=False, trace=None, explicit_delegations=None, stop_at_explicit=None, odd_ports=None, extra_rdtypes=None, explicit_only=False, analysis_cache=None, cache_level=None, analysis_cache_lock=None, th_factories=None, transport_manager=None, resolver=None): self.simple_query = self._simple_query self.quick_query = self._quick_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.diagnostic_query_no_server_cookie = self._diagnostic_query.add_mixin(query_class_mixin) self.diagnostic_query_bad_server_cookie = self._diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_BAD) self.diagnostic_query = self._diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.tcp_diagnostic_query = self._tcp_diagnostic_query.add_mixin(query_class_mixin).remove_cookie_option() self.pmtu_diagnostic_query = self._pmtu_diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.truncation_diagnostic_query = self._truncation_diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.edns_version_diagnostic_query = self._edns_version_diagnostic_query self.edns_flag_diagnostic_query = self._edns_flag_diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.edns_opt_diagnostic_query = self._edns_opt_diagnostic_query.add_mixin(query_class_mixin).add_server_cookie(COOKIE_STANDIN) self.query_class_mixin = query_class_mixin if transport_manager is None: self.transport_manager = transport.DNSQueryTransportManager() else: self.transport_manager = transport_manager if 
th_factories is None: self.th_factories = (self.default_th_factory,) else: self.th_factories = th_factories self.allow_loopback_query = not bool([x for x in self.th_factories if not x.cls.allow_loopback_query]) self.allow_private_query = not bool([x for x in self.th_factories if not x.cls.allow_private_query]) self.name = name self.rdclass = rdclass self.dlv_domain = dlv_domain if explicit_delegations is None: self.explicit_delegations = {} else: self.explicit_delegations = explicit_delegations if stop_at_explicit is None: self.stop_at_explicit = {} else: self.stop_at_explicit = stop_at_explicit if odd_ports is None: self.odd_ports = {} else: self.odd_ports = odd_ports if resolver is None: resolver = self._get_resolver() self.resolver = resolver self.ceiling = ceiling # if an ancestor of the name (not wildcard!) is explicitly delegated, # then set the ceiling to the lowest ancestor with an explicit # delegation. c = self.name try: while True: if (c, dns.rdatatype.NS) in self.explicit_delegations and self.stop_at_explicit[c]: break c = c.parent() except dns.name.NoParent: # if no ancestors are explicitly delegated, then don't modify the # ceiling pass else: # if ceiling was not specified, if there is a ceiling, but the name # is not a subdomain of the ceiling, or if the lowest explicit # delegation is a subdomain of the specified ceiling, then replace # it with the modified lowest ancestor that is explicitly # delegated. 
if ceiling is None or not self.name.is_subdomain(ceiling) or c.is_subdomain(ceiling): ceiling = c self.local_ceiling = self._detect_ceiling(ceiling)[0] self.try_ipv4 = try_ipv4 self.try_ipv6 = try_ipv6 self.client_ipv4 = client_ipv4 self.client_ipv6 = client_ipv6 self.logger = logger self.edns_diagnostics = edns_diagnostics cookie_opt = self.diagnostic_query.get_cookie_opt() self.dns_cookies = cookie_opt is not None self.follow_ns = follow_ns self.follow_mx = follow_mx if trace is None: self.trace = [] else: self.trace = trace assert not explicit_only or extra_rdtypes is not None or self._force_dnskey_query(name), 'If explicit_only is specified, then extra_rdtypes must be specified or force_dnskey must be true.' self.extra_rdtypes = extra_rdtypes self.explicit_only = explicit_only if analysis_cache is None: self.analysis_cache = {} else: self.analysis_cache = analysis_cache self.cache_level = cache_level if analysis_cache_lock is None: self.analysis_cache_lock = threading.Lock() else: self.analysis_cache_lock = analysis_cache_lock self._detect_cname_chain() def _get_resolver(self): hints = util.get_root_hints() for key in self.explicit_delegations: hints[key] = self.explicit_delegations[key] return Resolver.FullResolver(hints, query_cls=(self.quick_query, self.diagnostic_query), odd_ports=self.odd_ports, cookie_standin=COOKIE_STANDIN, transport_manager=self.transport_manager) def _detect_cname_chain(self): self._cname_chain = [] if self.dlv_domain == self.name: return if len(self.name) < 3: return try: rdtype = self._rdtypes_to_query(self.name)[0] except IndexError: rdtype = dns.rdatatype.A try: ans = self.resolver.query_for_answer(self.name, rdtype, self.rdclass, allow_noanswer=True) except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.exception.DNSException): return cname = self.name for i in range(Resolver.MAX_CNAME_REDIRECTION): try: cname = ans.response.find_rrset(ans.response.answer, cname, self.rdclass, dns.rdatatype.CNAME)[0].target 
self._cname_chain.append(cname) except KeyError: return def _detect_ceiling(self, ceiling): if ceiling == dns.name.root or ceiling is None: # make sure we can communicate with a resolver; we must be able to # resolve the root server, response = self.resolver.query(dns.name.root, dns.rdatatype.NS) if server is None: raise NetworkConnectivityException('No network connectivity available!') return ceiling, None # if there is a ceiling, but the name is not a subdomain # of the ceiling, then use the name itself as a base if not self.name.is_subdomain(ceiling): ceiling = self.name try: ans = self.resolver.query_for_answer(ceiling, dns.rdatatype.NS, self.rdclass) try: ans.response.find_rrset(ans.response.answer, ceiling, self.rdclass, dns.rdatatype.NS) return ceiling, False except KeyError: pass except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): pass except dns.exception.DNSException as e: parent_ceiling, fail = self._detect_ceiling(ceiling.parent()) if fail: return parent_ceiling, True else: return ceiling, True return self._detect_ceiling(ceiling.parent()) def _get_servers_from_hints(self, name, hints): servers = set() for rdata in hints[(name, dns.rdatatype.NS)]: for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): if (rdata.target, rdtype) in hints: servers.update([IPAddr(r.address) for r in hints[(rdata.target, rdtype)]]) return servers def _root_servers(self, proto=None): key = None if (dns.name.root, dns.rdatatype.NS) in self.explicit_delegations: key = dns.name.root elif (WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS) in self.explicit_delegations: key = WILDCARD_EXPLICIT_DELEGATION if key is not None: servers = self._get_servers_from_hints(key, self.explicit_delegations) else: servers = self._get_servers_from_hints(dns.name.root, util.get_root_hints()) if proto == 4: servers = set([x for x in servers if x.version == 4]) elif proto == 6: servers = set([x for x in servers if x.version == 6]) return servers def _is_referral_of_type(self, rdtype): '''Return True 
if analysis of this name was invoked as a dependency (specified by rdtype) for another name; False otherwise. Examples are CNAME, NS, MX.''' try: return self.trace[-1][1] == rdtype except IndexError: return False def _original_alias_of_cname(self): name = self.name for i in range(len(self.trace) - 1, -1, -1): if self.trace[i][1] != dns.rdatatype.CNAME: return name name = self.trace[i][0].name return name def _force_dnskey_query(self, name): return name == self.name and self._is_referral_of_type(dns.rdatatype.RRSIG) def _rdtypes_to_query(self, name): orig_name = self._original_alias_of_cname() rdtypes = [] if self.explicit_only: if self.name == name: if self.extra_rdtypes is not None: rdtypes.extend(self.extra_rdtypes) else: if name in self._cname_chain: rdtypes.extend(self._rdtypes_to_query(self.name)) else: rdtypes.extend(self._rdtypes_to_query_for_name(name)) if self.name == name: if orig_name != name: rdtypes.extend(self._rdtypes_to_query_for_name(orig_name)) if self.extra_rdtypes is not None: rdtypes.extend(self.extra_rdtypes) else: if name in self._cname_chain: rdtypes.extend(self._rdtypes_to_query(self.name)) if self._ask_tlsa_queries(self.name) and len(name) == len(self.name) - 2: rdtypes.extend([dns.rdatatype.A, dns.rdatatype.AAAA]) # remove duplicates rdtypes = list(OrderedDict.fromkeys(rdtypes)) return rdtypes def _rdtypes_to_query_for_name(self, name): rdtypes = [] if self._ask_ptr_queries(name): rdtypes.append(dns.rdatatype.PTR) elif self._ask_naptr_queries(name): rdtypes.append(dns.rdatatype.NAPTR) elif self._ask_tlsa_queries(name): rdtypes.append(dns.rdatatype.TLSA) elif self._ask_srv_queries(name): rdtypes.append(dns.rdatatype.SRV) elif self._is_dkim(name): rdtypes.append(dns.rdatatype.TXT) elif name.is_subdomain(ARPA_NAME): pass elif PROTO_LABEL_RE.search(lb2s(name[0])): pass elif self._is_sld_or_lower(name): rdtypes.extend([dns.rdatatype.A, dns.rdatatype.AAAA]) return rdtypes def _ask_ptr_queries(self, name): '''Return True if PTR queries should be 
asked for this name, as guessed by the nature of the name, based on its length and its presence in the in-addr.arpa or ip6.arpa trees.''' # if this name is in the ip6.arpa tree and is the length of a full # reverse IPv6 address, then return True if name.is_subdomain(IP6_ARPA_NAME) and len(name) == 35: return True # if this name is in the in-addr.arpa tree and is the length of a full # reverse IPv4 address, then return True if name.is_subdomain(INADDR_ARPA_NAME) and len(name) == 7: return True return False def _ask_naptr_queries(self, name): '''Return True if NAPTR queries should be asked for this name, as guessed by the nature of the name, based on its presence in the e164.arpa tree.''' if name.is_subdomain(E164_ARPA_NAME) and name != E164_ARPA_NAME: return True return False def _ask_tlsa_queries(self, name): '''Return True if TLSA queries should be asked for this name, which is determined by examining the structure of the name for _._ format.''' if len(name) > 2 and DANE_PORT_RE.search(lb2s(name[0])) is not None and PROTO_LABEL_RE.search(lb2s(name[1])) is not None: return True return False def _ask_srv_queries(self, name): '''Return True if SRV queries should be asked for this name, which is determined by examining the structure of the name for common service-related names.''' if len(name) > 2 and SRV_PORT_RE.search(lb2s(name[0])) is not None and PROTO_LABEL_RE.search(lb2s(name[1])) is not None: return True return False def _is_dkim(self, name): '''Return True if the name is a DKIM name.''' return '_domainkey' in name def _is_sld_or_lower(self, name): '''Return True if the name is an SLD or lower.''' return len(name) >= 3 def _ask_non_delegation_queries(self, name): '''Return True if non-delegation-related queries should be asked for name.''' if self.qname_only and not \ (name == self.name or \ name in self._cname_chain or \ (self._ask_tlsa_queries(self.name) and len(name) == len(self.name) - 2)): return False if self.dlv_domain == self.name: return False return 
def _add_query(self, name_obj, query, detect_ns, detect_cookies, iterative=False):
    '''Attach query (and its responses) to name_obj.  If nothing was
    actually asked (e.g., due to client-side connectivity failure) on a
    non-iterative query, raise a connectivity failure instead.'''

    if not query.responses and not iterative:
        self._raise_connectivity_error_local(query.servers)
    name_obj.add_query(query, detect_ns, detect_cookies)

def _filter_servers_network(self, servers):
    '''Drop servers whose IP version is not enabled for this analysis.'''

    if not self.try_ipv6:
        servers = [s for s in servers if s.version != 6]
    if not self.try_ipv4:
        servers = [s for s in servers if s.version != 4]
    return servers

def _filter_servers_locality(self, servers):
    '''Drop loopback and private/link-local/unique-local addresses unless
    explicitly allowed; always drop addresses in 0.0.0.0/8.'''

    if not self.allow_loopback_query:
        servers = [s for s in servers if not LOOPBACK_IPV4_RE.match(s) and s != LOOPBACK_IPV6]
    if not self.allow_private_query:
        servers = [s for s in servers if not RFC_1918_RE.match(s) and \
                not LINK_LOCAL_RE.match(s) and not UNIQ_LOCAL_RE.match(s)]
    return [s for s in servers if ZERO_SLASH8_RE.search(s) is None]

def _filter_servers(self, servers, no_raise=False):
    '''Apply network and locality filters.  If the network filter alone
    eliminated every server, raise a remote connectivity error (unless
    no_raise is True).'''

    remaining = self._filter_servers_network(servers)
    if servers and not remaining and not no_raise:
        self._raise_connectivity_error_remote()
    return self._filter_servers_locality(remaining)

def _get_name_for_analysis(self, name, stub=False, lock=True):
    '''Return the analysis object for name, creating (and implicitly
    claiming) it if it does not yet exist.  If another thread owns the
    name, wait until its analysis completes; re-do the analysis when the
    cached result is insufficient for the current request.'''

    with self.analysis_cache_lock:
        try:
            name_obj = self.analysis_cache[name]
        except KeyError:
            if lock:
                name_obj = self.analysis_cache[name] = self.analysis_model(name, stub=stub, analysis_type=self.analysis_type, cookie_standin=COOKIE_STANDIN)
                return name_obj
            # if not locking, then return None
            else:
                return None

    # if there is a complete event, then wait on it
    if hasattr(name_obj, 'complete'):
        name_obj.complete.wait()

    # loop and wait for analysis to be completed
    while name_obj.analysis_end is None:
        time.sleep(1)
        name_obj = self.analysis_cache[name]

    # check if this analysis needs to be re-done
    if self.name == name:
        redo_analysis = False

        # re-do analysis if force_dnskey is True and dnskey hasn't been
        # queried
        if self._force_dnskey_query(self.name) and (self.name, dns.rdatatype.DNSKEY) not in name_obj.queries:
            redo_analysis = True

        # re-do analysis if there were no queries (previously an
        # "empty" non-terminal) but now it is the name in question
        if not name_obj.queries:
            redo_analysis = True

        # re-do analysis if this name is referenced by an alias
        # and previously the necessary queries weren't asked
        if self._is_referral_of_type(dns.rdatatype.CNAME):
            rdtypes_to_query = set(self._rdtypes_to_query(name))
            rdtypes_queried = set(r for n, r in name_obj.queries if n == name_obj.name)
            if rdtypes_to_query.difference(rdtypes_queried):
                redo_analysis = True

        # if the previous analysis was a stub, but now we want the whole
        # analysis
        if name_obj.stub and not stub:
            redo_analysis = True

        if redo_analysis:
            with self.analysis_cache_lock:
                # only evict the cached object if it is still the same one
                # we waited on
                if name_obj.uuid == self.analysis_cache[name].uuid:
                    del self.analysis_cache[name]
            return self._get_name_for_analysis(name, stub, lock)

    return name_obj

def analyze_async(self, callback=None, exc_callback=None):
    '''Run analyze() in a worker thread; deliver the result to callback or
    the exception info to exc_callback.  Return the thread.'''

    def _run():
        try:
            result = self.analyze()
            if callback is not None:
                callback(result)
        except:
            # intentionally broad: forward any failure to the caller
            if exc_callback is not None:
                exc_callback(sys.exc_info())
    t = threading.Thread(target=_run)
    t.start()
    return t

def analyze(self):
    '''Analyze the DLV domain (if configured), then the name itself.'''

    self._analyze_dlv()
    return self._analyze(self.name)

def _analyze_dlv(self):
    '''If a DLV domain is configured, and it is neither the analysis name
    itself nor already cached, analyze it first using a clone of this
    analyst with the ceiling set to the DLV domain.'''

    if self.dlv_domain is not None and self.dlv_domain != self.name and self.dlv_domain not in self.analysis_cache:
        kwargs = {n: getattr(self, n) for n in self.clone_attrnames}
        kwargs['ceiling'] = self.dlv_domain
        a = self.__class__(self.dlv_domain, **kwargs)
        a.analyze()

def _finalize_analysis_proper(self, name_obj):
    pass

def _finalize_analysis_all(self, name_obj):
    pass

def _cleanup_analysis_proper(self, name_obj):
    # release any threads waiting on this name's completion event
    if hasattr(name_obj, 'complete'):
        name_obj.complete.set()

def _cleanup_analysis_all(self, name_obj):
    if self.cache_level is not None and len(name_obj.name) > self.cache_level:
        del self.analysis_cache[name_obj.name]
def _handle_explicit_delegations(self, name_obj):
    '''If an explicit delegation is configured for name_obj.name (or the
    wildcard), install the configured NS-target-to-address mappings and
    mark the object as explicitly delegated.'''

    key = None
    if (name_obj.name, dns.rdatatype.NS) in self.explicit_delegations:
        key = name_obj.name
    elif (WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS) in self.explicit_delegations:
        key = WILDCARD_EXPLICIT_DELEGATION
    if key is not None:
        for ns_rdata in self.explicit_delegations[(key, dns.rdatatype.NS)]:
            for a_rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA):
                if (ns_rdata.target, a_rdtype) in self.explicit_delegations:
                    name_obj.add_auth_ns_ip_mappings(*[(ns_rdata.target, IPAddr(r.address)) for r in self.explicit_delegations[(ns_rdata.target, a_rdtype)]])
        name_obj.explicit_delegation = True

def _analyze_stub(self, name):
    '''Perform a minimal ("stub") analysis of name: resolve its NS RRset
    and the address(es) of each NS target, without the full battery of
    diagnostic queries.'''

    name_obj = self._get_name_for_analysis(name, stub=True)
    if name_obj.analysis_end is not None:
        return name_obj

    try:
        self.logger.info('Analyzing %s (stub)' % fmt.humanize_name(name))

        name_obj.analysis_start = datetime.datetime.now(fmt.utc).replace(microsecond=0)

        self._handle_explicit_delegations(name_obj)
        if not name_obj.explicit_delegation:
            try:
                ans = self.resolver.query_for_answer(name, dns.rdatatype.NS, self.rdclass)

                # resolve every name in the NS RRset
                query_tuples = []
                for rr in ans.rrset:
                    query_tuples.extend([(rr.target, dns.rdatatype.A, self.rdclass), (rr.target, dns.rdatatype.AAAA, self.rdclass)])

                answer_map = self.resolver.query_multiple_for_answer(*query_tuples)
                for query_tuple in answer_map:
                    a = answer_map[query_tuple]
                    if isinstance(a, Resolver.DNSAnswer):
                        for a_rr in a.rrset:
                            name_obj.add_auth_ns_ip_mappings((query_tuple[0], IPAddr(a_rr.to_text())))
            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
                # negative response: this name is not a zone cut; defer to
                # the parent's zone
                name_obj.parent = self._analyze_stub(name.parent()).zone
            except dns.exception.DNSException:
                if name == dns.name.root:
                    raise NetworkConnectivityException('DNS resolver is unresponsive!')
                name_obj.parent = self._analyze_stub(name.parent()).zone

        name_obj.analysis_end = datetime.datetime.now(fmt.utc).replace(microsecond=0)

        self._finalize_analysis_proper(name_obj)
        self._finalize_analysis_all(name_obj)
    finally:
        self._cleanup_analysis_proper(name_obj)
        self._cleanup_analysis_all(name_obj)

    return name_obj

def _analyze_ancestry(self, name):
    '''Analyze the ancestry of name; return a tuple of
    (parent_obj, dlv_parent_obj, nxdomain_ancestor).'''

    # only analyze the parent if the name is not root and if there is no
    # ceiling or the name is a subdomain of the ceiling
    if name == dns.name.root:
        parent_obj = None
    elif (name, dns.rdatatype.NS) in self.explicit_delegations and self.stop_at_explicit[name]:
        parent_obj = None
    elif self.local_ceiling is not None and self.local_ceiling.is_subdomain(name):
        parent_obj = self._analyze_stub(name.parent())
    else:
        parent_obj = self._analyze(name.parent())

    if parent_obj is not None:
        nxdomain_ancestor = parent_obj.nxdomain_ancestor
        if nxdomain_ancestor is None and \
                parent_obj.referral_rdtype is not None and \
                parent_obj.queries[(parent_obj.name, parent_obj.referral_rdtype)].is_nxdomain_all():
            nxdomain_ancestor = parent_obj

        # for zones other than the root assign parent_obj to the zone apex,
        # rather than the simply the domain formed by dropping its lower
        # leftmost label
        parent_obj = parent_obj.zone
    else:
        nxdomain_ancestor = None

    # retrieve the dlv
    if self.dlv_domain is not None and self.name != self.dlv_domain:
        dlv_parent_obj = self.analysis_cache[self.dlv_domain]
    else:
        dlv_parent_obj = None

    return parent_obj, dlv_parent_obj, nxdomain_ancestor

def _analyze(self, name):
    '''Analyze a DNS name to learn about its health using introspective
    queries.'''

    # determine immediately if we need to do anything
    name_obj = self._get_name_for_analysis(name, lock=False)
    if name_obj is not None and name_obj.analysis_end is not None:
        return name_obj

    parent_obj, dlv_parent_obj, nxdomain_ancestor = \
            self._analyze_ancestry(name)

    # get or create the name
    name_obj = self._get_name_for_analysis(name)
    if name_obj.analysis_end is not None:
        return name_obj

    try:
        try:
            name_obj.parent = parent_obj
            name_obj.dlv_parent = dlv_parent_obj
            name_obj.nxdomain_ancestor = nxdomain_ancestor

            name_obj.analysis_start = datetime.datetime.now(fmt.utc).replace(microsecond=0)

            # perform the actual analysis on this name
            self._analyze_name(name_obj)

            # set analysis_end
            name_obj.analysis_end = datetime.datetime.now(fmt.utc).replace(microsecond=0)

            # remove dlv_parent if there are no DLV queries associated with it
            if name_obj.dlv_parent is not None and \
                    (name_obj.dlv_name, dns.rdatatype.DLV) not in name_obj.queries:
                name_obj.dlv_parent = None

            # sanity check - if we weren't able to get responses from any
            # servers, check that we actually have connectivity
            self._check_connectivity(name_obj)

            self._finalize_analysis_proper(name_obj)
        finally:
            self._cleanup_analysis_proper(name_obj)

        # analyze dependencies
        self._analyze_dependencies(name_obj)
        self._finalize_analysis_all(name_obj)
    finally:
        self._cleanup_analysis_all(name_obj)

    return name_obj
def _analyze_name(self, name_obj):
    '''Analyze delegation for name_obj, then issue the per-name diagnostic
    queries.'''

    self.logger.info('Analyzing %s' % fmt.humanize_name(name_obj.name))

    self._handle_explicit_delegations(name_obj)
    if not name_obj.explicit_delegation:
        # analyze delegation, and return if name doesn't exist, unless
        # explicit_only was specified
        yxdomain = self._analyze_delegation(name_obj)
        if not yxdomain and not self.explicit_only:
            return

    # set the NS dependencies for the name
    if name_obj.is_zone():
        name_obj.set_ns_dependencies()

    self._analyze_queries(name_obj)

def _analyze_queries(self, name_obj):
    '''Build, execute, and record the diagnostic queries for name_obj:
    EDNS/0x20/cookie/NSEC3PARAM/negative queries for zones, MX/TXT for SLD
    or lower, SOA/DNSKEY/CDNSKEY/CDS for zones or DNSKEY-referencing names,
    DS/DLV at the parent, plus any explicitly requested rdtypes.'''

    bailiwick = name_obj.zone.name

    servers = name_obj.zone.get_auth_or_designated_servers()
    # if we haven't queried any of the designated servers, then query them
    # all
    if not name_obj.zone._all_servers_queried.intersection(servers):
        pass
    # otherwise, just query the ones that were responsive
    else:
        servers = name_obj.zone.get_responsive_auth_or_designated_servers()

    odd_ports = {s: self.odd_ports[(n, s)] for n, s in self.odd_ports if n in (name_obj.zone.name, WILDCARD_EXPLICIT_DELEGATION)}
    cookie_jar = name_obj.zone.cookie_jar

    servers = self._filter_servers(servers)
    exclude_no_answer = set()
    queries = {}

    # if there are responsive servers to query...
    if servers:

        # If 1) this is a zone, 2) DNS cookies are supported, and
        # 3) cookies have not yet been elicited, then issue queries now to
        # elicit DNS cookies.
        if name_obj.is_zone() and self.dns_cookies and name_obj.cookie_rdtype is None:
            self.logger.debug('Querying for DNS server cookies %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.SOA)))
            query = self.diagnostic_query_no_server_cookie(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports)
            query.execute(tm=self.transport_manager, th_factories=self.th_factories)
            self._add_query(name_obj, query, False, True)
            name_obj.cookie_rdtype = dns.rdatatype.SOA

        # queries specific to zones for which non-delegation-related
        # queries are being issued
        if name_obj.is_zone() and self._ask_non_delegation_queries(name_obj.name) and not self.explicit_only:

            # EDNS diagnostic queries
            if self.edns_diagnostics:
                self.logger.debug('Preparing EDNS diagnostic queries %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.SOA)))
                queries[(name_obj.name, -(dns.rdatatype.SOA+100))] = self.edns_version_diagnostic_query(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports)
                queries[(name_obj.name, -(dns.rdatatype.SOA+101))] = self.edns_opt_diagnostic_query(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                queries[(name_obj.name, -(dns.rdatatype.SOA+102))] = self.edns_flag_diagnostic_query(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            # Query with a mixed-case name for 0x20, if possible
            mixed_case_name = self._mix_case(name_obj.name)
            if mixed_case_name is not None:
                self.logger.debug('Preparing 0x20 query %s/%s...' % (fmt.humanize_name(mixed_case_name, canonicalize=False), dns.rdatatype.to_text(dns.rdatatype.SOA)))
                queries[(name_obj.name, -(dns.rdatatype.SOA+103))] = self.diagnostic_query(mixed_case_name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            # DNS cookies diagnostic queries
            if self.dns_cookies:
                self.logger.debug('Preparing DNS cookie diagnostic query %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.SOA)))
                queries[(name_obj.name, -(dns.rdatatype.SOA+104))] = self.diagnostic_query_bad_server_cookie(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_bad=COOKIE_BAD)

            # NSEC3PARAM
            self.logger.debug('Preparing query %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.NSEC3PARAM)))
            # FIX: this query previously passed cookie_bad=COOKIE_STANDIN,
            # which conflated the placeholder cookie (used with
            # cookie_standin= on every other standard diagnostic query) with
            # the deliberately bad cookie (cookie_bad=COOKIE_BAD, used only
            # by diagnostic_query_bad_server_cookie above).  Use the
            # standard cookie_jar/cookie_standin pair instead.
            queries[(name_obj.name, dns.rdatatype.NSEC3PARAM)] = self.diagnostic_query(name_obj.name, dns.rdatatype.NSEC3PARAM, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            # negative queries for all zones
            self._set_nxdomain_query(name_obj)
            if name_obj.nxdomain_name is not None:
                self.logger.debug('Preparing query %s/%s (NXDOMAIN)...' % (fmt.humanize_name(name_obj.nxdomain_name), dns.rdatatype.to_text(name_obj.nxdomain_rdtype)))
                queries[(name_obj.nxdomain_name, name_obj.nxdomain_rdtype)] = self.diagnostic_query(name_obj.nxdomain_name, name_obj.nxdomain_rdtype, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            # if the name is SLD or lower, then ask MX and TXT
            if self._is_sld_or_lower(name_obj.name):
                self.logger.debug('Preparing query %s/MX...' % fmt.humanize_name(name_obj.name))
                # note that we use a PMTU diagnostic query here, to simultaneously test PMTU
                queries[(name_obj.name, dns.rdatatype.MX)] = self.pmtu_diagnostic_query(name_obj.name, dns.rdatatype.MX, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                # we also do a query with small UDP payload to elicit and test a truncated response
                queries[(name_obj.name, -dns.rdatatype.MX)] = self.truncation_diagnostic_query(name_obj.name, dns.rdatatype.MX, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

                self.logger.debug('Preparing query %s/TXT...' % fmt.humanize_name(name_obj.name))
                queries[(name_obj.name, dns.rdatatype.TXT)] = self.diagnostic_query(name_obj.name, dns.rdatatype.TXT, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

        # Issue a NODATA query (with type CNAME) for all zones. This is used
        # to detect problems with subdomains.
        if name_obj.is_zone() and not self.explicit_only:
            self._set_nodata_query(name_obj)
            if name_obj.nxrrset_name is not None:
                self.logger.debug('Preparing query %s/%s (NODATA)...' % (fmt.humanize_name(name_obj.nxrrset_name), dns.rdatatype.to_text(name_obj.nxrrset_rdtype)))
                queries[(name_obj.nxrrset_name, name_obj.nxrrset_rdtype)] = self.diagnostic_query(name_obj.nxrrset_name, name_obj.nxrrset_rdtype, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

    # for zones and for (non-zone) names which have DNSKEYs referenced
    if name_obj.is_zone() or self._force_dnskey_query(name_obj.name):

        # if there are responsive servers to query...
        if servers:
            if self._ask_non_delegation_queries(name_obj.name) and not self.explicit_only:
                self.logger.debug('Preparing query %s/SOA...' % fmt.humanize_name(name_obj.name))
                queries[(name_obj.name, dns.rdatatype.SOA)] = self.diagnostic_query(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

                if name_obj.is_zone():
                    # for zones we also use a TCP diagnostic query here, to simultaneously test TCP connectivity
                    queries[(name_obj.name, -dns.rdatatype.SOA)] = self.tcp_diagnostic_query(name_obj.name, dns.rdatatype.SOA, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                else:
                    # for non-zones we don't need to keep the (UDP) SOA query, if there is no positive response
                    exclude_no_answer.add((name_obj.name, dns.rdatatype.SOA))

            self.logger.debug('Preparing query %s/DNSKEY...' % fmt.humanize_name(name_obj.name))
            # note that we use a PMTU diagnostic query here, to simultaneously test PMTU
            queries[(name_obj.name, dns.rdatatype.DNSKEY)] = self.pmtu_diagnostic_query(name_obj.name, dns.rdatatype.DNSKEY, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            # we also do a query with small UDP payload to elicit and test a truncated response
            queries[(name_obj.name, -dns.rdatatype.DNSKEY)] = self.truncation_diagnostic_query(name_obj.name, dns.rdatatype.DNSKEY, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

            if name_obj.name != dns.name.root:
                self.logger.debug('Preparing query %s/CDNSKEY...' % fmt.humanize_name(name_obj.name))
                queries[(name_obj.name, dns.rdatatype.CDNSKEY)] = self.pmtu_diagnostic_query(name_obj.name, dns.rdatatype.CDNSKEY, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

                self.logger.debug('Preparing query %s/CDS...' % fmt.humanize_name(name_obj.name))
                queries[(name_obj.name, dns.rdatatype.CDS)] = self.pmtu_diagnostic_query(name_obj.name, dns.rdatatype.CDS, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

        # query for DS/DLV
        if name_obj.parent is not None:
            parent_servers = name_obj.zone.parent.get_auth_or_designated_servers()
            # if we haven't queried any of the servers designated for the
            # parent, then query them all
            if not name_obj.parent._all_servers_queried.intersection(parent_servers):
                pass
            # otherwise, just query the ones that were responsive
            else:
                parent_servers = name_obj.zone.parent.get_responsive_auth_or_designated_servers()
                if not parent_servers:
                    # while the parent servers might not be responsive for the parent name,
                    # they must be responsive for the current name, or else we wouldn't be here.
                    parent_servers = name_obj.zone.parent.get_auth_or_designated_servers()
            parent_servers = self._filter_servers(parent_servers)

            parent_odd_ports = {s: self.odd_ports[(n, s)] for n, s in self.odd_ports if n in (name_obj.zone.parent.name, WILDCARD_EXPLICIT_DELEGATION)}
            parent_cookie_jar = name_obj.zone.parent.cookie_jar

            self.logger.debug('Preparing query %s/DS...' % fmt.humanize_name(name_obj.name))
            queries[(name_obj.name, dns.rdatatype.DS)] = self.diagnostic_query(name_obj.name, dns.rdatatype.DS, self.rdclass, parent_servers, name_obj.parent_name(), self.client_ipv4, self.client_ipv6, odd_ports=parent_odd_ports, cookie_jar=parent_cookie_jar, cookie_standin=COOKIE_STANDIN)

            if name_obj.dlv_parent is not None and self.dlv_domain != self.name:
                dlv_servers = name_obj.dlv_parent.get_responsive_auth_or_designated_servers()
                dlv_servers = self._filter_servers(dlv_servers)
                dlv_name = name_obj.dlv_name
                if dlv_servers:
                    dlv_odd_ports = {s: self.odd_ports[(n, s)] for n, s in self.odd_ports if n in (name_obj.dlv_parent.name, WILDCARD_EXPLICIT_DELEGATION)}
                    dlv_cookie_jar = name_obj.dlv_parent.cookie_jar

                    self.logger.debug('Preparing query %s/DLV...' % fmt.humanize_name(dlv_name))
                    queries[(dlv_name, dns.rdatatype.DLV)] = self.diagnostic_query(dlv_name, dns.rdatatype.DLV, self.rdclass, dlv_servers, name_obj.dlv_parent_name(), self.client_ipv4, self.client_ipv6, odd_ports=dlv_odd_ports, cookie_jar=dlv_cookie_jar, cookie_standin=COOKIE_STANDIN)
                    exclude_no_answer.add((dlv_name, dns.rdatatype.DLV))

    # get rid of any queries already asked
    for name, rdtype in set(name_obj.queries).intersection(set(queries)):
        del queries[(name, rdtype)]

    # finally, query any additional rdtypes
    if servers and self._ask_non_delegation_queries(name_obj.name):
        all_queries = set(name_obj.queries).union(set(queries))
        for rdtype in self._rdtypes_to_query(name_obj.name):
            if (name_obj.name, rdtype) not in all_queries:
                self.logger.debug('Preparing query %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(rdtype)))
                queries[(name_obj.name, rdtype)] = self.diagnostic_query(name_obj.name, rdtype, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

    # if no default queries were identified (e.g., empty non-terminal in
    # in-addr.arpa space), then add a backup.
    if not (queries or name_obj.queries):
        rdtype = dns.rdatatype.A
        self.logger.debug('Preparing query %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(rdtype)))
        queries[(name_obj.name, rdtype)] = self.diagnostic_query(name_obj.name, rdtype, self.rdclass, servers, bailiwick, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)

    # actually execute the queries, then store the results
    self.logger.debug('Executing queries...')
    Q.ExecutableDNSQuery.execute_queries(*list(queries.values()), tm=self.transport_manager, th_factories=self.th_factories)
    for key, query in queries.items():
        if query.is_answer_any() or key not in exclude_no_answer:
            self._add_query(name_obj, query, False, False)
def _analyze_delegation(self, name_obj):
    '''Elicit a referral for name_obj from its parent's servers, record the
    referral responses, then resolve the names in the authoritative NS
    RRset and query each corresponding server.  Return False when the
    parent has no usable servers or the name does not exist (NXDOMAIN);
    otherwise True.'''

    if name_obj.parent is None:
        parent_auth_servers = self._root_servers()
    elif name_obj.parent.stub:
        parent_auth_servers = name_obj.parent.get_auth_or_designated_servers()
    else:
        parent_auth_servers = name_obj.parent.get_responsive_auth_or_designated_servers()
        # even if no servers are responsive, use all designated servers if this
        # is the name in question, for completeness
        if not parent_auth_servers and name_obj.name == self.name:
            parent_auth_servers = name_obj.parent.get_auth_or_designated_servers()
    parent_auth_servers = set(self._filter_servers(parent_auth_servers))

    odd_ports = dict([(s, self.odd_ports[(n, s)]) for n, s in self.odd_ports if n in (name_obj.zone.name, WILDCARD_EXPLICIT_DELEGATION)])
    cookie_jar = name_obj.zone.cookie_jar

    if not parent_auth_servers:
        return False

    servers_queried = OrderedDict(((dns.rdatatype.NS, set()),))
    referral_queries = {}

    # pick a secondary rdtype for fallback referral queries; DS/DLV/NS are
    # not usable as a secondary type
    try:
        secondary_rdtype = self._rdtypes_to_query(name_obj.name)[0]
    except IndexError:
        secondary_rdtype = None
    else:
        if secondary_rdtype in (dns.rdatatype.DS, dns.rdatatype.DLV, dns.rdatatype.NS):
            secondary_rdtype = None
        else:
            servers_queried[secondary_rdtype] = set()

    # elicit a referral from parent servers by querying first for NS, then
    # a secondary type as a fallback
    for rdtype in servers_queried:
        servers_queried[rdtype].update(parent_auth_servers)

        name_obj.referral_rdtype = rdtype

        self.logger.debug('Querying %s/%s (referral)...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(rdtype)))
        query = self.diagnostic_query(name_obj.name, rdtype, self.rdclass, parent_auth_servers, name_obj.parent_name(), self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
        query.execute(tm=self.transport_manager, th_factories=self.th_factories)
        referral_queries[rdtype] = query

        # if NXDOMAIN was received, then double-check with the secondary
        # type, as some servers (mostly load balancers) don't respond well
        # to NS queries
        if query.is_nxdomain_all():
            continue

        # otherwise, if we received at least one valid response, then break out
        if query.is_valid_complete_response_any():
            break

    # we only go a second time through the loop, querying the secondary
    # rdtype query if 1) there was NXDOMAIN or 2) there were no valid
    # responses.  In either case the secondary record type becomes the
    # referral rdtype.

    # if the name is not a delegation, or if we received no valid and
    # complete response, then move along
    if query.is_not_delegation_all() or not query.is_valid_complete_response_any():
        # We only keep the referral response if:
        #   1) there was an error getting a referral response;
        #   2) there was a discrepancy between NXDOMAIN and YXDOMAIN; or
        #   3) this is the name in question and the response was NXDOMAIN,
        #      in which case we use this to show the NXDOMAIN (empty answers
        #      will be asked later by better queries)
        # And in the case of a referral using the secondary rdtype, we only
        # keep the NS referral if there was a discrepancy between NXDOMAIN
        # and YXDOMAIN.

        is_nxdomain = query.is_nxdomain_all()
        is_valid = query.is_valid_complete_response_any()

        # (referral type is NS)
        if name_obj.referral_rdtype == dns.rdatatype.NS:
            # If there was a secondary type, the fact that only NS was queried
            # for indicates that there was no error and no NXDOMAIN response.
            # In this case, there is no need to save the referral.  Delete it.
            if secondary_rdtype is not None:
                name_obj.referral_rdtype = None
                del referral_queries[dns.rdatatype.NS]

            # If there was no secondary type, we need to evaluate the responses
            # to see if they're worth saving.  Save the referral if there
            # was an error or NXDOMAIN and there is no nxdomain_ancestor or
            # this is the name in question.
            else:
                if not is_valid or (is_nxdomain and (name_obj.name == self.name or name_obj.nxdomain_ancestor is None)):
                    pass
                else:
                    name_obj.referral_rdtype = None
                    del referral_queries[dns.rdatatype.NS]

        # (referral type is secondary type)
        else:
            # don't remove either record if there's an NXDOMAIN/YXDOMAIN mismatch
            if referral_queries[dns.rdatatype.NS].is_nxdomain_all() and \
                    is_valid and not is_nxdomain:
                pass
            else:
                # if no mismatch, then always delete the NS record
                del referral_queries[dns.rdatatype.NS]
                # Save the referral if there was an error or NXDOMAIN and
                # there is no nxdomain_ancestor or this is the name in
                # question.
                if not is_valid or (is_nxdomain and (name_obj.name == self.name or name_obj.nxdomain_ancestor is None)):
                    pass
                else:
                    name_obj.referral_rdtype = None
                    del referral_queries[secondary_rdtype]

        # add remaining queries
        for query in referral_queries.values():
            self._add_query(name_obj, query, True, False)

        # return a positive response only if not nxdomain
        return not is_nxdomain

    if self.dns_cookies:
        # An NS query to authoritative servers will always be used to
        # elicit a server query.
        name_obj.cookie_rdtype = dns.rdatatype.NS
        cookie_jar = name_obj.cookie_jar
        cookie_str = ', detecting cookies'
    else:
        name_obj.cookie_rdtype = None
        cookie_jar = None
        cookie_str = ''

    # Add any queries made.  At this point, at least one of the queries is
    # for type NS.  If there is a second, it is because the first resulted
    # in NXDOMAIN, and the type for the second query is secondary_rdtype.
    for query in referral_queries.values():
        detect_cookies = query.rdtype == name_obj.cookie_rdtype
        self._add_query(name_obj, query, True, detect_cookies)

    # Identify auth_rdtype, the rdtype used to query the authoritative
    # servers to retrieve NS records in the authority section.
    name_obj.auth_rdtype = secondary_rdtype

    # Now identify the authoritative NS RRset from all servers, both by
    # querying the authoritative servers for NS and by querying them for
    # another type and looking for NS in the authority section.  Resolve
    # all names referred to in the NS RRset(s), and query each
    # corresponding server, until all names have been resolved and all
    # corresponding addresses queried..
    names_resolved = set()
    names_not_resolved = name_obj.get_ns_names()
    while names_not_resolved:
        # resolve every name in the NS RRset
        query_tuples = []
        for name in names_not_resolved:
            query_tuples.extend([(name, dns.rdatatype.A, self.rdclass), (name, dns.rdatatype.AAAA, self.rdclass)])

        answer_map = self.resolver.query_multiple_for_answer(*query_tuples)
        for query_tuple in answer_map:
            name = query_tuple[0]
            a = answer_map[query_tuple]
            if isinstance(a, Resolver.DNSAnswer):
                for a_rr in a.rrset:
                    name_obj.add_auth_ns_ip_mappings((name, IPAddr(a_rr.to_text())))
            # negative responses
            elif isinstance(a, (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer)):
                name_obj.add_auth_ns_ip_mappings((name, None))
            # error responses
            elif isinstance(a, (dns.exception.Timeout, dns.resolver.NoNameservers)):
                pass
            names_resolved.add(name)

        queries = []
        auth_servers = name_obj.get_auth_or_designated_servers(no_cache=True)

        # NS query
        servers = auth_servers.difference(servers_queried[dns.rdatatype.NS])
        servers_queried[dns.rdatatype.NS].update(servers)
        servers = self._filter_servers(servers, no_raise=True)
        if servers:
            self.logger.debug('Querying %s/NS (auth%s)...' % (fmt.humanize_name(name_obj.name), cookie_str))
            query = self.diagnostic_query_no_server_cookie(name_obj.name, dns.rdatatype.NS, self.rdclass, servers, name_obj.name, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports)
            query.execute(tm=self.transport_manager, th_factories=self.th_factories)
            self._add_query(name_obj, query, True, True, True)

        # secondary query
        if secondary_rdtype is not None and self._ask_non_delegation_queries(name_obj.name):
            servers = auth_servers.difference(servers_queried[secondary_rdtype])
            servers_queried[secondary_rdtype].update(servers)
            servers = self._filter_servers(servers, no_raise=True)
            if servers:
                self.logger.debug('Querying %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(secondary_rdtype)))
                query = self.diagnostic_query(name_obj.name, secondary_rdtype, self.rdclass, servers, name_obj.name, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                query.execute(tm=self.transport_manager, th_factories=self.th_factories)
                self._add_query(name_obj, query, True, False, True)

        names_not_resolved = name_obj.get_ns_names().difference(names_resolved)

    #TODO now go back and look at servers authoritative for both parent and
    #child that have authoritative referrals and re-classify them as
    #non-referrals (do this in deserialize (and dnsvizwww retrieve also)

    return True

def _analyze_dependency(self, analyst, result_map, result_key, errors):
    '''Run analyst.analyze(), storing the result in result_map under
    result_key; on failure, append (result_key, exc_info) to errors.'''

    try:
        result_map[result_key] = analyst.analyze()
    except:
        errors.append((result_key, sys.exc_info()))

def _analyze_dependencies(self, name_obj):
    '''Analyze CNAME targets, external signers, and (optionally) NS and MX
    targets concurrently, one thread per dependency; re-raise the first
    error and log the rest.'''

    threads = []
    errors = []

    kwargs = dict([(n, getattr(self, n)) for n in self.clone_attrnames])
    for cname in name_obj.cname_targets:
        for target in name_obj.cname_targets[cname]:
            a = self.__class__(target, trace=self.trace + [(name_obj, dns.rdatatype.CNAME)], explicit_only=self.explicit_only, extra_rdtypes=self.extra_rdtypes, **kwargs)
            t = threading.Thread(target=self._analyze_dependency, args=(a, name_obj.cname_targets[cname], target, errors))
            t.start()
            threads.append(t)

    for signer in name_obj.external_signers:
        a = self.__class__(signer, trace=self.trace + [(name_obj, dns.rdatatype.RRSIG)], **kwargs)
        t = threading.Thread(target=self._analyze_dependency, args=(a, name_obj.external_signers, signer, errors))
        t.start()
        threads.append(t)

    if self.follow_ns:
        for ns in name_obj.ns_dependencies:
            a = self.__class__(ns, trace=self.trace + [(name_obj, dns.rdatatype.NS)], **kwargs)
            t = threading.Thread(target=self._analyze_dependency, args=(a, name_obj.ns_dependencies, ns, errors))
            t.start()
            threads.append(t)

    if self.follow_mx:
        for target in name_obj.mx_targets:
            a = self.__class__(target, trace=self.trace + [(name_obj, dns.rdatatype.MX)], explicit_only=True, extra_rdtypes=[dns.rdatatype.A, dns.rdatatype.AAAA], **kwargs)
            t = threading.Thread(target=self._analyze_dependency, args=(a, name_obj.mx_targets, target, errors))
            t.start()
            threads.append(t)

    for t in threads:
        t.join()

    if errors:
        # raise only the first exception, but log all the ones beyond
        for name, exc_info in errors[1:]:
            self.logger.error('Error analyzing %s' % name, exc_info=exc_info)
        # python3/python2 dual compatibility
        if hasattr(errors[0][1][0], 'with_traceback'):
            raise errors[0][1][1].with_traceback(errors[0][1][2])
        else:
            # lesser python2 functionality
            exec('raise errors[0][1][1], None, errors[0][1][2]')
def _mix_case(self, name):
    '''Return a mixed-case (0x20) variant of name, or None if the name
    contains no ASCII letters to randomize.'''

    lowered = name.to_text().lower()
    n = len(lowered)
    # enough random bits for one bit per character, rounded up past a byte
    rnd = random.getrandbits((n + 8) - (n % 8))

    mixed = ''
    changed = False
    for i, c in enumerate(lowered):
        # If the character is a lower case letter, mix it up randomly.
        # Always make the first letter upper case, to ensure that it isn't
        # completely lower case.
        if ord('a') <= ord(c) <= ord('z') and ((rnd & (1 << i)) or not changed):
            mixed += chr(ord(c) - 32)
            changed = True
        else:
            mixed += c
    if not changed:
        return None
    return dns.name.from_text(mixed)

def _set_nxdomain_query(self, name_obj):
    '''Synthesize a (very likely) non-existent subdomain name for eliciting
    an NXDOMAIN response; skip if the result would be too long.'''

    population = 'abcdefghijklmnopqrstuvwxyz1234567890'
    random_label1 = ''.join(random.sample(population, 5))
    random_label2 = ''.join(random.sample(population, 5))
    try:
        name_obj.nxdomain_name = dns.name.from_text('%s.%s' % (random_label1, random_label2), name_obj.name)
        name_obj.nxdomain_rdtype = dns.rdatatype.A
    except dns.name.NameTooLong:
        pass

def _set_nodata_query(self, name_obj):
    '''Use the name itself with type CNAME to elicit a NODATA response.'''

    name_obj.nxrrset_name = name_obj.name
    name_obj.nxrrset_rdtype = dns.rdatatype.CNAME

def _require_connectivity_ipv4(self, name_obj):
    '''Return True if any non-loopback IPv4 client address was used.'''

    return bool([c for c in name_obj.clients_ipv4 if LOOPBACK_IPV4_RE.match(c) is None])

def _require_connectivity_ipv6(self, name_obj):
    '''Return True if any non-loopback IPv6 client address was used.'''

    return bool([c for c in name_obj.clients_ipv6 if c != LOOPBACK_IPV6])

def _check_connectivity(self, name_obj):
    '''If no servers responded over a required IP version, probe the root
    servers over that version and raise a connectivity exception if they,
    too, are unreachable.'''

    if self.local_ceiling is not None and (self.local_ceiling, dns.rdatatype.NS) in self.explicit_delegations:
        # this check is only useful if not the descendant of an explicit
        # delegation
        return
    if name_obj.get_auth_or_designated_servers(4) and self._require_connectivity_ipv4(name_obj) and not name_obj.get_responsive_servers_udp(4):
        if self._root_responsive(4) is False:
            raise IPv4ConnectivityException('IPv4 network is unreachable!')
    if name_obj.get_auth_or_designated_servers(6) and self._require_connectivity_ipv6(name_obj) and not name_obj.get_responsive_servers_udp(6):
        if self._root_responsive(6) is False:
            raise IPv6ConnectivityException('IPv6 network is unreachable!')

def _raise_connectivity_error_remote(self):
    '''Raise the connectivity exception matching the enabled IP versions
    when no servers remain to query.'''

    if not (self.try_ipv4 or self.try_ipv6):
        raise NetworkConnectivityException('No servers to query!')
    elif self.try_ipv4:
        raise IPv4ConnectivityException('No IPv4 servers to query!')
    else: # self.try_ipv6
        raise IPv6ConnectivityException('No IPv6 servers to query!')
_raise_connectivity_error_local(self, servers): '''Raise a network connectivity exception. The class of exception and message associated with the exception are determined based on: whether IPv4 or IPv6 is specified to be attempted; whether there were IPv4 or IPv6 servers to query.''' if self.try_ipv4 and self.try_ipv6: # if we are configured to try both IPv4 and IPv6, then use servers # to determine which one is missing has_v4 = bool([x for x in servers if x.version == 4]) has_v6 = bool([x for x in servers if x.version == 6]) if has_v4 and has_v6: # if both IPv4 and IPv6 servers were attempted, then there was # no network connectivity for either protocol raise NetworkConnectivityException('No network connectivity available!') elif has_v4: raise IPv4ConnectivityException('No IPv4 network connectivity available!') else: # has_v6 raise IPv6ConnectivityException('No IPv6 network connectivity available!') elif self.try_ipv4: raise IPv4ConnectivityException('No IPv4 network connectivity available!') elif self.try_ipv6: raise IPv6ConnectivityException('No IPv6 network connectivity available!') else: raise NetworkConnectivityException('No network connectivity available!') def _root_responsive(self, proto): servers = list(self._root_servers(proto)) checker = Resolver.Resolver(servers, self.simple_query, max_attempts=1, shuffle=True, transport_manager=self.transport_manager) try: checker.query_for_answer(dns.name.root, dns.rdatatype.NS, self.rdclass) return True except dns.resolver.NoNameservers: return None except dns.exception.Timeout: pass return False class PrivateAnalyst(Analyst): default_th_factory = transport.DNSQueryTransportHandlerDNSPrivateFactory() class RecursiveAnalyst(Analyst): _simple_query = Q.RecursiveDNSQuery _diagnostic_query = Q.RecursiveDiagnosticQuery _tcp_diagnostic_query = Q.RecursiveTCPDiagnosticQuery _pmtu_diagnostic_query = Q.RecursivePMTUDiagnosticQuery _truncation_diagnostic_query = Q.RecursiveTruncationDiagnosticQuery 
    _edns_version_diagnostic_query = Q.RecursiveEDNSVersionDiagnosticQuery
    _edns_flag_diagnostic_query = Q.RecursiveEDNSFlagDiagnosticQuery
    _edns_opt_diagnostic_query = Q.RecursiveEDNSOptDiagnosticQuery

    analysis_type = ANALYSIS_TYPE_RECURSIVE

    def _get_resolver(self):
        '''Build a resolver from the addresses of all explicitly delegated
        (wildcard) servers with known A/AAAA records.'''

        servers = set()
        for rdata in self.explicit_delegations[(WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS)]:
            for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA):
                if (rdata.target, rdtype) in self.explicit_delegations:
                    servers.update([IPAddr(r.address) for r in self.explicit_delegations[(rdata.target, rdtype)]])
        return Resolver.Resolver(list(servers), Q.StandardRecursiveQueryCD, transport_manager=self.transport_manager)

    def _detect_ceiling(self, ceiling):
        # if there is a ceiling, but the name is not a subdomain
        # of the ceiling, then use the name itself as a base
        if ceiling is not None and not self.name.is_subdomain(ceiling):
            ceiling = self.name
        return ceiling, None

    def _finalize_analysis_proper(self, name_obj):
        '''Since we initially queried the full set of queries before we knew
        which were appropriate for the name in question, we now identify all
        queries that were pertinent and remove all other.'''

        # if it's a stub, then no need to do anything
        if name_obj.stub:
            return

        # if there are not NS records, then it's not a zone, so clear auth NS
        # IP mapping
        if not name_obj.has_ns:
            name_obj._auth_ns_ip_mapping = {}

        queries = set()

        if name_obj.is_zone():
            queries.add((name_obj.name, dns.rdatatype.NS))
            if self._ask_non_delegation_queries(name_obj.name) and not self.explicit_only:
                queries.add((name_obj.nxdomain_name, name_obj.nxdomain_rdtype))
                queries.add((name_obj.nxrrset_name, name_obj.nxrrset_rdtype))
            if self._is_sld_or_lower(name_obj.name):
                queries.add((name_obj.name, dns.rdatatype.MX))
                queries.add((name_obj.name, dns.rdatatype.TXT))

        if name_obj.is_zone() or self._force_dnskey_query(name_obj.name):
            if self._ask_non_delegation_queries(name_obj.name) and not self.explicit_only:
                queries.add((name_obj.name, dns.rdatatype.SOA))
            queries.add((name_obj.name, dns.rdatatype.DNSKEY))
            # NOTE(review): nesting of the DLV check relative to the parent
            # check reconstructed from a flattened source — confirm upstream
            if name_obj.parent is not None:
                queries.add((name_obj.name, dns.rdatatype.DS))
            if name_obj.dlv_parent is not None:
                queries.add((name_obj.dlv_name, dns.rdatatype.DLV))

        if self._ask_non_delegation_queries(name_obj.name):
            for rdtype in self._rdtypes_to_query(name_obj.name):
                queries.add((name_obj.name, rdtype))

        if not queries:
            # for TLD and higher, add NS
            if len(name_obj.name) <= 2:
                rdtype = dns.rdatatype.NS
            # for SLD and lower, add A
            else:
                rdtype = dns.rdatatype.A
            queries.add((name_obj.name, rdtype))

        # drop every query that turned out not to be pertinent
        for name, rdtype in set(name_obj.queries).difference(queries):
            del name_obj.queries[(name, rdtype)]

        # clear the negative-response probes if they were pruned above
        if (name_obj.nxdomain_name, name_obj.nxdomain_rdtype) not in queries:
            name_obj.nxdomain_name = None
            name_obj.nxdomain_rdtype = None
        if (name_obj.nxrrset_name, name_obj.nxrrset_rdtype) not in queries:
            name_obj.nxrrset_name = None
            name_obj.nxrrset_rdtype = None

    def _analyze_stub(self, name):
        '''Perform a minimal ("stub") analysis of name: resolve its NS RRset
        through the designated resolvers and, on NXDOMAIN/NoAnswer, attach
        the parent zone via a recursive stub analysis.'''

        name_obj = self._get_name_for_analysis(name, stub=True)
        if name_obj.analysis_end is not None:
            return name_obj

        try:
            self.logger.info('Analyzing %s (stub)' % fmt.humanize_name(name))

            name_obj.analysis_start = datetime.datetime.now(fmt.utc).replace(microsecond=0)

            self._handle_explicit_delegations(name_obj)
            servers = name_obj.zone.get_auth_or_designated_servers()
            servers = self._filter_servers(servers)
            resolver = Resolver.Resolver(list(servers), Q.StandardRecursiveQueryCD, transport_manager=self.transport_manager)

            try:
                # answer itself unused; success/failure drives the logic below
                ans = resolver.query_for_answer(name, dns.rdatatype.NS, self.rdclass)
            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
                name_obj.parent = self._analyze_stub(name.parent()).zone
            except dns.exception.DNSException:
                pass

            name_obj.analysis_end = datetime.datetime.now(fmt.utc).replace(microsecond=0)

            self._finalize_analysis_proper(name_obj)
            self._finalize_analysis_all(name_obj)
        finally:
            self._cleanup_analysis_proper(name_obj)
            self._cleanup_analysis_all(name_obj)

        return name_obj

    def _analyze_ancestry(self, name, is_zone):
        '''Analyze the ancestors of name; return a tuple of
        (parent zone object, DLV parent object or None,
        nearest NXDOMAIN ancestor or None).'''

        # only analyze the parent if the name is not root and if there is no
        # ceiling or the name is a subdomain of the ceiling
        if name == dns.name.root:
            parent_obj = None
        elif self.local_ceiling is not None and self.local_ceiling.is_subdomain(name) and is_zone:
            parent_obj = self._analyze_stub(name.parent())
        else:
            parent_obj = self._analyze(name.parent())

        if parent_obj is not None:
            nxdomain_ancestor = parent_obj.nxdomain_ancestor
            if nxdomain_ancestor is None and not parent_obj.stub:
                # any query made for the parent's own name suffices to detect
                # an all-NXDOMAIN response
                rdtype = [x for x in parent_obj.queries.keys() if x[0] == parent_obj.name][0][1]
                if parent_obj.queries[(parent_obj.name, rdtype)].is_nxdomain_all():
                    nxdomain_ancestor = parent_obj

            # for zones other than the root assign parent_obj to the zone apex,
            # rather than the simply the domain formed by dropping its lower
            # leftmost label
            parent_obj = parent_obj.zone

        else:
            nxdomain_ancestor = None

        # retrieve the dlv
        if self.dlv_domain is not None and self.name != self.dlv_domain:
            dlv_parent_obj = self.analysis_cache[self.dlv_domain]
        else:
            dlv_parent_obj = None

        return parent_obj, dlv_parent_obj, nxdomain_ancestor

    def _analyze(self, name):
        '''Analyze a DNS name to learn about its health using introspective
        queries.'''

        # determine immediately if we need to do anything
        name_obj = self._get_name_for_analysis(name, lock=False)
        if name_obj is not None and name_obj.analysis_end is not None:
            return name_obj

        # get or create the name
        name_obj = self._get_name_for_analysis(name)
        if name_obj.analysis_end is not None:
            return name_obj

        try:
            try:
                name_obj.analysis_start = datetime.datetime.now(fmt.utc).replace(microsecond=0)

                # perform the actual analysis on this name
                self._analyze_name(name_obj)

                # set analysis_end
                name_obj.analysis_end = datetime.datetime.now(fmt.utc).replace(microsecond=0)

                # sanity check - if we weren't able to get responses from any
                # servers, check that we actually have connectivity
                self._check_connectivity(name_obj)

                # analyze ancestry
                parent_obj, dlv_parent_obj, nxdomain_ancestor = \
                        self._analyze_ancestry(name, name_obj.has_ns)

                name_obj.parent = parent_obj
                name_obj.dlv_parent = dlv_parent_obj
                name_obj.nxdomain_ancestor = nxdomain_ancestor

                self._finalize_analysis_proper(name_obj)
            finally:
                self._cleanup_analysis_proper(name_obj)

            # analyze dependencies
            self._analyze_dependencies(name_obj)
            self._finalize_analysis_all(name_obj)
        finally:
            self._cleanup_analysis_all(name_obj)

        return name_obj

    def _analyze_name(self, name_obj):
        '''Issue the diagnostic queries for name_obj through the configured
        resolvers: a priming query first, then the general query set, then
        DS and (for non-TLDs) NS queries if not already covered.'''

        self.logger.info('Analyzing %s' % fmt.humanize_name(name_obj.name))

        self._handle_explicit_delegations(name_obj)
        servers = name_obj.zone.get_auth_or_designated_servers()
        if not servers:
            raise NoNameservers('No resolvers specified to query!')
        servers = self._filter_servers(servers)
        if not servers:
            raise NoNameservers('No resolvers available to query!')

        # non-standard ports configured for this zone (or for the wildcard
        # explicit delegation)
        odd_ports = dict([(s, self.odd_ports[(n, s)]) for n, s in self.odd_ports if n in (name_obj.zone.name, WILDCARD_EXPLICIT_DELEGATION)])
        cookie_jar = name_obj.zone.cookie_jar

        # make common query first to prime the cache

        # for root and TLD, use type NS
        if len(name_obj.name) <= 2:
            rdtype = dns.rdatatype.NS
        # for SLDs and below detect an appropriate type
        # and use A as a fallback.
        else:
            try:
                rdtype = self._rdtypes_to_query(name_obj.name)[0]
            except IndexError:
                rdtype = dns.rdatatype.A
            else:
                if rdtype in (dns.rdatatype.DS, dns.rdatatype.NS):
                    rdtype = dns.rdatatype.A

        self.logger.debug('Querying %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(rdtype)))
        query = self.diagnostic_query_no_server_cookie(name_obj.name, rdtype, self.rdclass, servers, None, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports)
        query.execute(tm=self.transport_manager, th_factories=self.th_factories)
        self._add_query(name_obj, query, True, True)

        # remember which type was used for the cookie-less priming query
        if self.dns_cookies:
            name_obj.cookie_rdtype = rdtype
        else:
            name_obj.cookie_rdtype = None

        # if there were no valid responses, then exit out early
        if not query.is_valid_complete_response_any() and not self.explicit_only:
            return name_obj

        # if there was an NXDOMAIN for the first query, then don't ask the
        # others, unless explicit was called
        if query.is_nxdomain_all() and not self.explicit_only:
            return name_obj

        # now query most other queries
        self._analyze_queries(name_obj)

        if name_obj.name != dns.name.root:
            # ensure these weren't already queried for (e.g., as part of extra_rdtypes)
            if (name_obj.name, dns.rdatatype.DS) not in name_obj.queries:
                # make DS queries (these won't be included in the above mix
                # because there is no parent on the name_obj)
                self.logger.debug('Querying %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.DS)))
                query = self.diagnostic_query(name_obj.name, dns.rdatatype.DS, self.rdclass, servers, None, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                query.execute(tm=self.transport_manager, th_factories=self.th_factories)
                self._add_query(name_obj, query, False, False)

        # for non-TLDs make NS queries after all others
        if len(name_obj.name) > 2:
            # ensure these weren't already queried for (e.g., as part of extra_rdtypes)
            if (name_obj.name, dns.rdatatype.NS) not in name_obj.queries:
                self.logger.debug('Querying %s/%s...' % (fmt.humanize_name(name_obj.name), dns.rdatatype.to_text(dns.rdatatype.NS)))
                query = self.diagnostic_query(name_obj.name, dns.rdatatype.NS, self.rdclass, servers, None, self.client_ipv4, self.client_ipv6, odd_ports=odd_ports, cookie_jar=cookie_jar, cookie_standin=COOKIE_STANDIN)
                query.execute(tm=self.transport_manager, th_factories=self.th_factories)
                self._add_query(name_obj, query, True, False)

        return name_obj

    def _require_connectivity_ipv4(self, name_obj):
        # recursive analysis: any IPv4 client address implies IPv4 is in use
        return bool(name_obj.clients_ipv4)

    def _require_connectivity_ipv6(self, name_obj):
        # recursive analysis: any IPv6 client address implies IPv6 is in use
        return bool(name_obj.clients_ipv6)

    def _check_connectivity(self, name_obj):
        '''If no resolver of a given address family responded, probe the root
        over that family and raise a connectivity exception on failure.'''

        if self._require_connectivity_ipv4(name_obj) and not name_obj.get_responsive_servers_udp(4):
            if self._root_responsive(4) is False:
                raise IPv4ConnectivityException('IPv4 resolvers are not responsive!')
        if self._require_connectivity_ipv6(name_obj) and not name_obj.get_responsive_servers_udp(6):
            if self._root_responsive(6) is False:
                raise IPv6ConnectivityException('IPv6 resolvers are not responsive!')

class PrivateRecursiveAnalyst(RecursiveAnalyst):
    # RecursiveAnalyst variant that resolves reserved/private names via the
    # private DNS transport handler factory.
    default_th_factory = transport.DNSQueryTransportHandlerDNSPrivateFactory()
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/analysis/status.py0000644000175000017500000025575215001472663017212 0ustar00caseycasey#
# This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring,
# analysis, and visualization.
# Created by Casey Deccio (casey@deccio.net)
#
# Copyright 2012-2014 Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software.
#
# Copyright 2014-2016 VeriSign, Inc.
#
# Copyright 2016-2024 Casey Deccio
#
# DNSViz is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# DNSViz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with DNSViz.  If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals

import base64
import datetime
import logging

# minimal support for python2.6
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

# python3/python2 dual compatibility
try:
    from html import escape
except ImportError:
    from cgi import escape

import dns.name, dns.rdatatype

from dnsviz import base32
from dnsviz import crypto
from dnsviz import format as fmt
from dnsviz.util import tuple_to_dict

lb2s = fmt.latin1_binary_to_string

from . import errors as Errors

# seconds of allowed clock skew before inception/expiration warnings
CLOCK_SKEW_WARNING = 300

# generic validity states; True/None/False are accepted as aliases in
# status_mapping below
STATUS_VALID = 0
STATUS_INDETERMINATE = 1
STATUS_INVALID = 2
status_mapping = {
        STATUS_VALID: 'VALID',
        True: 'VALID',
        STATUS_INDETERMINATE: 'INDETERMINATE',
        None: 'INDETERMINATE',
        STATUS_INVALID: 'INVALID',
        False: 'INVALID',
}

# overall response status for a queried name
NAME_STATUS_NOERROR = 0
NAME_STATUS_NXDOMAIN = 1
NAME_STATUS_INDETERMINATE = 2
name_status_mapping = {
        NAME_STATUS_NOERROR: 'NOERROR',
        NAME_STATUS_NXDOMAIN: 'NXDOMAIN',
        NAME_STATUS_INDETERMINATE: 'INDETERMINATE',
}

# RRSIG validation outcomes
RRSIG_STATUS_VALID = STATUS_VALID
RRSIG_STATUS_INDETERMINATE_NO_DNSKEY = 1
RRSIG_STATUS_INDETERMINATE_MATCH_PRE_REVOKE = 2
RRSIG_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM = 3
RRSIG_STATUS_ALGORITHM_IGNORED = 4
RRSIG_STATUS_EXPIRED = 5
RRSIG_STATUS_PREMATURE = 6
RRSIG_STATUS_INVALID_SIG = 7
RRSIG_STATUS_INVALID = 8
rrsig_status_mapping = {
        RRSIG_STATUS_VALID: 'VALID',
        RRSIG_STATUS_INDETERMINATE_NO_DNSKEY: 'INDETERMINATE_NO_DNSKEY',
        RRSIG_STATUS_INDETERMINATE_MATCH_PRE_REVOKE: 'INDETERMINATE_MATCH_PRE_REVOKE',
        RRSIG_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM: 'INDETERMINATE_UNKNOWN_ALGORITHM',
        RRSIG_STATUS_ALGORITHM_IGNORED: 'ALGORITHM_IGNORED',
        RRSIG_STATUS_EXPIRED: 'EXPIRED',
        RRSIG_STATUS_PREMATURE: 'PREMATURE',
        RRSIG_STATUS_INVALID_SIG: 'INVALID_SIG',
        RRSIG_STATUS_INVALID: 'INVALID',
}

# DS validation outcomes
DS_STATUS_VALID = STATUS_VALID
DS_STATUS_INDETERMINATE_NO_DNSKEY = 1
DS_STATUS_INDETERMINATE_MATCH_PRE_REVOKE = 2
DS_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM = 3
DS_STATUS_ALGORITHM_IGNORED = 4
DS_STATUS_INVALID_DIGEST = 5
DS_STATUS_INVALID = 6
ds_status_mapping = {
        DS_STATUS_VALID: 'VALID',
        DS_STATUS_INDETERMINATE_NO_DNSKEY: 'INDETERMINATE_NO_DNSKEY',
        DS_STATUS_INDETERMINATE_MATCH_PRE_REVOKE: 'INDETERMINATE_MATCH_PRE_REVOKE',
        DS_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM: 'INDETERMINATE_UNKNOWN_ALGORITHM',
        DS_STATUS_ALGORITHM_IGNORED: 'ALGORITHM_IGNORED',
        DS_STATUS_INVALID_DIGEST: 'INVALID_DIGEST',
        DS_STATUS_INVALID: 'INVALID',
}

# delegation (parent-to-child) statuses
DELEGATION_STATUS_SECURE = 0
DELEGATION_STATUS_INSECURE = 1
DELEGATION_STATUS_BOGUS = 2
DELEGATION_STATUS_INCOMPLETE = 3
DELEGATION_STATUS_LAME = 4
delegation_status_mapping = {
        DELEGATION_STATUS_SECURE: 'SECURE',
        DELEGATION_STATUS_INSECURE: 'INSECURE',
        DELEGATION_STATUS_BOGUS: 'BOGUS',
        DELEGATION_STATUS_INCOMPLETE: 'INCOMPLETE',
        DELEGATION_STATUS_LAME: 'LAME',
}

# RRset DNSSEC statuses
RRSET_STATUS_SECURE = 0
RRSET_STATUS_INSECURE = 1
RRSET_STATUS_BOGUS = 2
RRSET_STATUS_NON_EXISTENT = 3
rrset_status_mapping = {
        RRSET_STATUS_SECURE: 'SECURE',
        RRSET_STATUS_INSECURE: 'INSECURE',
        RRSET_STATUS_BOGUS: 'BOGUS',
        RRSET_STATUS_NON_EXISTENT: 'NON_EXISTENT',
}

# NSEC/NSEC3 proof statuses
NSEC_STATUS_VALID = STATUS_VALID
NSEC_STATUS_INDETERMINATE = STATUS_INDETERMINATE
NSEC_STATUS_INVALID = 2
nsec_status_mapping = {
        NSEC_STATUS_VALID: 'VALID',
        NSEC_STATUS_INDETERMINATE: 'INDETERMINATE',
        NSEC_STATUS_INVALID: 'INVALID',
}

# DNAME statuses
DNAME_STATUS_VALID = STATUS_VALID
DNAME_STATUS_INDETERMINATE = STATUS_INDETERMINATE
DNAME_STATUS_INVALID_TARGET = 2
DNAME_STATUS_INVALID = 3
dname_status_mapping = {
        DNAME_STATUS_VALID: 'VALID',
        DNAME_STATUS_INDETERMINATE: 'INDETERMINATE',
        DNAME_STATUS_INVALID_TARGET: 'INVALID_TARGET',
        DNAME_STATUS_INVALID: 'INVALID',
}

# expected RRSIG signature length in bits, for algorithms with fixed-length
# signatures, and the error class to raise on mismatch
RRSIG_SIG_LENGTHS_BY_ALGORITHM = {
        12: 512, 13: 512, 14: 768, 15: 512, 16: 912,
}
RRSIG_SIG_LENGTH_ERRORS = {
        12: Errors.RRSIGBadLengthGOST,
        13: Errors.RRSIGBadLengthECDSA256,
        14: Errors.RRSIGBadLengthECDSA384,
        15: Errors.RRSIGBadLengthEd25519,
        16: Errors.RRSIGBadLengthEd448,
}

# DS digest types considered stronger than SHA-1, and those whose presence
# causes validators to ignore SHA-1 DS records (RFC 4509)
DS_DIGEST_ALGS_STRONGER_THAN_SHA1 = (2, 4)
DS_DIGEST_ALGS_IGNORING_SHA1 = (2,)

# RFC 8624 Section 3.1
DNSKEY_ALGS_NOT_RECOMMENDED = (5, 7, 10)
DNSKEY_ALGS_PROHIBITED = (1, 3, 6, 12)
DNSKEY_ALGS_VALIDATION_PROHIBITED = (1, 3, 6)

# RFC 8624 Section 3.2
DS_DIGEST_ALGS_NOT_RECOMMENDED = ()
DS_DIGEST_ALGS_PROHIBITED = (0, 1, 3)
DS_DIGEST_ALGS_VALIDATION_PROHIBITED = ()

# rdtypes whose TTL is not checked against the RRSIG original TTL
RDTYPES_TTL_EXEMPT = (dns.rdatatype.NSEC3PARAM,)

class RRSIGStatus(object):
    '''The validation status of an RRSIG with respect to an RRset and a
    DNSKEY, evaluated at a given reference time.'''

    def __init__(self, rrset, rrsig, dnskey, zone_name, reference_ts, supported_algs, ignore_rfc8624):
        self.rrset = rrset
        self.rrsig = rrsig
        self.dnskey = dnskey
        self.zone_name = zone_name
        self.reference_ts = reference_ts
        self.warnings = []
        self.errors = []

        # cryptographic check: None if there is no DNSKEY to check against
        if self.dnskey is None:
            self.signature_valid = None
        else:
            self.signature_valid = crypto.validate_rrsig(dnskey.rdata.algorithm, rrsig.signature, rrset.message_for_rrsig(rrsig), dnskey.rdata.key)

        self.validation_status = RRSIG_STATUS_VALID
        if self.signature_valid is None or self.rrsig.algorithm not in supported_algs:
            # Either we can't validate the cryptographic signature, or we are
            # explicitly directed to ignore the algorithm.
            if self.dnskey is None:
                # In this case, there is no corresponding DNSKEY, so we make
                # the status "INDETERMINATE".
                if self.validation_status == RRSIG_STATUS_VALID:
                    self.validation_status = RRSIG_STATUS_INDETERMINATE_NO_DNSKEY
            else:
                # If there is a DNSKEY, then we look at *why* we are ignoring
                # the cryptographic signature.
                if self.dnskey.rdata.algorithm in DNSKEY_ALGS_VALIDATION_PROHIBITED:
                    # In this case, specification dictates that the algorithm
                    # MUST NOT be validated, so we mark it as ignored.
                    if self.validation_status == RRSIG_STATUS_VALID:
                        self.validation_status = RRSIG_STATUS_ALGORITHM_IGNORED
                else:
                    # In this case, we can't validate this particular
                    # algorithm, either because the code doesn't support it,
                    # or because we have been explicitly directed to ignore it.
                    # In either case, mark it as "UNKNOWN", and warn that it is
                    # not supported.
                    if self.validation_status == RRSIG_STATUS_VALID:
                        self.validation_status = RRSIG_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM
                    self.warnings.append(Errors.AlgorithmNotSupported(algorithm=self.rrsig.algorithm))

        # Independent of whether or not we considered the cryptographic
        # validation, issue a warning if we are using an algorithm for which
        # validation or signing has been prohibited.
        #
        # Validation is prohibited
        if self.rrsig.algorithm in DNSKEY_ALGS_VALIDATION_PROHIBITED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.AlgorithmValidationProhibited(algorithm=self.rrsig.algorithm))

        # Signing is prohibited or, at least, not recommended
        if self.rrsig.algorithm in DNSKEY_ALGS_PROHIBITED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.AlgorithmProhibited(algorithm=self.rrsig.algorithm))
        elif self.rrsig.algorithm in DNSKEY_ALGS_NOT_RECOMMENDED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.AlgorithmNotRecommended(algorithm=self.rrsig.algorithm))

        if self.rrset.rrset.rdtype not in RDTYPES_TTL_EXEMPT:
            # If we are comparing TTLs (i.e., for authoritative server responses),
            # then check that the TTL of the RRset matches the TTL of the RRSIG
            if self.rrset.ttl_cmp:
                if self.rrset.rrset.ttl != self.rrset.rrsig_info[self.rrsig].ttl:
                    self.warnings.append(Errors.RRsetTTLMismatch(rrset_ttl=self.rrset.rrset.ttl, rrsig_ttl=self.rrset.rrsig_info[self.rrsig].ttl))

            # Check that the TTL of the RRset does not exceed the value in the
            # original TTL field of the RRSIG
            if self.rrset.rrset.ttl > self.rrsig.original_ttl:
                self.errors.append(Errors.OriginalTTLExceededRRset(rrset_ttl=self.rrset.rrset.ttl, original_ttl=self.rrsig.original_ttl))

            # Check that the TTL of the RRSIG does not exceed the value in the
            # original TTL field of the RRSIG
            if self.rrset.rrsig_info[self.rrsig].ttl > self.rrsig.original_ttl:
                self.errors.append(Errors.OriginalTTLExceededRRSIG(rrsig_ttl=self.rrset.rrsig_info[self.rrsig].ttl, original_ttl=self.rrsig.original_ttl))

        min_ttl = min(self.rrset.rrset.ttl, self.rrset.rrsig_info[self.rrsig].ttl, self.rrsig.original_ttl)

        # the signer must be the zone itself (or, lacking zone knowledge, an
        # ancestor of the RRset owner)
        if (zone_name is not None and self.rrsig.signer != zone_name) or \
                (zone_name is None and not self.rrset.rrset.name.is_subdomain(self.rrsig.signer)):
            if self.validation_status == RRSIG_STATUS_VALID:
                self.validation_status = RRSIG_STATUS_INVALID
            if zone_name is None:
                zn = self.rrsig.signer
            else:
                zn = zone_name
            self.errors.append(Errors.SignerNotZone(zone_name=fmt.humanize_name(zn), signer_name=fmt.humanize_name(self.rrsig.signer)))

        # the labels field cannot exceed the owner name's label count
        # (excluding the root label)
        if self.rrsig.labels > len(self.rrset.rrset.name) - 1:
            self.errors.append(Errors.RRSIGLabelsExceedRRsetOwnerLabels(rrsig_labels=self.rrsig.labels, rrset_owner_labels=len(self.rrset.rrset.name) - 1))
            if self.validation_status == RRSIG_STATUS_VALID:
                self.validation_status = RRSIG_STATUS_INVALID

        # a revoked key can only sign the DNSKEY RRset; anything else is an
        # error unless the RRSIG matches the pre-revocation key tag
        if self.dnskey is not None and \
                self.dnskey.rdata.flags & fmt.DNSKEY_FLAGS['revoke'] and self.rrsig.covers() != dns.rdatatype.DNSKEY:
            if self.rrsig.key_tag != self.dnskey.key_tag:
                if self.validation_status == RRSIG_STATUS_VALID:
                    self.validation_status = RRSIG_STATUS_INDETERMINATE_MATCH_PRE_REVOKE
            else:
                self.errors.append(Errors.DNSKEYRevokedRRSIG())
                if self.validation_status == RRSIG_STATUS_VALID:
                    self.validation_status = RRSIG_STATUS_INVALID

        # fixed-length signature algorithms must carry a signature of exactly
        # the expected bit length
        sig_len = len(self.rrsig.signature) << 3
        if self.rrsig.algorithm in RRSIG_SIG_LENGTHS_BY_ALGORITHM and \
                sig_len != RRSIG_SIG_LENGTHS_BY_ALGORITHM[self.rrsig.algorithm]:
            self.errors.append(RRSIG_SIG_LENGTH_ERRORS[self.rrsig.algorithm](length=sig_len))

        # temporal validity, with a clock-skew grace period for warnings
        if self.reference_ts < self.rrsig.inception:
            if self.validation_status == RRSIG_STATUS_VALID:
                self.validation_status = RRSIG_STATUS_PREMATURE
            self.errors.append(Errors.InceptionInFuture(inception=fmt.timestamp_to_datetime(self.rrsig.inception), reference_time=fmt.timestamp_to_datetime(self.reference_ts)))
        elif self.reference_ts - CLOCK_SKEW_WARNING < self.rrsig.inception:
            self.warnings.append(Errors.InceptionWithinClockSkew(inception=fmt.timestamp_to_datetime(self.rrsig.inception), reference_time=fmt.timestamp_to_datetime(self.reference_ts)))

        if self.reference_ts >= self.rrsig.expiration:
            if self.validation_status == RRSIG_STATUS_VALID:
                self.validation_status = RRSIG_STATUS_EXPIRED
            self.errors.append(Errors.ExpirationInPast(expiration=fmt.timestamp_to_datetime(self.rrsig.expiration), reference_time=fmt.timestamp_to_datetime(self.reference_ts)))
        elif self.reference_ts + min_ttl > self.rrsig.expiration:
            self.errors.append(Errors.TTLBeyondExpiration(expiration=fmt.timestamp_to_datetime(self.rrsig.expiration), rrsig_ttl=min_ttl, reference_time=fmt.timestamp_to_datetime(self.reference_ts)))
        elif self.reference_ts + CLOCK_SKEW_WARNING >= self.rrsig.expiration:
            self.warnings.append(Errors.ExpirationWithinClockSkew(expiration=fmt.timestamp_to_datetime(self.rrsig.expiration), reference_time=fmt.timestamp_to_datetime(self.reference_ts)))

        if self.signature_valid == False and self.dnskey.rdata.algorithm in supported_algs:
            # only report this if we're not referring to a key revoked post-sign
            if self.dnskey.key_tag == self.rrsig.key_tag:
                if self.validation_status == RRSIG_STATUS_VALID:
                    self.validation_status = RRSIG_STATUS_INVALID_SIG
                self.errors.append(Errors.SignatureInvalid())

    def __str__(self):
        return 'RRSIG covering %s/%s' % (fmt.humanize_name(self.rrset.rrset.name), dns.rdatatype.to_text(self.rrset.rrset.rdtype))

    def serialize(self, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        '''Serialize this status to an OrderedDict, with detail governed by
        loglevel.  map_ip_to_ns_name, if given, is a callable mapping a server
        IP to its NS name(s).'''

        d = OrderedDict()

        erroneous_status = self.validation_status not in (RRSIG_STATUS_VALID, RRSIG_STATUS_INDETERMINATE_NO_DNSKEY, RRSIG_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM)

        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR) or \
                erroneous_status

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = '%s/%d/%d' % (lb2s(self.rrsig.signer.canonicalize().to_text()), self.rrsig.algorithm, self.rrsig.key_tag)

        if loglevel <= logging.DEBUG:
            d.update((
                ('description', formatter(str(self))),
                ('signer', formatter(lb2s(self.rrsig.signer.canonicalize().to_text()))),
                ('algorithm', self.rrsig.algorithm),
                ('key_tag', self.rrsig.key_tag),
                ('original_ttl', self.rrsig.original_ttl),
                ('labels', self.rrsig.labels),
                ('inception', fmt.timestamp_to_str(self.rrsig.inception)),
                ('expiration', fmt.timestamp_to_str(self.rrsig.expiration)),
                ('signature', lb2s(base64.b64encode(self.rrsig.signature))),
                ('ttl', self.rrset.rrsig_info[self.rrsig].ttl),
            ))
            if html_format:
                # human-friendly annotations for HTML output
                d['algorithm'] = '%d (%s)' % (self.rrsig.algorithm, fmt.DNSKEY_ALGORITHMS.get(self.rrsig.algorithm, self.rrsig.algorithm))
                d['original_ttl'] = '%d (%s)' % (self.rrsig.original_ttl, fmt.humanize_time(self.rrsig.original_ttl))
                if self.rrset.is_wildcard(self.rrsig):
                    d['labels'] = '%d (wildcard)' % (self.rrsig.labels)
                else:
                    d['labels'] = '%d (no wildcard)' % (self.rrsig.labels)
                d['inception'] += ' (%s)' % (fmt.format_diff(fmt.timestamp_to_datetime(self.reference_ts), fmt.timestamp_to_datetime(self.rrsig.inception)))
                d['expiration'] += ' (%s)' % (fmt.format_diff(fmt.timestamp_to_datetime(self.reference_ts), fmt.timestamp_to_datetime(self.rrsig.expiration)))
                d['ttl'] = '%d (%s)' % (self.rrset.rrsig_info[self.rrsig].ttl, fmt.humanize_time(self.rrset.rrsig_info[self.rrsig].ttl))

        if loglevel <= logging.INFO or erroneous_status:
            d['status'] = rrsig_status_mapping[self.validation_status]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.rrset.rrsig_info[self.rrsig].servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            # collect query tags and NSID values across all responses
            tags = set()
            nsids = set()
            for server,client in self.rrset.rrsig_info[self.rrsig].servers_clients:
                for response in self.rrset.rrsig_info[self.rrsig].servers_clients[(server, client)]:
                    if response is not None:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]
        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors]
        return d

class DSStatus(object):
    '''The validation status of a DS (or DLV) record with respect to a
    DNSKEY.'''

    def __init__(self, ds, ds_meta, dnskey, supported_digest_algs, ignore_rfc8624):
        self.ds = ds
        self.ds_meta = ds_meta
        self.dnskey = dnskey
        self.warnings = []
        self.errors = []

        # digest check: None if there is no DNSKEY to check against
        if self.dnskey is None:
            self.digest_valid = None
        else:
            self.digest_valid = crypto.validate_ds_digest(ds.digest_type, ds.digest, dnskey.message_for_ds())

        self.validation_status = DS_STATUS_VALID
        if self.digest_valid is None or self.ds.digest_type not in supported_digest_algs:
            # Either we cannot reproduce a digest with this type, or we are
            # explicitly directed to ignore the digest type.
            if self.dnskey is None:
                # In this case, there is no corresponding DNSKEY, so we make
                # the status "INDETERMINATE".
                if self.validation_status == DS_STATUS_VALID:
                    self.validation_status = DS_STATUS_INDETERMINATE_NO_DNSKEY
            else:
                # If there is a DNSKEY, then we look at *why* we are ignoring
                # the digest of the DNSKEY.
                if self.ds.digest_type in DS_DIGEST_ALGS_VALIDATION_PROHIBITED:
                    # In this case, specification dictates that the algorithm
                    # MUST NOT be validated, so we mark it as ignored.
                    if self.validation_status == DS_STATUS_VALID:
                        self.validation_status = DS_STATUS_ALGORITHM_IGNORED
                else:
                    # In this case, we can't validate this particular
                    # digest type, either because the code doesn't support it,
                    # or because we have been explicitly directed to ignore it.
                    # In either case, mark it as "UNKNOWN", and warn that it is
                    # not supported.
                    if self.validation_status == DS_STATUS_VALID:
                        self.validation_status = DS_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM
                    self.warnings.append(Errors.DigestAlgorithmNotSupported(algorithm=self.ds.digest_type))

        # Independent of whether or not we considered the digest for
        # validation, issue a warning if we are using a digest type for which
        # validation or signing has been prohibited.
        #
        # Validation is prohibited
        if self.ds.digest_type in DS_DIGEST_ALGS_VALIDATION_PROHIBITED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.DigestAlgorithmValidationProhibited(algorithm=self.ds.digest_type))

        # Signing is prohibited or, at least, not recommended
        if self.ds.digest_type in DS_DIGEST_ALGS_PROHIBITED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.DigestAlgorithmProhibited(algorithm=self.ds.digest_type))
        elif self.ds.digest_type in DS_DIGEST_ALGS_NOT_RECOMMENDED:
            if not ignore_rfc8624:
                self.warnings.append(Errors.DigestAlgorithmNotRecommended(algorithm=self.ds.digest_type))

        # a DS matching a revoked DNSKEY is invalid; a mismatched key tag
        # suggests a pre-revocation match
        if self.dnskey is not None and \
                self.dnskey.rdata.flags & fmt.DNSKEY_FLAGS['revoke']:
            if self.dnskey.key_tag != self.ds.key_tag:
                if self.validation_status == DS_STATUS_VALID:
                    self.validation_status = DS_STATUS_INDETERMINATE_MATCH_PRE_REVOKE
            else:
                self.errors.append(Errors.DNSKEYRevokedDS())
                if self.validation_status == DS_STATUS_VALID:
                    self.validation_status = DS_STATUS_INVALID

        if self.digest_valid == False and self.ds.digest_type in supported_digest_algs:
            # only report this if we're not referring to a key revoked post-DS
            if self.dnskey.key_tag == self.ds.key_tag:
                if self.validation_status == DS_STATUS_VALID:
                    self.validation_status = DS_STATUS_INVALID_DIGEST
                self.errors.append(Errors.DigestInvalid())

        # RFC 4509
        if self.ds.digest_type == 1:
            stronger_algs_all_ds = set()
            # Cycle through all other DS records in the DS RRset, and
            # create a list of digest types that are stronger than SHA1
            # and are being used by DS records across the *entire* DS.
for ds_rdata in self.ds_meta.rrset: if ds_rdata.digest_type in DS_DIGEST_ALGS_STRONGER_THAN_SHA1: stronger_algs_all_ds.add(ds_rdata.digest_type) # Consider only digest types that we actually support stronger_algs_all_ds.intersection_update(supported_digest_algs) if stronger_algs_all_ds: # If there are DS records in the DS RRset with digest type # stronger than SHA1, then this one MUST be ignored by # validators (RFC 4509). for digest_alg in stronger_algs_all_ds: if digest_alg in DS_DIGEST_ALGS_IGNORING_SHA1: if self.validation_status == DS_STATUS_VALID: self.validation_status = DS_STATUS_ALGORITHM_IGNORED self.warnings.append(Errors.DSDigestAlgorithmIgnored(algorithm=1, new_algorithm=digest_alg)) else: self.warnings.append(Errors.DSDigestAlgorithmMaybeIgnored(algorithm=1, new_algorithm=digest_alg)) def __str__(self): return '%s record(s) corresponding to DNSKEY for %s (algorithm %d (%s), key tag %d)' % (dns.rdatatype.to_text(self.ds_meta.rrset.rdtype), fmt.humanize_name(self.ds_meta.rrset.name), self.ds.algorithm, fmt.DNSKEY_ALGORITHMS.get(self.ds.algorithm, self.ds.algorithm), self.ds.key_tag) def serialize(self, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=True): d = OrderedDict() erroneous_status = self.validation_status not in (DS_STATUS_VALID, DS_STATUS_INDETERMINATE_NO_DNSKEY, DS_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM) show_id = loglevel <= logging.INFO or \ (self.warnings and loglevel <= logging.WARNING) or \ (self.errors and loglevel <= logging.ERROR) or \ erroneous_status if html_format: formatter = lambda x: escape(x, True) else: formatter = lambda x: x if show_id: d['id'] = '%d/%d/%d' % (self.ds.algorithm, self.ds.key_tag, self.ds.digest_type) if loglevel <= logging.DEBUG: d.update(( ('description', formatter(str(self))), ('algorithm', self.ds.algorithm), ('key_tag', self.ds.key_tag), ('digest_type', self.ds.digest_type), ('digest', lb2s(base64.b64encode(self.ds.digest))), )) if html_format: d['algorithm'] = '%d 
(%s)' % (self.ds.algorithm, fmt.DNSKEY_ALGORITHMS.get(self.ds.algorithm, self.ds.algorithm)) d['digest_type'] = '%d (%s)' % (self.ds.digest_type, fmt.DS_DIGEST_TYPES.get(self.ds.digest_type, self.ds.digest_type)) d['ttl'] = self.ds_meta.rrset.ttl if html_format: d['ttl'] = '%d (%s)' % (self.ds_meta.rrset.ttl, fmt.humanize_time(self.ds_meta.rrset.ttl)) if loglevel <= logging.INFO or erroneous_status: d['status'] = ds_status_mapping[self.validation_status] if loglevel <= logging.INFO: servers = tuple_to_dict(self.ds_meta.servers_clients) if consolidate_clients: servers = list(servers) servers.sort() d['servers'] = servers if map_ip_to_ns_name is not None: try: ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers])) except IndexError: ns_names = [] ns_names.sort() d['ns_names'] = ns_names tags = set() nsids = set() for server,client in self.ds_meta.servers_clients: for response in self.ds_meta.servers_clients[(server, client)]: if response is not None: tags.add(response.effective_query_tag()) nsid = response.nsid_val() if nsid is not None: nsids.add(nsid) if nsids: d['nsid_values'] = list(nsids) d['nsid_values'].sort() d['query_options'] = list(tags) d['query_options'].sort() if self.warnings and loglevel <= logging.WARNING: d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings] if self.errors and loglevel <= logging.ERROR: d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors] return d class NSECStatus(object): def __repr__(self): return '<%s: "%s">' % (self.__class__.__name__, self.qname) def _get_wildcard(self, qname, nsec_rrset): covering_name = nsec_rrset.name next_name = nsec_rrset[0].next for i in range(len(qname)): j = -(i + 1) if i < len(covering_name) and covering_name[j].lower() == qname[j].lower(): continue elif i < len(next_name) and next_name[j].lower() == qname[j].lower(): continue else: 
                break
        # i is the count of matching trailing labels; prepend '*' to form
        # the wildcard name.
        return dns.name.Name(('*',) + qname[-i:])

class NSECStatusNXDOMAIN(NSECStatus):
    """Status of NSEC records proving an NXDOMAIN (name-error) response."""

    def __init__(self, qname, rdtype, origin, is_zone, nsec_set_info):
        self.qname = qname
        self.origin = origin
        self.is_zone = is_zone
        self.warnings = []
        self.errors = []

        self.wildcard_name = None

        self.nsec_names_covering_qname = {}
        covering_names = nsec_set_info.nsec_covering_name(self.qname)

        self.opt_out = None
        if covering_names:
            self.nsec_names_covering_qname[self.qname] = covering_names
            # Use one covering NSEC to infer the wildcard implied by it.
            covering_name = list(covering_names)[0]
            self.wildcard_name = self._get_wildcard(qname, nsec_set_info.rrsets[covering_name].rrset)

        self.nsec_names_covering_wildcard = {}
        if self.wildcard_name is not None:
            covering_names = nsec_set_info.nsec_covering_name(self.wildcard_name)
            if covering_names:
                self.nsec_names_covering_wildcard[self.wildcard_name] = covering_names

        # check for covering of the origin
        self.nsec_names_covering_origin = {}
        covering_names = nsec_set_info.nsec_covering_name(self.origin)
        if covering_names:
            self.nsec_names_covering_origin[self.origin] = covering_names

        self._set_validation_status(nsec_set_info)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                self.qname == other.qname and self.origin == other.origin and self.nsec_set_info == other.nsec_set_info

    def __hash__(self):
        # NOTE(review): hashes by identity while __eq__ compares by value —
        # equal instances may hash differently; preserved as-is.
        return hash(id(self))

    def _set_validation_status(self, nsec_set_info):
        """Derive validation_status and populate errors from the NSEC set."""
        self.validation_status = NSEC_STATUS_VALID
        if not self.nsec_names_covering_qname:
            # The queried name (sname) itself must be covered.
            self.validation_status = NSEC_STATUS_INVALID
            self.errors.append(Errors.SnameNotCoveredNameError(sname=fmt.humanize_name(self.qname)))
        if not self.nsec_names_covering_wildcard and self.wildcard_name is not None:
            # The implied wildcard must be proven non-existent too.
            self.validation_status = NSEC_STATUS_INVALID
            self.errors.append(Errors.WildcardNotCoveredNSEC(wildcard=fmt.humanize_name(self.wildcard_name)))
        if self.nsec_names_covering_origin:
            # No NSEC may "cover" the zone origin; that indicates a broken
            # last NSEC in the chain.
            self.validation_status = NSEC_STATUS_INVALID
            qname, nsec_names = list(self.nsec_names_covering_origin.items())[0]
            nsec_rrset = nsec_set_info.rrsets[list(nsec_names)[0]].rrset
            self.errors.append(Errors.LastNSECNextNotZone(nsec_owner=fmt.humanize_name(nsec_rrset.name), next_name=fmt.humanize_name(nsec_rrset[0].next), zone_name=fmt.humanize_name(self.origin)))

        # if it validation_status, we project out just the pertinent NSEC records
        # otherwise clone it by projecting them all
        if self.validation_status == NSEC_STATUS_VALID:
            covering_names = set()
            for names in list(self.nsec_names_covering_qname.values()) + list(self.nsec_names_covering_wildcard.values()):
                covering_names.update(names)
            self.nsec_set_info = nsec_set_info.project(*list(covering_names))
        else:
            self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets))

    def __str__(self):
        return 'NSEC record(s) proving the non-existence (NXDOMAIN) of %s' % (fmt.humanize_name(self.qname))

    def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        """Serialize this NSEC NXDOMAIN proof status to an OrderedDict."""
        d = OrderedDict()

        nsec_list = []
        for nsec_rrset in self.nsec_set_info.rrsets.values():
            if rrset_info_serializer is not None:
                nsec_serialized = rrset_info_serializer(nsec_rrset, consolidate_clients=consolidate_clients, show_servers=False, loglevel=loglevel, html_format=html_format)
                if nsec_serialized:
                    nsec_list.append(nsec_serialized)
            elif loglevel <= logging.DEBUG:
                nsec_list.append(nsec_rrset.serialize(consolidate_clients=consolidate_clients, html_format=html_format))

        erroneous_status = self.validation_status != STATUS_VALID

        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR) or \
                (erroneous_status or nsec_list)

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = 'NSEC'

        if loglevel <= logging.DEBUG:
            d['description'] = formatter(str(self))
            if nsec_list:
                d['nsec'] = nsec_list

        if loglevel <= logging.DEBUG:
            if self.nsec_names_covering_qname:
                # Show which NSEC covers the queried name.
                qname, nsec_names = list(self.nsec_names_covering_qname.items())[0]
                nsec_name = list(nsec_names)[0]
                nsec_rr = self.nsec_set_info.rrsets[nsec_name].rrset[0]
                d['sname_covering'] = OrderedDict((
                    ('covered_name', formatter(lb2s(qname.canonicalize().to_text()))),
                    ('nsec_owner', formatter(lb2s(nsec_name.canonicalize().to_text()))),
                    ('nsec_next', formatter(lb2s(nsec_rr.next.canonicalize().to_text())))
                ))
            if self.nsec_names_covering_wildcard:
                # Show which NSEC covers the implied wildcard.
                wildcard, nsec_names = list(self.nsec_names_covering_wildcard.items())[0]
                nsec_name = list(nsec_names)[0]
                nsec_rr = self.nsec_set_info.rrsets[nsec_name].rrset[0]
                d['wildcard_covering'] = OrderedDict((
                    ('covered_name', formatter(lb2s(wildcard.canonicalize().to_text()))),
                    ('nsec_owner', formatter(lb2s(nsec_name.canonicalize().to_text()))),
                    ('nsec_next', formatter(lb2s(nsec_rr.next.canonicalize().to_text())))
                ))

        if loglevel <= logging.INFO or erroneous_status:
            d['status'] = nsec_status_mapping[self.validation_status]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.nsec_set_info.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            tags = set()
            nsids = set()
            for server,client in self.nsec_set_info.servers_clients:
                for response in self.nsec_set_info.servers_clients[(server, client)]:
                    if response is not None:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]
        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = \
                [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors]
        return d

class NSECStatusWildcard(NSECStatusNXDOMAIN):
    """Status of NSEC records accompanying a wildcard-expanded answer."""

    def __init__(self, qname, wildcard_name, rdtype, origin, is_zone, nsec_set_info):
        # Wildcard name as asserted by the RRSIG labels field.
        self.wildcard_name_from_rrsig = wildcard_name
        super(NSECStatusWildcard, self).__init__(qname, rdtype, origin, is_zone, nsec_set_info)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                super(NSECStatusWildcard, self).__eq__(other) and self.wildcard_name_from_rrsig == other.wildcard_name_from_rrsig

    def __hash__(self):
        return hash(id(self))

    def _next_closest_encloser(self):
        # The name one label below the wildcard's parent, on the path to qname.
        return dns.name.Name(self.qname.labels[-len(self.wildcard_name):])

    def _set_validation_status(self, nsec_set_info):
        """Validate the NSEC proof for a wildcard-expanded answer."""
        self.validation_status = NSEC_STATUS_VALID
        if self.nsec_names_covering_qname:
            # The next closest encloser must itself be covered for the
            # wildcard expansion to be legitimate.
            next_closest_encloser = self._next_closest_encloser()
            nsec_covering_next_closest_encloser = nsec_set_info.nsec_covering_name(next_closest_encloser)
            if not nsec_covering_next_closest_encloser:
                self.validation_status = NSEC_STATUS_INVALID
                self.errors.append(Errors.WildcardExpansionInvalid(sname=fmt.humanize_name(self.qname), wildcard=fmt.humanize_name(self.wildcard_name), next_closest_encloser=fmt.humanize_name(next_closest_encloser)))
        else:
            self.validation_status = NSEC_STATUS_INVALID
            self.errors.append(Errors.SnameNotCoveredWildcardAnswer(sname=fmt.humanize_name(self.qname)))

        if self.nsec_names_covering_wildcard:
            # The wildcard itself must NOT be covered (it exists, since it
            # synthesized the answer).
            self.validation_status = NSEC_STATUS_INVALID
            self.errors.append(Errors.WildcardCoveredAnswerNSEC(wildcard=fmt.humanize_name(self.wildcard_name)))

        if self.nsec_names_covering_origin:
            self.validation_status = NSEC_STATUS_INVALID
            qname, nsec_names = list(self.nsec_names_covering_origin.items())[0]
            nsec_rrset = nsec_set_info.rrsets[list(nsec_names)[0]].rrset
            self.errors.append(Errors.LastNSECNextNotZone(nsec_owner=fmt.humanize_name(nsec_rrset.name), next_name=fmt.humanize_name(nsec_rrset[0].next), zone_name=fmt.humanize_name(self.origin)))

        # if it validation_status, we project out just the pertinent NSEC records
        # otherwise clone it by projecting them all
        if self.validation_status == NSEC_STATUS_VALID:
            covering_names = set()
            for names in self.nsec_names_covering_qname.values():
                covering_names.update(names)
            self.nsec_set_info = nsec_set_info.project(*list(covering_names))
        else:
            self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets))

    def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        """Serialize, dropping the 'wildcard' key emitted by the parent."""
        d = super(NSECStatusWildcard, self).serialize(rrset_info_serializer, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=map_ip_to_ns_name)
        try:
            del d['wildcard']
        except KeyError:
            pass
        return d

class NSECStatusNODATA(NSECStatus):
    """Status of NSEC records proving a NODATA (empty answer) response."""

    def __init__(self, qname, rdtype, origin, is_zone, nsec_set_info, sname_must_match=False):
        self.qname = qname
        self.rdtype = rdtype
        self.origin = origin
        self.is_zone = is_zone
        self.referral = nsec_set_info.referral
        self.warnings = []
        self.errors = []

        self.wildcard_name = None

        try:
            # An NSEC record matching the query name directly.
            self.nsec_for_qname = nsec_set_info.rrsets[self.qname]
            self.has_rdtype = nsec_set_info.rdtype_exists_in_bitmap(self.qname, self.rdtype)
            self.has_ns = nsec_set_info.rdtype_exists_in_bitmap(self.qname, dns.rdatatype.NS)
            self.has_ds = nsec_set_info.rdtype_exists_in_bitmap(self.qname, dns.rdatatype.DS)
            self.has_soa = nsec_set_info.rdtype_exists_in_bitmap(self.qname, dns.rdatatype.SOA)
        except KeyError:
            self.nsec_for_qname = None
            self.has_rdtype = False
            self.has_ns = False
            self.has_ds = False
            self.has_soa = False

            if not sname_must_match:
                # If no NSEC exists for the name itself, then look for an NSEC with
                # an (empty non-terminal) ancestor
                for nsec_name in nsec_set_info.rrsets:
                    next_name = nsec_set_info.rrsets[nsec_name].rrset[0].next
                    if next_name.is_subdomain(self.qname) and next_name != self.qname:
                        self.nsec_for_qname = nsec_set_info.rrsets[nsec_name]
                        break
        self.nsec_names_covering_qname = {}
        covering_names = nsec_set_info.nsec_covering_name(self.qname)
        if covering_names:
            self.nsec_names_covering_qname[self.qname] = covering_names
            # Infer the wildcard implied by one covering NSEC.
            covering_name = list(covering_names)[0]
            self.wildcard_name = self._get_wildcard(qname, nsec_set_info.rrsets[covering_name].rrset)

        self.nsec_for_wildcard_name = None
        self.wildcard_has_rdtype = None
        if self.wildcard_name is not None:
            try:
                self.nsec_for_wildcard_name = nsec_set_info.rrsets[self.wildcard_name]
                self.wildcard_has_rdtype = nsec_set_info.rdtype_exists_in_bitmap(self.wildcard_name, self.rdtype)
                self.wildcard_has_ns = nsec_set_info.rdtype_exists_in_bitmap(self.wildcard_name, dns.rdatatype.NS)
                self.wildcard_has_ds = nsec_set_info.rdtype_exists_in_bitmap(self.wildcard_name, dns.rdatatype.DS)
                self.wildcard_has_soa = nsec_set_info.rdtype_exists_in_bitmap(self.wildcard_name, dns.rdatatype.SOA)
            except KeyError:
                self.wildcard_has_rdtype = False
                self.wildcard_has_ns = False
                self.wildcard_has_ds = False
                self.wildcard_has_soa = False

        # check for covering of the origin
        self.nsec_names_covering_origin = {}
        covering_names = nsec_set_info.nsec_covering_name(self.origin)
        if covering_names:
            self.nsec_names_covering_origin[self.origin] = covering_names

        self.opt_out = None
        self._set_validation_status(nsec_set_info)

    def __str__(self):
        return 'NSEC record(s) proving non-existence (NODATA) of %s/%s' % (fmt.humanize_name(self.qname), dns.rdatatype.to_text(self.rdtype))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                self.qname == other.qname and self.rdtype == other.rdtype and self.origin == other.origin and self.referral == other.referral and self.nsec_set_info == other.nsec_set_info

    def __hash__(self):
        return hash(id(self))

    def _set_validation_status(self, nsec_set_info):
        """Derive validation_status for the NODATA proof."""
        self.validation_status = NSEC_STATUS_VALID
        if self.nsec_for_qname is not None:
            # RFC 4034 5.2, 6840 4.4
            if self.rdtype == dns.rdatatype.DS or self.referral:
                # Referral / DS case: NS must be present; DS and SOA must not.
                if self.is_zone and not self.has_ns:
                    self.errors.append(Errors.ReferralWithoutNSBitNSEC(sname=fmt.humanize_name(self.qname)))
                    self.validation_status = NSEC_STATUS_INVALID
                if self.has_ds:
                    self.errors.append(Errors.ReferralWithDSBitNSEC(sname=fmt.humanize_name(self.qname)))
                    self.validation_status = NSEC_STATUS_INVALID
                if self.has_soa:
                    self.errors.append(Errors.ReferralWithSOABitNSEC(sname=fmt.humanize_name(self.qname)))
                    self.validation_status = NSEC_STATUS_INVALID
            else:
                # The queried type must be absent from the bitmap.
                if self.has_rdtype:
                    self.errors.append(Errors.StypeInBitmapNODATANSEC(sname=fmt.humanize_name(self.qname), stype=dns.rdatatype.to_text(self.rdtype)))
                    self.validation_status = NSEC_STATUS_INVALID
            if self.nsec_names_covering_qname:
                # A matching NSEC and a covering NSEC are contradictory.
                self.errors.append(Errors.SnameCoveredNODATANSEC(sname=fmt.humanize_name(self.qname)))
                self.validation_status = NSEC_STATUS_INVALID

        elif self.nsec_for_wildcard_name:
            # implies wildcard_name, which implies nsec_names_covering_qname
            if self.rdtype == dns.rdatatype.DS or self.referral:
                if self.is_zone and not self.wildcard_has_ns:
                    self.errors.append(Errors.ReferralWithoutNSBitNSEC(sname=fmt.humanize_name(self.wildcard_name)))
                    self.validation_status = NSEC_STATUS_INVALID
                if self.wildcard_has_ds:
                    self.errors.append(Errors.ReferralWithDSBitNSEC(sname=fmt.humanize_name(self.wildcard_name)))
                    self.validation_status = NSEC_STATUS_INVALID
                if self.wildcard_has_soa:
                    self.errors.append(Errors.ReferralWithSOABitNSEC(sname=fmt.humanize_name(self.wildcard_name)))
                    self.validation_status = NSEC_STATUS_INVALID
            else:
                if self.has_rdtype:
                    self.errors.append(Errors.StypeInBitmapNODATANSEC(sname=fmt.humanize_name(self.qname), stype=dns.rdatatype.to_text(self.rdtype)))
                    self.validation_status = NSEC_STATUS_INVALID
            if self.wildcard_has_rdtype:
                self.validation_status = NSEC_STATUS_INVALID
                self.errors.append(Errors.StypeInBitmapNODATANSEC(sname=fmt.humanize_name(self.wildcard_name), stype=dns.rdatatype.to_text(self.rdtype)))
            if self.nsec_names_covering_origin:
                self.validation_status = NSEC_STATUS_INVALID
                qname, nsec_names = list(self.nsec_names_covering_origin.items())[0]
                nsec_rrset = nsec_set_info.rrsets[list(nsec_names)[0]].rrset
                self.errors.append(Errors.LastNSECNextNotZone(nsec_owner=fmt.humanize_name(nsec_rrset.name), next_name=fmt.humanize_name(nsec_rrset[0].next), zone_name=fmt.humanize_name(self.origin)))

        else:
            # No NSEC matches (or has an ENT descendant of) the query name.
            self.validation_status = NSEC_STATUS_INVALID
            self.errors.append(Errors.NoNSECMatchingSnameNODATA(sname=fmt.humanize_name(self.qname)))

        # if it validation_status, we project out just the pertinent NSEC records
        # otherwise clone it by projecting them all
        if self.validation_status == NSEC_STATUS_VALID:
            covering_names = set()
            if self.nsec_for_qname is not None:
                covering_names.add(self.nsec_for_qname.rrset.name)
            if self.nsec_names_covering_qname:
                for names in self.nsec_names_covering_qname.values():
                    covering_names.update(names)
            if self.nsec_for_wildcard_name is not None:
                covering_names.add(self.wildcard_name)
            self.nsec_set_info = nsec_set_info.project(*list(covering_names))
        else:
            self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets))

    def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        """Serialize this NSEC NODATA proof status to an OrderedDict."""
        d = OrderedDict()

        nsec_list = []
        for nsec_rrset in self.nsec_set_info.rrsets.values():
            if rrset_info_serializer is not None:
                nsec_serialized = rrset_info_serializer(nsec_rrset, consolidate_clients=consolidate_clients, show_servers=False, loglevel=loglevel, html_format=html_format)
                if nsec_serialized:
                    nsec_list.append(nsec_serialized)
            elif loglevel <= logging.DEBUG:
                nsec_list.append(nsec_rrset.serialize(consolidate_clients=consolidate_clients, html_format=html_format))

        erroneous_status = self.validation_status != STATUS_VALID

        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR) or \
                (erroneous_status or nsec_list)

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = 'NSEC'

        if loglevel <= logging.DEBUG:
            d['description'] = formatter(str(self))
            if nsec_list:
                d['nsec'] = nsec_list

        if loglevel <= logging.DEBUG:
            if self.nsec_for_qname is not None:
                d['sname_nsec_match'] = formatter(lb2s(self.nsec_for_qname.rrset.name.canonicalize().to_text()))

            if self.nsec_names_covering_qname:
                qname, nsec_names = list(self.nsec_names_covering_qname.items())[0]
                nsec_name = list(nsec_names)[0]
                nsec_rr = self.nsec_set_info.rrsets[nsec_name].rrset[0]
                d['sname_covering'] = OrderedDict((
                    ('covered_name', formatter(lb2s(qname.canonicalize().to_text()))),
                    ('nsec_owner', formatter(lb2s(nsec_name.canonicalize().to_text()))),
                    ('nsec_next', formatter(lb2s(nsec_rr.next.canonicalize().to_text())))
                ))

            if self.nsec_for_wildcard_name is not None:
                d['wildcard_nsec_match'] = formatter(lb2s(self.wildcard_name.canonicalize().to_text()))

        if loglevel <= logging.INFO or erroneous_status:
            d['status'] = nsec_status_mapping[self.validation_status]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.nsec_set_info.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            tags = set()
            nsids = set()
            for server,client in self.nsec_set_info.servers_clients:
                for response in self.nsec_set_info.servers_clients[(server, client)]:
                    if response is not None:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]
        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = \
                [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors]
        return d

class NSEC3Status(object):
    """Base class for NSEC3-based proof statuses."""

    def __repr__(self):
        return '<%s: "%s">' % (self.__class__.__name__, self.qname)

    def _get_next_closest_encloser(self, encloser):
        # One label below the closest encloser, on the path to qname.
        return dns.name.Name(self.qname.labels[-(len(encloser)+1):])

    def get_next_closest_encloser(self):
        if self.closest_encloser:
            encloser_name, nsec_names = list(self.closest_encloser.items())[0]
            return self._get_next_closest_encloser(encloser_name)
        return None

    def _get_wildcard(self, encloser):
        return dns.name.from_text('*', encloser)

    def get_wildcard(self):
        if self.closest_encloser:
            encloser_name, nsec_names = list(self.closest_encloser.items())[0]
            return self._get_wildcard(encloser_name)
        return None

class NSEC3StatusNXDOMAIN(NSEC3Status):
    """Status of NSEC3 records proving an NXDOMAIN (name-error) response."""

    def __init__(self, qname, rdtype, origin, is_zone, nsec_set_info, ignore_rfc9276):
        self.qname = qname
        self.origin = origin
        self.is_zone = is_zone
        self.warnings = []
        self.errors = []

        # Maps a name to {(salt, alg, iterations): NSEC3 digest name}.
        self.name_digest_map = {}

        self._set_closest_encloser(nsec_set_info)

        self.nsec_names_covering_qname = {}
        self.nsec_names_covering_wildcard = {}
        self.opt_out = None

        # Track the largest iteration count and any non-empty salt seen,
        # for RFC 9276 checks.
        self.iterations = 0
        self.salt = None
        for (salt, alg, iterations), nsec3_names in nsec_set_info.nsec3_params.items():
            if iterations > self.iterations:
                self.iterations = iterations
            if salt:
                self.salt = salt

            digest_name = nsec_set_info.get_digest_name_for_nsec3(self.qname, self.origin, salt, alg, iterations)
            if self.qname not in self.name_digest_map:
                self.name_digest_map[self.qname] = {}
            self.name_digest_map[self.qname][(salt, alg, iterations)] = digest_name

        for encloser in self.closest_encloser:
            next_closest_encloser = self._get_next_closest_encloser(encloser)
            for salt, alg, iterations in nsec_set_info.nsec3_params:
                try:
                    digest_name = self.name_digest_map[next_closest_encloser][(salt, alg, iterations)]
                except KeyError:
                    digest_name = nsec_set_info.get_digest_name_for_nsec3(next_closest_encloser, self.origin, salt, alg, iterations)

                if digest_name is not None:
                    covering_names = nsec_set_info.nsec3_covering_name(digest_name, salt, alg, iterations)
                    if covering_names:
                        self.nsec_names_covering_qname[digest_name] = covering_names
                        # Record whether any covering NSEC3 has the opt-out
                        # flag (0x01) set.
                        self.opt_out = False
                        for nsec_name in covering_names:
                            if nsec_set_info.rrsets[nsec_name].rrset[0].flags & 0x01:
                                self.opt_out = True

                if next_closest_encloser not in self.name_digest_map:
                    self.name_digest_map[next_closest_encloser] = {}
                self.name_digest_map[next_closest_encloser][(salt, alg, iterations)] = digest_name

                wildcard_name = self._get_wildcard(encloser)
                digest_name = nsec_set_info.get_digest_name_for_nsec3(wildcard_name, self.origin, salt, alg, iterations)

                if digest_name is not None:
                    covering_names = nsec_set_info.nsec3_covering_name(digest_name, salt, alg, iterations)
                    if covering_names:
                        self.nsec_names_covering_wildcard[digest_name] = covering_names

                if wildcard_name not in self.name_digest_map:
                    self.name_digest_map[wildcard_name] = {}
                self.name_digest_map[wildcard_name][(salt, alg, iterations)] = digest_name

        self._set_validation_status(nsec_set_info, ignore_rfc9276)

    def __str__(self):
        return 'NSEC3 record(s) proving the non-existence (NXDOMAIN) of %s' % (fmt.humanize_name(self.qname))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                self.qname == other.qname and self.origin == other.origin and self.nsec_set_info == other.nsec_set_info

    def __hash__(self):
        return hash(id(self))

    def _set_closest_encloser(self, nsec_set_info):
        self.closest_encloser = nsec_set_info.get_closest_encloser(self.qname, self.origin)

    def _set_validation_status(self, nsec_set_info, ignore_rfc9276):
        """Derive validation_status for the NSEC3 NXDOMAIN proof."""
        self.validation_status = NSEC_STATUS_VALID
        valid_algs, invalid_algs = nsec_set_info.get_algorithm_support()
        if invalid_algs:
            invalid_alg_err = Errors.UnsupportedNSEC3Algorithm(algorithm=list(invalid_algs)[0])
        else:
            invalid_alg_err = None
        if not ignore_rfc9276:
            # RFC 9276: iterations should be 0 and salt empty.
            if self.iterations > 0:
                self.errors.append(Errors.NonZeroNSEC3IterationCount())
            if self.salt is not None:
                self.warnings.append(Errors.NonEmptyNSEC3Salt())
        if not self.closest_encloser:
            self.validation_status = NSEC_STATUS_INVALID
            if valid_algs:
                self.errors.append(Errors.NoClosestEncloserNameError(sname=fmt.humanize_name(self.qname)))
            if invalid_algs:
                self.errors.append(invalid_alg_err)
        else:
            if not self.nsec_names_covering_qname:
                self.validation_status = NSEC_STATUS_INVALID
                if valid_algs:
                    next_closest_encloser = self.get_next_closest_encloser()
                    self.errors.append(Errors.NextClosestEncloserNotCoveredNameError(next_closest_encloser=fmt.humanize_name(next_closest_encloser)))
                if invalid_algs:
                    self.errors.append(invalid_alg_err)
            if not self.nsec_names_covering_wildcard:
                self.validation_status = NSEC_STATUS_INVALID
                if valid_algs:
                    wildcard_name = self.get_wildcard()
                    self.errors.append(Errors.WildcardNotCoveredNSEC3(wildcard=fmt.humanize_name(wildcard_name)))
                # Avoid appending the same unsupported-algorithm error twice.
                if invalid_algs and invalid_alg_err not in self.errors:
                    self.errors.append(invalid_alg_err)

        # if it validation_status, we project out just the pertinent NSEC records
        # otherwise clone it by projecting them all
        if self.validation_status == NSEC_STATUS_VALID:
            covering_names = set()
            for names in list(self.closest_encloser.values()) + list(self.nsec_names_covering_qname.values()) + list(self.nsec_names_covering_wildcard.values()):
                covering_names.update(names)
            self.nsec_set_info = nsec_set_info.project(*list(covering_names))
        else:
            self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets))

        # Report errors with NSEC3 owner names
        for name in self.nsec_set_info.invalid_nsec3_owner:
            self.errors.append(Errors.InvalidNSEC3OwnerName(name=fmt.format_nsec3_name(name)))
        for name in self.nsec_set_info.invalid_nsec3_hash:
            self.errors.append(Errors.InvalidNSEC3Hash(name=fmt.format_nsec3_name(name), nsec3_hash=lb2s(base32.b32encode(self.nsec_set_info.rrsets[name].rrset[0].next))))

    def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        """Serialize this NSEC3 NXDOMAIN proof status to an OrderedDict."""
        d = \
            OrderedDict()

        nsec3_list = []
        for nsec_rrset in self.nsec_set_info.rrsets.values():
            if rrset_info_serializer is not None:
                nsec_serialized = rrset_info_serializer(nsec_rrset, consolidate_clients=consolidate_clients, show_servers=False, loglevel=loglevel, html_format=html_format)
                if nsec_serialized:
                    nsec3_list.append(nsec_serialized)
            elif loglevel <= logging.DEBUG:
                nsec3_list.append(nsec_rrset.serialize(consolidate_clients=consolidate_clients, html_format=html_format))

        erroneous_status = self.validation_status != STATUS_VALID

        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR) or \
                (erroneous_status or nsec3_list)

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = 'NSEC3'

        if loglevel <= logging.DEBUG:
            d['description'] = formatter(str(self))
            if nsec3_list:
                d['nsec3'] = nsec3_list

        if loglevel <= logging.DEBUG:
            if self.opt_out is not None:
                d['opt_out'] = self.opt_out

            if self.closest_encloser:
                encloser_name, nsec_names = list(self.closest_encloser.items())[0]
                nsec_name = list(nsec_names)[0]
                d['closest_encloser'] = formatter(lb2s(encloser_name.canonicalize().to_text()))
                # could be inferred from wildcard
                if nsec_name is not None:
                    d['closest_encloser_hash'] = formatter(fmt.format_nsec3_name(nsec_name))

                next_closest_encloser = self._get_next_closest_encloser(encloser_name)
                d['next_closest_encloser'] = formatter(lb2s(next_closest_encloser.canonicalize().to_text()))
                digest_name = list(self.name_digest_map[next_closest_encloser].items())[0][1]
                if digest_name is not None:
                    d['next_closest_encloser_hash'] = formatter(fmt.format_nsec3_name(digest_name))
                else:
                    d['next_closest_encloser_hash'] = None

                if self.nsec_names_covering_qname:
                    qname, nsec_names = list(self.nsec_names_covering_qname.items())[0]
                    nsec_name = list(nsec_names)[0]
                    next_name = self.nsec_set_info.name_for_nsec3_next(nsec_name)
                    d['next_closest_encloser_covering'] = OrderedDict((
                        ('covered_name', formatter(fmt.format_nsec3_name(qname))),
                        ('nsec_owner', formatter(fmt.format_nsec3_name(nsec_name))),
                        ('nsec_next', formatter(fmt.format_nsec3_name(next_name))),
                    ))

                wildcard_name = self._get_wildcard(encloser_name)
                wildcard_digest = list(self.name_digest_map[wildcard_name].items())[0][1]
                d['wildcard'] = formatter(lb2s(wildcard_name.canonicalize().to_text()))
                if wildcard_digest is not None:
                    d['wildcard_hash'] = formatter(fmt.format_nsec3_name(wildcard_digest))
                else:
                    d['wildcard_hash'] = None
                if self.nsec_names_covering_wildcard:
                    wildcard, nsec_names = list(self.nsec_names_covering_wildcard.items())[0]
                    nsec_name = list(nsec_names)[0]
                    next_name = self.nsec_set_info.name_for_nsec3_next(nsec_name)
                    d['wildcard_covering'] = OrderedDict((
                        ('covered_name', formatter(fmt.format_nsec3_name(wildcard))),
                        ('nsec3_owner', formatter(fmt.format_nsec3_name(nsec_name))),
                        ('nsec3_next', formatter(fmt.format_nsec3_name(next_name))),
                    ))

            else:
                # No closest encloser: show only the hash of the query name.
                digest_name = list(self.name_digest_map[self.qname].items())[0][1]
                if digest_name is not None:
                    d['sname_hash'] = formatter(fmt.format_nsec3_name(digest_name))
                else:
                    d['sname_hash'] = None

        if loglevel <= logging.INFO or erroneous_status:
            d['status'] = nsec_status_mapping[self.validation_status]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.nsec_set_info.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            tags = set()
            nsids = set()
            for server,client in self.nsec_set_info.servers_clients:
                for response in self.nsec_set_info.servers_clients[(server, client)]:
                    if response is not None:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]
        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors]
        return d

class NSEC3StatusWildcard(NSEC3StatusNXDOMAIN):
    """Status of NSEC3 records accompanying a wildcard-expanded answer."""

    def __init__(self, qname, wildcard_name, rdtype, origin, is_zone, nsec_set_info, ignore_rfc9276):
        self.wildcard_name = wildcard_name
        super(NSEC3StatusWildcard, self).__init__(qname, rdtype, origin, is_zone, nsec_set_info, ignore_rfc9276)

    def _set_closest_encloser(self, nsec_set_info):
        super(NSEC3StatusWildcard, self)._set_closest_encloser(nsec_set_info)
        if not self.closest_encloser:
            # Fall back to the wildcard's parent as the (assumed) closest
            # encloser; None marks it as not proven by an NSEC3.
            self.closest_encloser = { self.wildcard_name.parent(): set([None]) }
            # fill in a dummy value for wildcard_name_digest_map
            self.name_digest_map[self.wildcard_name] = { None: self.wildcard_name }

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                super(NSEC3StatusWildcard, self).__eq__(other) and self.wildcard_name == other.wildcard_name

    def __hash__(self):
        return hash(id(self))

    def _set_validation_status(self, nsec_set_info, ignore_rfc9276):
        """Validate the NSEC3 proof for a wildcard-expanded answer."""
        self.validation_status = NSEC_STATUS_VALID
        if not ignore_rfc9276:
            if self.iterations > 0:
                self.errors.append(Errors.NonZeroNSEC3IterationCount())
            if self.salt is not None:
                self.warnings.append(Errors.NonEmptyNSEC3Salt())
        if not self.nsec_names_covering_qname:
            self.validation_status = NSEC_STATUS_INVALID
            valid_algs, invalid_algs = nsec_set_info.get_algorithm_support()
            if invalid_algs:
                invalid_alg_err = Errors.UnsupportedNSEC3Algorithm(algorithm=list(invalid_algs)[0])
            else:
                invalid_alg_err = None
            if valid_algs:
                next_closest_encloser = self.get_next_closest_encloser()
self.errors.append(Errors.NextClosestEncloserNotCoveredWildcardAnswer(next_closest_encloser=fmt.humanize_name(next_closest_encloser))) if invalid_algs: self.errors.append(invalid_alg_err) if self.nsec_names_covering_wildcard: self.validation_status = NSEC_STATUS_INVALID self.errors.append(Errors.WildcardCoveredAnswerNSEC3(wildcard=fmt.humanize_name(self.wildcard_name))) # if it validation_status, we project out just the pertinent NSEC records # otherwise clone it by projecting them all if self.validation_status == NSEC_STATUS_VALID: covering_names = set() for names in list(self.closest_encloser.values()) + list(self.nsec_names_covering_qname.values()): covering_names.update(names) self.nsec_set_info = nsec_set_info.project(*[x for x in covering_names if x is not None]) else: self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets)) # Report errors with NSEC3 owner names for name in self.nsec_set_info.invalid_nsec3_owner: self.errors.append(Errors.InvalidNSEC3OwnerName(name=fmt.format_nsec3_name(name))) for name in self.nsec_set_info.invalid_nsec3_hash: self.errors.append(Errors.InvalidNSEC3Hash(name=fmt.format_nsec3_name(name), nsec3_hash=lb2s(base32.b32encode(self.nsec_set_info.rrsets[name].rrset[0].next)))) def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None): d = super(NSEC3StatusWildcard, self).serialize(rrset_info_serializer, consolidate_clients=consolidate_clients, loglevel=loglevel, html_format=html_format, map_ip_to_ns_name=map_ip_to_ns_name) try: del d['wildcard'] except KeyError: pass try: del d['wildcard_digest'] except KeyError: pass if loglevel <= logging.DEBUG: if [x for x in list(self.closest_encloser.values())[0] if x is not None]: d['superfluous_closest_encloser'] = True return d class NSEC3StatusNODATA(NSEC3Status): def __init__(self, qname, rdtype, origin, is_zone, nsec_set_info, ignore_rfc9276): self.qname = qname self.rdtype = rdtype 
        self.origin = origin
        self.is_zone = is_zone
        self.referral = nsec_set_info.referral
        self.wildcard_name = None
        self.warnings = []
        self.errors = []

        # maps name -> { (salt, alg, iterations) -> NSEC3 digest name }
        self.name_digest_map = {}

        self.closest_encloser = nsec_set_info.get_closest_encloser(qname, origin)

        self.nsec_names_covering_qname = {}
        self.nsec_names_covering_wildcard = {}
        self.nsec_for_qname = set()
        self.nsec_for_wildcard_name = set()
        self.has_rdtype = False
        self.has_cname = False
        self.has_ns = False
        self.has_ds = False
        self.has_soa = False
        self.opt_out = None
        self.wildcard_has_rdtype = False
        self.wildcard_has_cname = False

        self.iterations = 0
        self.salt = None
        # First pass: record the maximum iteration count and any non-empty
        # salt seen, and pre-compute digest names for qname and for the
        # wildcard of each candidate closest encloser.
        for (salt, alg, iterations), nsec3_names in nsec_set_info.nsec3_params.items():
            if iterations > self.iterations:
                self.iterations = iterations
            if salt:
                self.salt = salt
            digest_name = nsec_set_info.get_digest_name_for_nsec3(self.qname, self.origin, salt, alg, iterations)
            if self.qname not in self.name_digest_map:
                self.name_digest_map[self.qname] = {}
            self.name_digest_map[self.qname][(salt, alg, iterations)] = digest_name

            for encloser in self.closest_encloser:
                wildcard_name = self._get_wildcard(encloser)
                digest_name = nsec_set_info.get_digest_name_for_nsec3(wildcard_name, self.origin, salt, alg, iterations)
                if digest_name in nsec3_names:
                    # an NSEC3 RR matches the wildcard; note which types are
                    # present in its bitmap
                    self.nsec_for_wildcard_name.add(digest_name)
                    if nsec_set_info.rdtype_exists_in_bitmap(digest_name, rdtype):
                        self.wildcard_has_rdtype = True
                    if nsec_set_info.rdtype_exists_in_bitmap(digest_name, dns.rdatatype.CNAME):
                        self.wildcard_has_cname = True
                if wildcard_name not in self.name_digest_map:
                    self.name_digest_map[wildcard_name] = {}
                self.name_digest_map[wildcard_name][(salt, alg, iterations)] = digest_name

        # Second pass: check whether an NSEC3 RR matches qname itself; if not,
        # check whether the next closest encloser of each candidate closest
        # encloser is covered (and whether opt-out applies).
        for (salt, alg, iterations), nsec3_names in nsec_set_info.nsec3_params.items():
            digest_name = self.name_digest_map[self.qname][(salt, alg, iterations)]
            if digest_name in nsec3_names:
                self.nsec_for_qname.add(digest_name)
                if nsec_set_info.rdtype_exists_in_bitmap(digest_name, rdtype):
                    self.has_rdtype = True
                if nsec_set_info.rdtype_exists_in_bitmap(digest_name, dns.rdatatype.CNAME):
                    self.has_cname = True
                if nsec_set_info.rdtype_exists_in_bitmap(digest_name, dns.rdatatype.NS):
                    self.has_ns = True
                if nsec_set_info.rdtype_exists_in_bitmap(digest_name, dns.rdatatype.DS):
                    self.has_ds = True
                if nsec_set_info.rdtype_exists_in_bitmap(digest_name, dns.rdatatype.SOA):
                    self.has_soa = True
            else:
                for encloser in self.closest_encloser:
                    next_closest_encloser = self._get_next_closest_encloser(encloser)
                    digest_name = nsec_set_info.get_digest_name_for_nsec3(next_closest_encloser, self.origin, salt, alg, iterations)
                    if next_closest_encloser not in self.name_digest_map:
                        self.name_digest_map[next_closest_encloser] = {}
                    self.name_digest_map[next_closest_encloser][(salt, alg, iterations)] = digest_name

                    if digest_name is not None:
                        covering_names = nsec_set_info.nsec3_covering_name(digest_name, salt, alg, iterations)
                        if covering_names:
                            self.nsec_names_covering_qname[digest_name] = covering_names
                            # opt_out is True if ANY covering NSEC3 RR has the
                            # opt-out flag (0x01) set
                            self.opt_out = False
                            for nsec_name in covering_names:
                                if nsec_set_info.rrsets[nsec_name].rrset[0].flags & 0x01:
                                    self.opt_out = True

        self._set_validation_status(nsec_set_info, ignore_rfc9276)

    def __str__(self):
        return 'NSEC3 record(s) proving non-existence (NODATA) of %s/%s' % (fmt.humanize_name(self.qname), dns.rdatatype.to_text(self.rdtype))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
                self.qname == other.qname and self.rdtype == other.rdtype and self.origin == other.origin and self.referral == other.referral and self.nsec_set_info == other.nsec_set_info

    def __hash__(self):
        # Identity-based hash: instances are mutable, so equality-based
        # hashing would be unsafe.
        return hash(id(self))

    def _set_validation_status(self, nsec_set_info, ignore_rfc9276):
        self.validation_status = NSEC_STATUS_VALID
        valid_algs, invalid_algs = nsec_set_info.get_algorithm_support()
        if invalid_algs:
            invalid_alg_err = Errors.UnsupportedNSEC3Algorithm(algorithm=list(invalid_algs)[0])
        else:
            invalid_alg_err = None
        if not ignore_rfc9276:
            # RFC 9276: iteration count should be 0 and salt should be empty
            if self.iterations > 0:
self.errors.append(Errors.NonZeroNSEC3IterationCount()) if self.salt is not None: self.warnings.append(Errors.NonEmptyNSEC3Salt()) if self.nsec_for_qname: # RFC 4035 5.2, 6840 4.4 if self.rdtype == dns.rdatatype.DS or self.referral: if self.is_zone and not self.has_ns: self.errors.append(Errors.ReferralWithoutNSBitNSEC3(sname=fmt.humanize_name(self.qname))) self.validation_status = NSEC_STATUS_INVALID if self.has_ds: self.errors.append(Errors.ReferralWithDSBitNSEC3(sname=fmt.humanize_name(self.qname))) self.validation_status = NSEC_STATUS_INVALID if self.has_soa: self.errors.append(Errors.ReferralWithSOABitNSEC3(sname=fmt.humanize_name(self.qname))) self.validation_status = NSEC_STATUS_INVALID # RFC 5155, section 8.5, 8.6 else: if self.has_rdtype: self.errors.append(Errors.StypeInBitmapNODATANSEC3(sname=fmt.humanize_name(self.qname), stype=dns.rdatatype.to_text(self.rdtype))) self.validation_status = NSEC_STATUS_INVALID if self.has_cname: self.errors.append(Errors.StypeInBitmapNODATANSEC3(sname=fmt.humanize_name(self.qname), stype=dns.rdatatype.to_text(dns.rdatatype.CNAME))) self.validation_status = NSEC_STATUS_INVALID elif self.nsec_for_wildcard_name: if not self.nsec_names_covering_qname: self.validation_status = NSEC_STATUS_INVALID if valid_algs: self.errors.append(Errors.NextClosestEncloserNotCoveredWildcardNODATA(next_closest_encloser=fmt.humanize_name(next_closest_encloser))) if invalid_algs: self.errors.append(invalid_alg_err) if self.wildcard_has_rdtype: self.validation_status = NSEC_STATUS_INVALID self.errors.append(Errors.StypeInBitmapWildcardNODATANSEC3(sname=fmt.humanize_name(self.get_wildcard()), stype=dns.rdatatype.to_text(self.rdtype))) elif self.nsec_names_covering_qname: if not self.opt_out: self.validation_status = NSEC_STATUS_INVALID if valid_algs: if self.rdtype == dns.rdatatype.DS: cls = Errors.OptOutFlagNotSetNODATADS else: cls = Errors.OptOutFlagNotSetNODATA next_closest_encloser = self.get_next_closest_encloser() 
self.errors.append(cls(next_closest_encloser=fmt.humanize_name(next_closest_encloser))) if invalid_algs: self.errors.append(invalid_alg_err) else: self.validation_status = NSEC_STATUS_INVALID if valid_algs: if self.rdtype == dns.rdatatype.DS: cls = Errors.NoNSEC3MatchingSnameDSNODATA else: cls = Errors.NoNSEC3MatchingSnameNODATA self.errors.append(cls(sname=fmt.humanize_name(self.qname))) if invalid_algs: self.errors.append(invalid_alg_err) # if it validation_status, we project out just the pertinent NSEC records # otherwise clone it by projecting them all if self.validation_status == NSEC_STATUS_VALID: covering_names = set() for names in self.closest_encloser.values(): covering_names.update(names) if self.nsec_for_qname: covering_names.update(self.nsec_for_qname) else: for names in self.nsec_names_covering_qname.values(): covering_names.update(names) if self.nsec_for_wildcard_name is not None: covering_names.update(self.nsec_for_wildcard_name) self.nsec_set_info = nsec_set_info.project(*list(covering_names)) else: self.nsec_set_info = nsec_set_info.project(*list(nsec_set_info.rrsets)) # Report errors with NSEC3 owner names for name in self.nsec_set_info.invalid_nsec3_owner: self.errors.append(Errors.InvalidNSEC3OwnerName(name=fmt.format_nsec3_name(name))) for name in self.nsec_set_info.invalid_nsec3_hash: self.errors.append(Errors.InvalidNSEC3Hash(name=fmt.format_nsec3_name(name), nsec3_hash=lb2s(base32.b32encode(self.nsec_set_info.rrsets[name].rrset[0].next)))) def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None): d = OrderedDict() nsec3_list = [] for nsec_rrset in self.nsec_set_info.rrsets.values(): if rrset_info_serializer is not None: nsec_serialized = rrset_info_serializer(nsec_rrset, consolidate_clients=consolidate_clients, show_servers=False, loglevel=loglevel, html_format=html_format) if nsec_serialized: nsec3_list.append(nsec_serialized) elif loglevel <= logging.DEBUG: 
                nsec3_list.append(nsec_rrset.serialize(consolidate_clients=consolidate_clients, html_format=html_format))

        erroneous_status = self.validation_status != STATUS_VALID

        # The 'id' key is emitted whenever there is anything else worth showing.
        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR) or \
                (erroneous_status or nsec3_list)

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = 'NSEC3'

        if loglevel <= logging.DEBUG:
            d['description'] = formatter(str(self))

        if nsec3_list:
            d['nsec3'] = nsec3_list

        if loglevel <= logging.DEBUG:
            if self.opt_out is not None:
                d['opt_out'] = self.opt_out
            if self.nsec_for_qname:
                # an NSEC3 RR matched qname directly
                digest_name = list(self.name_digest_map[self.qname].items())[0][1]
                if digest_name is not None:
                    d['sname_hash'] = formatter(fmt.format_nsec3_name(digest_name))
                else:
                    d['sname_hash'] = None
                d['sname_nsec_match'] = formatter(fmt.format_nsec3_name(list(self.nsec_for_qname)[0]))
            if self.closest_encloser:
                # show the closest encloser, the next closest encloser, and the
                # wildcard derived from the closest encloser
                encloser_name, nsec_names = list(self.closest_encloser.items())[0]
                nsec_name = list(nsec_names)[0]
                d['closest_encloser'] = formatter(lb2s(encloser_name.canonicalize().to_text()))
                d['closest_encloser_digest'] = formatter(fmt.format_nsec3_name(nsec_name))

                next_closest_encloser = self._get_next_closest_encloser(encloser_name)
                d['next_closest_encloser'] = formatter(lb2s(next_closest_encloser.canonicalize().to_text()))
                digest_name = list(self.name_digest_map[next_closest_encloser].items())[0][1]
                if digest_name is not None:
                    d['next_closest_encloser_hash'] = formatter(fmt.format_nsec3_name(digest_name))
                else:
                    d['next_closest_encloser_hash'] = None

                if self.nsec_names_covering_qname:
                    qname, nsec_names = list(self.nsec_names_covering_qname.items())[0]
                    nsec_name = list(nsec_names)[0]
                    next_name = self.nsec_set_info.name_for_nsec3_next(nsec_name)
                    d['next_closest_encloser_covering'] = OrderedDict((
                        ('covered_name', formatter(fmt.format_nsec3_name(qname))),
                        ('nsec3_owner', formatter(fmt.format_nsec3_name(nsec_name))),
                        ('nsec3_next', formatter(fmt.format_nsec3_name(next_name))),
                    ))

                wildcard_name = self._get_wildcard(encloser_name)
                wildcard_digest = list(self.name_digest_map[wildcard_name].items())[0][1]
                d['wildcard'] = formatter(lb2s(wildcard_name.canonicalize().to_text()))
                if wildcard_digest is not None:
                    d['wildcard_hash'] = formatter(fmt.format_nsec3_name(wildcard_digest))
                else:
                    d['wildcard_hash'] = None
                if self.nsec_for_wildcard_name:
                    d['wildcard_nsec_match'] = formatter(fmt.format_nsec3_name(list(self.nsec_for_wildcard_name)[0]))
            if not self.nsec_for_qname and not self.closest_encloser:
                # nothing matched or enclosed qname; still show its hash
                digest_name = list(self.name_digest_map[self.qname].items())[0][1]
                if digest_name is not None:
                    d['sname_hash'] = formatter(fmt.format_nsec3_name(digest_name))
                else:
                    d['sname_hash'] = None

        if loglevel <= logging.INFO or erroneous_status:
            d['status'] = nsec_status_mapping[self.validation_status]

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.nsec_set_info.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            # collect EDNS query options and NSID values across all responses
            tags = set()
            nsids = set()
            for server,client in self.nsec_set_info.servers_clients:
                for response in self.nsec_set_info.servers_clients[(server, client)]:
                    if response is not None:
                        tags.add(response.effective_query_tag())
                        nsid = response.nsid_val()
                        if nsid is not None:
                            nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]
        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in
self.errors] return d class CNAMEFromDNAMEStatus(object): def __init__(self, synthesized_cname, included_cname): self.synthesized_cname = synthesized_cname self.included_cname = included_cname self.warnings = [] self.errors = [] if self.included_cname is None: self.validation_status = DNAME_STATUS_INVALID self.errors.append(Errors.DNAMENoCNAME()) else: self.validation_status = DNAME_STATUS_VALID if self.included_cname.rrset[0].target != self.synthesized_cname.rrset[0].target: self.errors.append(Errors.DNAMETargetMismatch(included_target=fmt.humanize_name(self.included_cname.rrset[0].target), synthesized_target=fmt.humanize_name(self.synthesized_cname.rrset[0].target))) self.validation_status = DNAME_STATUS_INVALID_TARGET if self.included_cname.rrset.ttl != self.synthesized_cname.rrset.ttl: if self.included_cname.rrset.ttl == 0: self.warnings.append(Errors.DNAMETTLZero()) else: self.warnings.append(Errors.DNAMETTLMismatch(cname_ttl=self.included_cname.rrset.ttl, dname_ttl=self.synthesized_cname.rrset.ttl)) def __str__(self): return 'CNAME synthesis for %s from %s/%s' % (fmt.humanize_name(self.synthesized_cname.rrset.name), fmt.humanize_name(self.synthesized_cname.dname_info.rrset.name), dns.rdatatype.to_text(self.synthesized_cname.dname_info.rrset.rdtype)) def serialize(self, rrset_info_serializer=None, consolidate_clients=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None): values = [] d = OrderedDict() dname_serialized = None if rrset_info_serializer is not None: dname_serialized = rrset_info_serializer(self.synthesized_cname.dname_info, consolidate_clients=consolidate_clients, show_servers=False, loglevel=loglevel, html_format=html_format) elif loglevel <= logging.DEBUG: dname_serialized = self.synthesized_cname.dname_info.serialize(consolidate_clients=consolidate_clients, html_format=html_format) erroneous_status = self.validation_status != STATUS_VALID show_id = loglevel <= logging.INFO or \ (self.warnings and loglevel <= logging.WARNING) 
or \ (self.errors and loglevel <= logging.ERROR) or \ (erroneous_status or dname_serialized) if html_format: formatter = lambda x: escape(x, True) else: formatter = lambda x: x if show_id: d['id'] = lb2s(self.synthesized_cname.dname_info.rrset.name.canonicalize().to_text()) if loglevel <= logging.DEBUG: d['description'] = formatter(str(self)) if dname_serialized: d['dname'] = dname_serialized if loglevel <= logging.DEBUG: if self.included_cname is not None: d['cname_owner'] = formatter(lb2s(self.included_cname.rrset.name.canonicalize().to_text())) d['cname_target'] = formatter(lb2s(self.included_cname.rrset[0].target.canonicalize().to_text())) if loglevel <= logging.INFO or erroneous_status: d['status'] = dname_status_mapping[self.validation_status] if loglevel <= logging.INFO: servers = tuple_to_dict(self.synthesized_cname.dname_info.servers_clients) if consolidate_clients: servers = list(servers) servers.sort() d['servers'] = servers if map_ip_to_ns_name is not None: try: ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers])) except IndexError: ns_names = [] ns_names.sort() d['ns_names'] = ns_names tags = set() nsids = set() for server,client in self.synthesized_cname.dname_info.servers_clients: for response in self.synthesized_cname.dname_info.servers_clients[(server, client)]: if response is not None: tags.add(response.effective_query_tag()) nsid = response.nsid_val() if nsid is not None: nsids.add(nsid) if nsids: d['nsid_values'] = list(nsids) d['nsid_values'].sort() d['query_options'] = list(tags) d['query_options'].sort() if self.warnings and loglevel <= logging.WARNING: d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings] if self.errors and loglevel <= logging.ERROR: d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors] return d 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/base32.py0000644000175000017500000001217015001472663015114 0ustar00caseycasey# # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved # # PSF license: https://docs.python.org/2/license.html # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" # are retained in Python alone or in any derivative version prepared by Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. # # # The contents of this module are derived the base64 module of python 2.7, with # the value of _b32tab modified to use the Base 32 Encoding with Extended Hex # Alphabet, as specified in RFC 4648. Also, bytes literals are prefixed with # 'b'. from __future__ import unicode_literals import struct _b32tab = { 0: b'0', 1: b'1', 2: b'2', 3: b'3', 4: b'4', 5: b'5', 6: b'6', 7: b'7', 8: b'8', 9: b'9', 10: b'A', 11: b'B', 12: b'C', 13: b'D', 14: b'E', 15: b'F', 16: b'G', 17: b'H', 18: b'I', 19: b'J', 20: b'K', 21: b'L', 22: b'M', 23: b'N', 24: b'O', 25: b'P', 26: b'Q', 27: b'R', 28: b'S', 29: b'T', 30: b'U', 31: b'V' } EMPTYSTRING = b'' b32alphabet = set(_b32tab.values()) def b32encode(s): """Encode a string using Base32. s is the string to encode. The encoded string is returned. 
""" parts = [] quanta, leftover = divmod(len(s), 5) # Pad the last quantum with zero bits if necessary if leftover: s += (b'\0' * (5 - leftover)) quanta += 1 for i in range(quanta): # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this # code is to process the 40 bits in units of 5 bits. So we take the 1 # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover # bits of c2 and tack them onto c3. The shifts and masks are intended # to give us values of exactly 5 bits in width. c1, c2, c3 = struct.unpack(b'!HHB', s[i*5:(i+1)*5]) c2 += (c1 & 1) << 16 # 17 bits wide c3 += (c2 & 3) << 8 # 10 bits wide parts.extend([_b32tab[c1 >> 11], # bits 1 - 5 _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10 _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15 _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5) _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10) _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15) _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5) _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5) ]) encoded = EMPTYSTRING.join(parts) # Adjust for any leftover partial quanta if leftover == 1: return encoded[:-6] + b'======' elif leftover == 2: return encoded[:-4] + b'====' elif leftover == 3: return encoded[:-3] + b'===' elif leftover == 4: return encoded[:-1] + b'=' return encoded ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3905313 dnsviz-0.11.1/dnsviz/commands/0000755000175000017500000000000015001473074015260 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/commands/__init__.py0000644000175000017500000000000015001472663017362 0ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/commands/graph.py0000644000175000017500000005562215001472663016750 0ustar00caseycasey#!/usr/bin/env python # # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and 
visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import argparse import codecs import io import json import logging import os import re import sys # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict import dns.exception, dns.name from dnsviz.analysis import deserialize, DNS_RAW_VERSION from dnsviz.analysis.errors import ExistingCovered from dnsviz.config import DNSVIZ_SHARE_PATH, JQUERY_PATH, JQUERY_UI_PATH, JQUERY_UI_CSS_PATH, RAPHAEL_PATH from dnsviz.format import latin1_binary_to_string as lb2s from dnsviz.util import get_trusted_keys, get_default_trusted_keys, io_try_buffered # If the import of DNSAuthGraph fails because of the lack of pygraphviz, it # will be reported later try: from dnsviz.viz.dnssec import DNSAuthGraph except ImportError: try: import pygraphviz except ImportError: pass else: raise IGNORE_UNLESS_ERRORS = set([dns.rdatatype.HINFO, dns.rdatatype.NSEC3PARAM]) LOCAL_MEDIA_URL = 'file://' + DNSVIZ_SHARE_PATH DNSSEC_TEMPLATE_FILE = os.path.join(DNSVIZ_SHARE_PATH, 'html', 'dnssec-template.html') logging.basicConfig(level=logging.WARNING, format='%(message)s') logger = logging.getLogger() class AnalysisInputError(Exception): pass def finish_graph(G, name_objs, rdtypes, 
trusted_keys, supported_algs, fmt, filename, remove_edges): G.add_trust(trusted_keys, supported_algs=supported_algs) if remove_edges: G.remove_extra_edges() if fmt == 'html': try: js_img = codecs.decode(G.draw('js'), 'utf-8') except IOError as e: logger.error(str(e)) sys.exit(3) try: with io.open(DNSSEC_TEMPLATE_FILE, 'r', encoding='utf-8') as fh: template_str = fh.read() except IOError as e: logger.error('Error reading template file "%s": %s' % (DNSSEC_TEMPLATE_FILE, e.strerror)) sys.exit(3) template_str = template_str.replace('LOCAL_MEDIA_URL', LOCAL_MEDIA_URL) template_str = template_str.replace('JQUERY_PATH', JQUERY_PATH) template_str = template_str.replace('JQUERY_UI_PATH', JQUERY_UI_PATH) template_str = template_str.replace('JQUERY_UI_CSS_PATH', JQUERY_UI_CSS_PATH) template_str = template_str.replace('RAPHAEL_PATH', RAPHAEL_PATH) template_str = template_str.replace('JS_CODE', js_img) try: fh = io.open(filename, 'w', encoding='utf-8').write(template_str) except IOError as e: logger.error('%s: "%s"' % (e.strerror, filename)) sys.exit(3) else: try: fh = io.open(filename, 'wb').write(G.draw(fmt)) except IOError as e: if e.strerror: logger.error('%s: "%s"' % (e.strerror, filename)) else: logger.error(str(e)) sys.exit(3) def test_pygraphviz(): try: try: # pygraphviz < 1.7 used pygraphviz.release.version from pygraphviz import release version = release.version except ImportError: # pygraphviz 1.7 changed to pygraphviz.__version__ from pygraphviz import __version__ version = __version__ try: major, minor = version.split('.')[:2] major = int(major) minor = int(re.sub(r'(\d+)[^\d].*', r'\1', minor)) if (major, minor) < (1,3): logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version) sys.exit(2) except ValueError: logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version) sys.exit(2) except ImportError: logger.error('''pygraphviz is required, but not installed.''') sys.exit(2) class 
GraphArgHelper: FORMAT_CHOICES = ('dot','png','jpg','svg','html') def __init__(self, logger): self.parser = None self.output_format = None self.trusted_keys = None self.names = None self.analysis_structured = None self.args = None self._arg_mapping = None self._logger = logger def build_parser(self, prog): self.parser = argparse.ArgumentParser(description='Graph the assessment of diagnostic DNS queries', prog=prog) # python3/python2 dual compatibility stdin_buffer = io_try_buffered(sys.stdin, 'rb', closefd=False) stdout_buffer = io_try_buffered(sys.stdout, 'wb', closefd=False) try: self.parser.add_argument('-f', '--names-file', type=argparse.FileType('r', encoding='UTF-8'), action='store', metavar='', help='Read names from a file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-f', '--names-file', type=argparse.FileType('r'), action='store', metavar='', help='Read names from a file') #self.parser.add_argument('-s', '--silent', # const=True, default=False, # action='store_const', # help='Suppress error messages') try: self.parser.add_argument('-r', '--input-file', type=argparse.FileType('r', encoding='UTF-8'), default=stdin_buffer, action='store', metavar='', help='Read diagnostic queries from a file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-r', '--input-file', type=argparse.FileType('r'), default=stdin_buffer, action='store', metavar='', help='Read diagnostic queries from a file') try: self.parser.add_argument('-t', '--trusted-keys-file', type=argparse.FileType('r', encoding='UTF-8'), action='append', metavar='', help='Use trusted keys from the designated file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-t', '--trusted-keys-file', type=argparse.FileType('r'), action='append', metavar='', help='Use trusted keys from the designated file') self.parser.add_argument('-a', 
'--algorithms', type=self.comma_separated_ints_set, action='store', metavar=',[...]', help='Support only the specified DNSSEC algorithm(s)') self.parser.add_argument('-d', '--digest-algorithms', type=self.comma_separated_ints_set, action='store', metavar=',[...]', help='Support only the specified DNSSEC digest algorithm(s)') self.parser.add_argument('--ignore-rfc8624', const=True, default=False, action='store_const', help='Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements') self.parser.add_argument('--ignore-rfc9276', const=True, default=False, action='store_const', help='Ignore errors associated with RFC 9276, NSEC3 parameter settings') self.parser.add_argument('-C', '--enforce-cookies', const=True, default=False, action='store_const', help='Enforce DNS cookies strictly') self.parser.add_argument('-P', '--allow-private', const=True, default=False, action='store_const', help='Allow private IP addresses for authoritative DNS servers') self.parser.add_argument('--trust-cdnskey-cds', const=True, default=False, action='store_const', help='Trust all CDNSKEY/CDS records') self.parser.add_argument('--multi-signer', const=True, default=False, action='store_const', help='Don\'t issue errors for missing KSKs with DS RRs (e.g., for multi-signer setups)') self.parser.add_argument('-R', '--rr-types', type=self.comma_separated_dns_types, action='store', metavar=',[...]', help='Process queries of only the specified type(s)') self.parser.add_argument('-e', '--redundant-edges', const=True, default=False, action='store_const', help='Do not remove redundant RRSIG edges from the graph') self.parser.add_argument('-O', '--derive-filename', const=True, default=False, action='store_const', help='Derive the filename(s) from the format and domain name(s)') self.parser.add_argument('-o', '--output-file', type=argparse.FileType('wb'), default=stdout_buffer, action='store', metavar='', help='Save the output to the specified file') 
self.parser.add_argument('-T', '--output-format', type=str, choices=self.FORMAT_CHOICES, action='store', metavar='', help='Use the specified output format') self.parser.add_argument('domain_name', type=self.valid_domain_name, action='store', nargs='*', metavar='', help='Domain names') self._arg_mapping = dict([(a.dest, '/'.join(a.option_strings)) for a in self.parser._actions]) def parse_args(self, args): self.args = self.parser.parse_args(args) @classmethod def comma_separated_dns_types(cls, arg): rdtypes = [] arg = arg.strip() if not arg: return rdtypes for r in arg.split(','): try: rdtypes.append(dns.rdatatype.from_text(r.strip())) except dns.rdatatype.UnknownRdatatype: raise argparse.ArgumentTypeError('Invalid resource record type: %s' % (r)) return rdtypes @classmethod def comma_separated_ints_set(cls, arg): return set(cls.comma_separated_ints(arg)) @classmethod def comma_separated_ints(cls, arg): ints = [] arg = arg.strip() if not arg: return ints for i in arg.split(','): try: ints.append(int(i.strip())) except ValueError: raise argparse.ArgumentTypeError('Invalid integer: %s' % (i)) return ints @classmethod def valid_domain_name(cls, arg): try: return dns.name.from_text(arg) except dns.exception.DNSException: raise argparse.ArgumentTypeError('Invalid domain name: "%s"' % arg) def check_args(self): if self.args.names_file and self.args.domain_name: raise argparse.ArgumentTypeError('If %(names_file)s is used, then domain names may not supplied as command line arguments.' % \ self._arg_mapping) if self.args.derive_filename and self.args.output_file.fileno() != sys.stdout.fileno(): raise argparse.ArgumentTypeError('The %(derive_filename)s and %(output_file)s options may not be used together.' 
% \ self._arg_mapping) def set_kwargs(self): self.output_format = None if self.args.output_format is not None: self.output_format = self.args.output_format elif self.args.output_file is not None and \ self.args.output_file.fileno() != sys.stdout.fileno() and \ isinstance(self.args.output_file.name, str): if '.' in self.args.output_file.name: extension = self.args.output_file.name.split('.')[-1] if extension in self.FORMAT_CHOICES: self.output_format = extension if self.output_format is None: raise argparse.ArgumentTypeError('Unable to detect a valid format from output file: %s' % (self.args.output_file.name)) else: self.output_format = 'dot' def set_buffers(self): # This entire method is for # python3/python2 dual compatibility if self.args.input_file is not None: if self.args.input_file.fileno() == sys.stdin.fileno(): filename = self.args.input_file.fileno() else: filename = self.args.input_file.name self.args.input_file.close() self.args.input_file = io.open(filename, 'r', encoding='utf-8') if self.args.names_file is not None: if self.args.names_file.fileno() == sys.stdin.fileno(): filename = self.args.names_file.fileno() else: filename = self.args.names_file.name self.args.names_file.close() self.args.names_file = io.open(filename, 'r', encoding='utf-8') if self.args.trusted_keys_file is not None: trusted_keys_files = [] for tk_file in self.args.trusted_keys_file: if tk_file.fileno() == sys.stdin.fileno(): filename = tk_file.fileno() else: filename = tk_file.name tk_file.close() trusted_keys_files.append(io.open(filename, 'r', encoding='utf-8')) self.args.trusted_keys_file = trusted_keys_files if self.args.output_file is not None: if self.args.output_file.fileno() == sys.stdout.fileno(): filename = self.args.output_file.fileno() else: filename = self.args.output_file.name self.args.output_file.close() self.args.output_file = io.open(filename, 'wb') def aggregate_trusted_key_info(self): if not self.args.trusted_keys_file: return self.trusted_keys = [] for fh in 
    def update_trusted_key_info(self, latest_analysis_date):
        """If no trusted-keys file was supplied (-t), fall back to the default
        (root) trusted keys that were valid at latest_analysis_date."""
        if self.args.trusted_keys_file is None:
            self.trusted_keys = get_default_trusted_keys(latest_analysis_date)
def build_helper(logger, cmd, subcmd):
    """Construct a GraphArgHelper whose parser's prog is '<cmd> <subcmd>'."""
    arghelper = GraphArgHelper(logger)
    arghelper.build_parser('%s %s' % (cmd, subcmd))
    return arghelper

def main(argv):
    """Entry point for the graph subcommand: read serialized analysis input,
    assess each requested name, and render a DNSSEC authentication graph.

    Exit codes: 2 (missing/old pygraphviz, via test_pygraphviz), 3 (bad
    input), 4 (no usable names or interrupted)."""
    try:
        test_pygraphviz()

        arghelper = build_helper(logger, sys.argv[0], argv[0])
        arghelper.parse_args(argv[1:])
        logger.setLevel(logging.WARNING)

        try:
            arghelper.check_args()
            arghelper.set_kwargs()
            arghelper.set_buffers()
            arghelper.aggregate_trusted_key_info()
            arghelper.ingest_input()
            arghelper.ingest_names()
        except argparse.ArgumentTypeError as e:
            # parser.error() prints usage and exits
            arghelper.parser.error(str(e))
        except AnalysisInputError as e:
            s = str(e)
            if s:
                logger.error(s)
            sys.exit(3)

        latest_analysis_date = None
        name_objs = []
        cache = {}
        for name in arghelper.names:
            name_str = lb2s(name.canonicalize().to_text())
            if name_str not in arghelper.analysis_structured or arghelper.analysis_structured[name_str].get('stub', True):
                logger.error('The analysis of "%s" was not found in the input.' % lb2s(name.to_text()))
                continue
            name_obj = deserialize(name, arghelper.analysis_structured, cache, strict_cookies=arghelper.args.enforce_cookies, allow_private=arghelper.args.allow_private)
            name_objs.append(name_obj)

            # BUG FIX: keep the *latest* (maximum) analysis end time.  The
            # original compared with ">", which retained the earliest date,
            # contradicting the variable's name and skewing the selection of
            # date-sensitive default trusted keys below.
            if latest_analysis_date is None or latest_analysis_date < name_obj.analysis_end:
                latest_analysis_date = name_obj.analysis_end

        if not name_objs:
            sys.exit(4)

        arghelper.update_trusted_key_info(latest_analysis_date)

        G = DNSAuthGraph()
        for name_obj in name_objs:
            name_obj.populate_status(arghelper.trusted_keys, supported_algs=arghelper.args.algorithms, supported_digest_algs=arghelper.args.digest_algorithms, ignore_rfc8624=arghelper.args.ignore_rfc8624, ignore_rfc9276=arghelper.args.ignore_rfc9276, trust_cdnskey_cds=arghelper.args.trust_cdnskey_cds, multi_signer=arghelper.args.multi_signer)
            has_warnings, has_errors = name_obj.queries_with_errors_warnings()
            has_warnings_or_errors = has_warnings.union(has_errors)
            for qname, rdtype in name_obj.queries:
                if arghelper.args.rr_types is None:
                    # if rdtypes was not specified, then graph all, with some
                    # exceptions
                    if name_obj.is_zone() and rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS, dns.rdatatype.DLV):
                        continue
                    # Ignore some data types unless they are explicitly
                    # requested or there are warnings or errors associated with
                    # them.
                    if rdtype in IGNORE_UNLESS_ERRORS and (qname, rdtype) not in has_warnings_or_errors:
                        continue
                else:
                    # if rdtypes was specified, then only graph rdtypes that
                    # were specified
                    if qname != name_obj.name or rdtype not in arghelper.args.rr_types:
                        continue
                G.graph_rrset_auth(name_obj, qname, rdtype)

            # Also graph the NODATA query for the zone name, if applicable
            if not name_obj.is_zone() and name_obj.zone is not None:
                z_has_warnings, z_has_errors = name_obj.zone.queries_with_errors_warnings(ExistingCovered)
                z_has_warnings_or_errors = z_has_warnings.union(z_has_errors)
                if (name_obj.zone.nxrrset_name, name_obj.zone.nxrrset_rdtype) in z_has_warnings_or_errors:
                    G.graph_rrset_auth(name_obj.zone, name_obj.zone.nxrrset_name, name_obj.zone.nxrrset_rdtype)

            if arghelper.args.rr_types is not None:
                # warn about explicitly requested types with no query data
                for rdtype in arghelper.args.rr_types:
                    if (name_obj.name, rdtype) not in name_obj.queries:
                        logger.error('No query for "%s/%s" was included in the analysis.' % (lb2s(name_obj.name.to_text()), dns.rdatatype.to_text(rdtype)))

            if arghelper.args.derive_filename:
                # one output file per name, derived from the domain name
                if name_obj.name == dns.name.root:
                    name = 'root'
                else:
                    name = lb2s(name_obj.name.canonicalize().to_text()).rstrip('.')
                name = name.replace(os.sep, '--')
                finish_graph(G, [name_obj], arghelper.args.rr_types, arghelper.trusted_keys, arghelper.args.algorithms, arghelper.output_format, '%s.%s' % (name, arghelper.output_format), not arghelper.args.redundant_edges)
                G = DNSAuthGraph()

        if not arghelper.args.derive_filename:
            finish_graph(G, name_objs, arghelper.args.rr_types, arghelper.trusted_keys, arghelper.args.algorithms, arghelper.output_format, arghelper.args.output_file.fileno(), not arghelper.args.redundant_edges)

    except KeyboardInterrupt:
        logger.error('Interrupted.')
        sys.exit(4)
class AnalysisInputError(Exception):
    """Raised when the serialized analysis input is missing or unusable."""
    pass

# ANSI escape sequences used to colorize terminal output, keyed by
# status/level name.  NOTE: the original literal listed 'INDETERMINATE'
# twice ('\033[31m' then '\033[37m'); Python keeps the last value, so the
# shadowed first entry has been dropped — behavior is unchanged.
TERM_COLOR_MAP = {
    'BOLD': '\033[1m',
    'RESET': '\033[0m',
    'SECURE': '\033[36m',
    'BOGUS': '\033[31m',
    'INSECURE': '\033[37m',
    'NOERROR': '\033[37m',
    'NXDOMAIN': '\033[37m',
    'NON_EXISTENT': '\033[37m',
    'VALID': '\033[36m',
    'INDETERMINATE': '\033[37m',
    'INDETERMINATE_NO_DNSKEY': '\033[37m',
    'INDETERMINATE_MATCH_PRE_REVOKE': '\033[37m',
    'INDETERMINATE_UNKNOWN_ALGORITHM': '\033[33m',
    'ALGORITHM_IGNORED': '\033[37m',
    'EXPIRED': '\033[35m',
    'PREMATURE': '\033[35m',
    'INVALID_SIG': '\033[31m',
    'INVALID': '\033[31m',
    'INVALID_DIGEST': '\033[31m',
    'INCOMPLETE': '\033[33m',
    'LAME': '\033[33m',
    'INVALID_TARGET': '\033[31m',
    'ERROR': '\033[31m',
    'WARNING': '\033[33m',
}

# Patterns for recognizing pieces of pretty-printed JSON output.
# BUG FIX: the named-group names were stripped in this copy (e.g.
# "(?P\s+)" is an invalid pattern); they are restored here, as required by
# the .group('indent')/.group('level')/.group('status') calls in
# color_json() below.
KEY_RE = re.compile(r'^((?P<indent>\s+)")(.+)(": )')
ERRORS_RE = re.compile(r'^((?P<indent>\s+)")((?P<level>warning|error)s?)(": \[)$')
ERRORS_CLOSE_RE = re.compile(r'^(?P<indent>\s+)],?$')
DESCRIPTION_CODE_RE = re.compile(r'^((?P<indent>\s+)")(?P<name>description|code)(": ")(.+)(",?)$')
STATUS_RE = re.compile(r'^(?P<indent>\s+)("status": ")(?P<status>.+)(",?)')

def color_json(s):
    """Colorize pretty-printed JSON grok output for a terminal.

    Keys are bolded, "status" values are colored per TERM_COLOR_MAP, and
    the contents of "warnings"/"errors" arrays are colored at the
    corresponding severity.  Returns the colorized string (trailing
    whitespace stripped)."""
    error = None
    s1 = ''
    for line in s.split('\n'):
        if error is None:
            # not in an error object; look for a start
            error = ERRORS_RE.search(line)
            if error is not None:
                # found an error start
                line = ERRORS_RE.sub(r'\1%s%s\3%s\5' % (TERM_COLOR_MAP['BOLD'], TERM_COLOR_MAP[error.group('level').upper()], TERM_COLOR_MAP['RESET']), line)
                s1 += line + '\n'
                continue
        if error is None:
            # not in an error object
            m = STATUS_RE.search(line)
            if m is not None:
                line = STATUS_RE.sub(r'\1\2%s\3%s\4' % (TERM_COLOR_MAP[m.group('status').upper()], TERM_COLOR_MAP['RESET']), line)
            line = KEY_RE.sub(r'\1%s\3%s\4' % (TERM_COLOR_MAP['BOLD'], TERM_COLOR_MAP['RESET']), line)
            s1 += line + '\n'
            continue
        # in an error object; a closing bracket at the same indent ends it
        m = ERRORS_CLOSE_RE.search(line)
        if m is not None and len(m.group('indent')) == len(error.group('indent')):
            error = None
            s1 += line + '\n'
            continue
        line = DESCRIPTION_CODE_RE.sub(r'\1\3\4%s\5%s\6' % (TERM_COLOR_MAP[error.group('level').upper()], TERM_COLOR_MAP['RESET']), line)
        line = KEY_RE.sub(r'\1%s\3%s\4' % (TERM_COLOR_MAP['BOLD'], TERM_COLOR_MAP['RESET']), line)
        s1 += line + '\n'
    return s1.rstrip()
pygraphviz.__version__ from pygraphviz import __version__ version = __version__ try: major, minor = version.split('.')[:2] major = int(major) minor = int(re.sub(r'(\d+)[^\d].*', r'\1', minor)) if (major, minor) < (1,3): logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version) sys.exit(2) except ValueError: logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version) sys.exit(2) except ImportError: logger.error('''pygraphviz is required, but not installed.''') sys.exit(2) class GrokArgHelper: def __init__(self, logger): self.parser = None self.trusted_keys = None self.names = None self.analysis_structured = None self.log_level = None self.args = None self._arg_mapping = None self._logger = logger def build_parser(self, prog): self.parser = argparse.ArgumentParser(description='Assess diagnostic DNS queries', prog=prog) # python3/python2 dual compatibility stdin_buffer = io_try_buffered(sys.stdin, 'rb', closefd=False) stdout_buffer = io_try_buffered(sys.stdout, 'wb', closefd=False) try: self.parser.add_argument('-f', '--names-file', type=argparse.FileType('r', encoding='UTF-8'), action='store', metavar='', help='Read names from a file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-f', '--names-file', type=argparse.FileType('r'), action='store', metavar='', help='Read names from a file') #self.parser.add_argument('-s', '--silent', # const=True, default=False, # action='store_const', # help='Suppress error messages') try: self.parser.add_argument('-r', '--input-file', type=argparse.FileType('r', encoding='UTF-8'), default=stdin_buffer, action='store', metavar='', help='Read diagnostic queries from a file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-r', '--input-file', type=argparse.FileType('r'), default=stdin_buffer, action='store', metavar='', help='Read diagnostic 
queries from a file') try: self.parser.add_argument('-t', '--trusted-keys-file', type=argparse.FileType('r', encoding='UTF-8'), action='append', metavar='', help='Use trusted keys from the designated file') except TypeError: # this try/except is for # python3/python2 dual compatibility self.parser.add_argument('-t', '--trusted-keys-file', type=argparse.FileType('r'), action='append', metavar='', help='Use trusted keys from the designated file') self.parser.add_argument('-a', '--algorithms', type=self.comma_separated_ints_set, action='store', metavar=',[...]', help='Support only the specified DNSSEC algorithm(s)') self.parser.add_argument('-d', '--digest-algorithms', type=self.comma_separated_ints_set, action='store', metavar=',[...]', help='Support only the specified DNSSEC digest algorithm(s)') self.parser.add_argument('--ignore-rfc8624', const=True, default=False, action='store_const', help='Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements') self.parser.add_argument('--ignore-rfc9276', const=True, default=False, action='store_const', help='Ignore errors associated with RFC 9276, NSEC3 parameter settings') self.parser.add_argument('-C', '--enforce-cookies', const=True, default=False, action='store_const', help='Enforce DNS cookies strictly') self.parser.add_argument('-P', '--allow-private', const=True, default=False, action='store_const', help='Allow private IP addresses for authoritative DNS servers') self.parser.add_argument('--trust-cdnskey-cds', const=True, default=False, action='store_const', help='Trust all CDNSKEY/CDS records') self.parser.add_argument('--multi-signer', const=True, default=False, action='store_const', help='Don\'t issue errors for missing KSKs with DS RRs (e.g., for multi-signer setups)') self.parser.add_argument('-o', '--output-file', type=argparse.FileType('wb'), default=stdout_buffer, action='store', metavar='', help='Save the output to the specified file') self.parser.add_argument('-c', 
'--minimize-output', const=True, default=False, action='store_const', help='Format JSON output minimally, instead of "pretty"') self.parser.add_argument('-l', '--log-level', type=str, choices=('error', 'warning', 'info', 'debug'), default='debug', action='store', metavar='', help='Save the output to the specified file') self.parser.add_argument('domain_name', type=self.valid_domain_name, action='store', nargs='*', metavar='', help='Domain names') self._arg_mapping = dict([(a.dest, '/'.join(a.option_strings)) for a in self.parser._actions]) def parse_args(self, args): self.args = self.parser.parse_args(args) @classmethod def comma_separated_ints_set(cls, arg): return set(cls.comma_separated_ints(arg)) @classmethod def comma_separated_ints(cls, arg): ints = [] arg = arg.strip() if not arg: return ints for i in arg.split(','): try: ints.append(int(i.strip())) except ValueError: raise argparse.ArgumentTypeError('Invalid integer: %s' % (i)) return ints @classmethod def valid_domain_name(cls, arg): try: return dns.name.from_text(arg) except dns.exception.DNSException: raise argparse.ArgumentTypeError('Invalid domain name: "%s"' % arg) def check_args(self): if self.args.names_file and self.args.domain_name: raise argparse.ArgumentTypeError('If %(names_file)s is used, then domain names may not supplied as command line arguments.' 
    def set_kwargs(self):
        """Map the textual -l/--log-level option to a logging module constant."""
        if self.args.log_level == 'error':
            self.log_level = logging.ERROR
        elif self.args.log_level == 'warning':
            self.log_level = logging.WARNING
        elif self.args.log_level == 'info':
            self.log_level = logging.INFO
        else: # self.args.log_level == 'debug':
            self.log_level = logging.DEBUG

    def set_buffers(self):
        # This entire method is for
        # python3/python2 dual compatibility
        #
        # Re-open each argparse-provided file with an explicit encoding/mode.
        # For stdin/stdout the raw file descriptor (an int) is handed to
        # io.open(); otherwise the file is reopened by name.
        if self.args.input_file is not None:
            if self.args.input_file.fileno() == sys.stdin.fileno():
                filename = self.args.input_file.fileno()
            else:
                filename = self.args.input_file.name
            self.args.input_file.close()
            self.args.input_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.names_file is not None:
            if self.args.names_file.fileno() == sys.stdin.fileno():
                filename = self.args.names_file.fileno()
            else:
                filename = self.args.names_file.name
            self.args.names_file.close()
            self.args.names_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.trusted_keys_file is not None:
            # -t may be given multiple times (action='append')
            trusted_keys_files = []
            for tk_file in self.args.trusted_keys_file:
                if tk_file.fileno() == sys.stdin.fileno():
                    filename = tk_file.fileno()
                else:
                    filename = tk_file.name
                tk_file.close()
                trusted_keys_files.append(io.open(filename, 'r', encoding='utf-8'))
            self.args.trusted_keys_file = trusted_keys_files
        if self.args.output_file is not None:
            if self.args.output_file.fileno() == sys.stdout.fileno():
                filename = self.args.output_file.fileno()
            else:
                filename = self.args.output_file.name
            self.args.output_file.close()
            # output is binary; JSON is encoded to UTF-8 before writing
            self.args.output_file = io.open(filename, 'wb')

    def aggregate_trusted_key_info(self):
        """Read and parse all -t files, accumulating keys in self.trusted_keys.

        Leaves self.trusted_keys as None when no -t option was given."""
        if not self.args.trusted_keys_file:
            return

        self.trusted_keys = []
        for fh in self.args.trusted_keys_file:
            tk_str = fh.read()
            try:
                self.trusted_keys.extend(get_trusted_keys(tk_str))
            except dns.exception.DNSException:
                # NOTE(review): this interpolates the whole option-name
                # mapping dict into the message rather than the offending
                # file's name (fh.name) — looks like a copy/paste slip;
                # confirm against upstream before changing.
                raise argparse.ArgumentTypeError('There was an error parsing the trusted keys file: "%s"' % \
                        self._arg_mapping)
    def ingest_input(self):
        """Read and validate the serialized analysis JSON from the input file.

        Populates self.analysis_structured.  Raises AnalysisInputError on
        empty input, malformed JSON, or a version mismatch."""
        analysis_str = self.args.input_file.read()
        if not analysis_str:
            if self.args.input_file.fileno() != sys.stdin.fileno():
                raise AnalysisInputError('No input')
            else:
                # empty stdin: exit quietly (no message)
                raise AnalysisInputError()
        try:
            self.analysis_structured = json.loads(analysis_str)
        except ValueError:
            raise AnalysisInputError('There was an error parsing the JSON input: "%s"' % self.args.input_file.name)

        # check version
        if '_meta._dnsviz.' not in self.analysis_structured or 'version' not in self.analysis_structured['_meta._dnsviz.']:
            raise AnalysisInputError('No version information in JSON input: "%s"' % self.args.input_file.name)
        try:
            major_vers, minor_vers = [int(x) for x in str(self.analysis_structured['_meta._dnsviz.']['version']).split('.', 1)]
        except ValueError:
            raise AnalysisInputError('Version of JSON input is invalid: %s' % self.analysis_structured['_meta._dnsviz.']['version'])
        # ensure major version is a match and minor version is no greater
        # than the current minor version
        curr_major_vers, curr_minor_vers = [int(x) for x in str(DNS_RAW_VERSION).split('.', 1)]
        if major_vers != curr_major_vers or minor_vers > curr_minor_vers:
            raise AnalysisInputError('Version %d.%d of JSON input is incompatible with this software.' % (major_vers, minor_vers))

    def ingest_names(self):
        """Determine the names to assess, in priority order: command-line
        arguments, then -f names file, then the names recorded in the JSON
        input.  Populates self.names (an OrderedDict used as an ordered set)."""
        self.names = OrderedDict()

        if self.args.domain_name:
            for name in self.args.domain_name:
                if name not in self.names:
                    self.names[name] = None
            return

        if self.args.names_file:
            args = self.args.names_file
        else:
            try:
                args = self.analysis_structured['_meta._dnsviz.']['names']
            except KeyError:
                raise AnalysisInputError('No names found in JSON input!')

        for arg in args:
            name = arg.strip()

            # python3/python2 dual compatibility
            if hasattr(name, 'decode'):
                name = name.decode('utf-8')

            try:
                name = dns.name.from_text(name)
            except UnicodeDecodeError as e:
                self._logger.error('%s: "%s"' % (e, name))
            except dns.exception.DNSException:
                self._logger.error('The domain name was invalid: "%s"' % name)
            else:
                if name not in self.names:
                    self.names[name] = None

def build_helper(logger, cmd, subcmd):
    """Construct a GrokArgHelper whose parser's prog is '<cmd> <subcmd>'."""
    arghelper = GrokArgHelper(logger)
    arghelper.build_parser('%s %s' % (cmd, subcmd))
    return arghelper

def main(argv):
    """Entry point for the grok subcommand: read serialized analysis input,
    assess each name, and write JSON status output (colorized on a tty)."""
    try:
        arghelper = build_helper(logger, sys.argv[0], argv[0])
        arghelper.parse_args(argv[1:])
        logger.setLevel(logging.WARNING)

        try:
            arghelper.check_args()
            arghelper.set_kwargs()
            arghelper.set_buffers()
            arghelper.aggregate_trusted_key_info()
            arghelper.ingest_input()
            arghelper.ingest_names()
        except argparse.ArgumentTypeError as e:
            # prints usage and exits
            arghelper.parser.error(str(e))
        except AnalysisInputError as e:
            s = str(e)
            if s:
                logger.error(s)
            sys.exit(3)

        if arghelper.args.minimize_output:
            kwargs = {}
        else:
            kwargs = { 'indent': 4, 'separators': (',', ': ') }

        # if trusted keys were supplied, check that pygraphviz is installed
        if arghelper.trusted_keys:
            test_pygraphviz()

        name_objs = []
        cache = {}
        for name in arghelper.names:
            name_str = lb2s(name.canonicalize().to_text())
            if name_str not in arghelper.analysis_structured or arghelper.analysis_structured[name_str].get('stub', True):
                logger.error('The analysis of "%s" was not found in the input.' % lb2s(name.to_text()))
                continue
            name_obj = deserialize(name, arghelper.analysis_structured, cache, strict_cookies=arghelper.args.enforce_cookies, allow_private=arghelper.args.allow_private)
            name_objs.append(name_obj)

        if not name_objs:
            sys.exit(4)

        arghelper.update_trusted_key_info()

        d = OrderedDict()
        for name_obj in name_objs:
            name_obj.populate_status(arghelper.trusted_keys, supported_algs=arghelper.args.algorithms, supported_digest_algs=arghelper.args.digest_algorithms, ignore_rfc8624=arghelper.args.ignore_rfc8624, ignore_rfc9276=arghelper.args.ignore_rfc9276, trust_cdnskey_cds=arghelper.args.trust_cdnskey_cds, multi_signer=arghelper.args.multi_signer)
            if arghelper.trusted_keys:
                # with trusted keys, build the full authentication graph so
                # per-response component status can be derived
                G = DNSAuthGraph()
                for qname, rdtype in name_obj.queries:
                    if name_obj.is_zone() and rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS, dns.rdatatype.DLV):
                        continue
                    G.graph_rrset_auth(name_obj, qname, rdtype)
                for target, mx_obj in name_obj.mx_targets.items():
                    if mx_obj is not None:
                        G.graph_rrset_auth(mx_obj, target, dns.rdatatype.A)
                        G.graph_rrset_auth(mx_obj, target, dns.rdatatype.AAAA)
                for target, ns_obj in name_obj.ns_dependencies.items():
                    if ns_obj is not None:
                        G.graph_rrset_auth(ns_obj, target, dns.rdatatype.A)
                        G.graph_rrset_auth(ns_obj, target, dns.rdatatype.AAAA)
                G.add_trust(arghelper.trusted_keys, supported_algs=arghelper.args.algorithms)
                name_obj.populate_response_component_status(G)
            name_obj.serialize_status(d, loglevel=arghelper.log_level)

        if d:
            s = json.dumps(d, ensure_ascii=False, **kwargs)
            # colorize only pretty output destined for a color-capable tty
            if not arghelper.args.minimize_output and arghelper.args.output_file.isatty() and os.environ.get('TERM', 'dumb') != 'dumb':
                s = color_json(s)
            arghelper.args.output_file.write(s.encode('utf-8'))

    except KeyboardInterrupt:
        logger.error('Interrupted.')
        sys.exit(4)
class RemoteQueryError(Exception):
    """Raised when a looking-glass request is malformed or incompatible."""
    pass

def main(argv=None):
    """Serve DNS looking-glass requests read from stdin and answered on
    stdout, framed by the WebSocket-client transport handlers.

    BUG FIX: argv now defaults to None.  The module-level invocation below
    calls main() with no arguments, which raised TypeError with the original
    'def main(argv)' signature; the dnsviz command dispatcher may still pass
    argv explicitly (it is unused here)."""
    sock = transport.ReaderWriter(io.open(sys.stdin.fileno(), 'rb'), io.open(sys.stdout.fileno(), 'wb'))
    sock.lock = threading.Lock()
    qth_reader = transport.DNSQueryTransportHandlerWebSocketClientReader(sock)
    qth_writer = transport.DNSQueryTransportHandlerWebSocketClientWriter(sock)
    response_queue = queue.Queue()
    queries_in_waiting = set()
    th_factory = transport.DNSQueryTransportHandlerDNSFactory()
    tm = transport.DNSQueryTransportManager()

    try:
        while True:
            try:
                qth_writer.qtms = []
                tm.handle_msg(qth_reader)
                qth_reader.finalize()
                if len(qth_reader.msg_recv) == 0:
                    # peer closed the stream
                    break

                # load the json content
                try:
                    content = json.loads(codecs.decode(qth_reader.msg_recv, 'utf-8'))
                except ValueError:
                    raise RemoteQueryError('JSON decoding of request failed: %s' % qth_reader.msg_recv)

                if 'version' not in content:
                    raise RemoteQueryError('No version information in request.')
                try:
                    major_vers, minor_vers = [int(x) for x in str(content['version']).split('.', 1)]
                except ValueError:
                    raise RemoteQueryError('Version of JSON input in request is invalid: %s' % content['version'])

                # ensure major version is a match and minor version is no greater
                # than the current minor version
                curr_major_vers, curr_minor_vers = [int(x) for x in str(transport.DNS_TRANSPORT_VERSION).split('.', 1)]
                if major_vers != curr_major_vers or minor_vers > curr_minor_vers:
                    raise RemoteQueryError('Version %d.%d of JSON input in request is incompatible with this software.' % (major_vers, minor_vers))

                if 'requests' not in content:
                    raise RemoteQueryError('No request information in request.')

                # dispatch each query asynchronously, then collect responses
                for i, qtm_serialized in enumerate(content['requests']):
                    try:
                        qtm = transport.DNSQueryTransportMeta.deserialize_request(qtm_serialized)
                    except transport.TransportMetaDeserializationError as e:
                        raise RemoteQueryError('Error deserializing request information: %s' % e)

                    qth_writer.add_qtm(qtm)
                    th = th_factory.build(processed_queue=response_queue)
                    th.add_qtm(qtm)
                    th.init_req()
                    tm.handle_msg_nowait(th)
                    queries_in_waiting.add(th)

                while queries_in_waiting:
                    th = response_queue.get()
                    th.finalize()
                    queries_in_waiting.remove(th)

                qth_writer.init_req()

            except RemoteQueryError as e:
                # report the error back to the client instead of dying
                qth_writer.init_err_send(str(e))

            tm.handle_msg(qth_writer)

    except EOFError:
        pass
    finally:
        tm.close()

if __name__ == '__main__':
    main()
# # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import argparse import codecs import io import json import logging import os import re import sys # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict import dns.exception, dns.name from dnsviz.analysis import deserialize, DNS_RAW_VERSION from dnsviz.analysis.errors import ExistingCovered from dnsviz.format import latin1_binary_to_string as lb2s from dnsviz.util import get_trusted_keys, get_default_trusted_keys, io_try_buffered # If the import of DNSAuthGraph fails because of the lack of pygraphviz, it # will be reported later try: from dnsviz.viz.dnssec import DNSAuthGraph except ImportError: try: import pygraphviz except ImportError: pass else: raise IGNORE_UNLESS_ERRORS = set([dns.rdatatype.HINFO, dns.rdatatype.NSEC3PARAM]) logging.basicConfig(level=logging.WARNING, format='%(message)s') logger = logging.getLogger() class AnalysisInputError(Exception): pass def finish_graph(G, name_objs, rdtypes, ignore_qnames_rdtypes, trusted_keys, supported_algs, filename, override_show_colors): G.add_trust(trusted_keys, supported_algs=supported_algs) try: fh = io.open(filename, 'w', encoding='utf-8') except IOError as e: logger.error('%s: "%s"' % (e.strerror, filename)) sys.exit(3) if override_show_colors: show_colors = True else: show_colors = 
        fh.isatty() and os.environ.get('TERM', 'dumb') != 'dumb'

    # Gather the serialized status tuples for every analyzed name, then write
    # the textual rendering to the output file handle.
    tuples = []
    processed = set()
    for name_obj in name_objs:
        name_obj.populate_response_component_status(G)
        tuples.extend(name_obj.serialize_status_simple(rdtypes, ignore_qnames_rdtypes, processed))

    fh.write(textualize_status_output(tuples, show_colors))

# ANSI terminal escape sequences for each DNSSEC/response status keyword.
# NOTE(review): 'INDETERMINATE' appears twice below; in a dict literal the
# second value ('\033[37m') silently wins.  Confirm which color is intended.
TERM_COLOR_MAP = {
    'BOLD': '\033[1m',
    'RESET': '\033[0m',
    'SECURE': '\033[36m',
    'BOGUS': '\033[31m',
    'INSECURE': '\033[37m',
    'NOERROR': '\033[37m',
    'NXDOMAIN': '\033[37m',
    'INDETERMINATE': '\033[31m',
    'NON_EXISTENT': '\033[37m',
    'VALID': '\033[36m',
    'INDETERMINATE': '\033[37m',
    'INDETERMINATE_NO_DNSKEY': '\033[37m',
    'INDETERMINATE_MATCH_PRE_REVOKE': '\033[37m',
    'INDETERMINATE_UNKNOWN_ALGORITHM': '\033[33m',
    'ALGORITHM_IGNORED': '\033[37m',
    'EXPIRED': '\033[35m',
    'PREMATURE': '\033[35m',
    'INVALID_SIG': '\033[31m',
    'INVALID': '\033[31m',
    'INVALID_DIGEST': '\033[31m',
    'INCOMPLETE': '\033[33m',
    'LAME': '\033[33m',
    'INVALID_TARGET': '\033[31m',
    'ERROR': '\033[31m',
    'WARNING': '\033[33m',
}

# Single-character glyph used for each status in the bracketed status field:
# '.' good, '!' bad, '-' neutral/absent, '?' uncertain.
STATUS_MAP = {
    'SECURE': '.',
    'BOGUS': '!',
    'INSECURE': '-',
    'NON_EXISTENT': '-',
    'VALID': '.',
    'INDETERMINATE': '-',
    'INDETERMINATE_NO_DNSKEY': '-',
    'INDETERMINATE_MATCH_PRE_REVOKE': '-',
    'INDETERMINATE_UNKNOWN_ALGORITHM': '?',
    'ALGORITHM_IGNORED': '-',
    'EXPIRED': '!',
    'PREMATURE': '!',
    'INVALID_SIG': '!',
    'INVALID': '!',
    'INVALID_DIGEST': '!',
    'INCOMPLETE': '?',
    'LAME': '?',
    'INVALID_TARGET': '!',
    'ERROR': '!',
    'WARNING': '?',
}

def _errors_warnings_full(warnings, errors, indent, show_color):
    """Render one 'E:...' line per error and one 'W:...' line per warning,
    each prefixed with *indent* and (optionally) colorized."""
    # display status, errors, and warnings
    s = ''
    for error in errors:
        if show_color:
            s += '%s%sE:%s%s\n' % (indent, TERM_COLOR_MAP['ERROR'], error, TERM_COLOR_MAP['RESET'])
        else:
            s += '%sE:%s\n' % (indent, error)

    for warning in warnings:
        if show_color:
            s += '%s%sW:%s%s\n' % (indent, TERM_COLOR_MAP['WARNING'], warning, TERM_COLOR_MAP['RESET'])
        else:
            s += '%sW:%s\n' % (indent, warning)

    return s

def _errors_warnings_str(status, warnings, errors, show_color):
    """Return the bracketed status glyph, e.g. '[.]' or '[.!]', appending an
    error glyph if there are errors, else a warning glyph if warnings."""
    # display status, errors, and warnings
    error_str = ''
    if errors:
        if show_color:
            error_str = '%s%s%s' % (TERM_COLOR_MAP['ERROR'], STATUS_MAP['ERROR'], TERM_COLOR_MAP[status])
        else:
            error_str = STATUS_MAP['ERROR']
    elif warnings:
        if show_color:
            error_str = '%s%s%s' % (TERM_COLOR_MAP['WARNING'], STATUS_MAP['WARNING'], TERM_COLOR_MAP[status])
        else:
            error_str = STATUS_MAP['WARNING']
    return '[%s%s]' % (STATUS_MAP[status], error_str)

def _textualize_status_output_response(rdtype_str, status, warnings, errors, rdata, children, depth, show_color):
    """Render one RRset response line (status, rdtype, rdata items with their
    per-item statuses), followed by full error/warning detail lines, then
    recurse into *children* one indent level deeper."""
    s = ''

    response_prefix = ' %(status_color)s%(status)s%(preindent)s %(indent)s%(rdtype)s: '
    response_rdata = '%(rdata)s%(color_reset)s%(status_color_rdata)s%(status_rdata)s%(color_reset)s'
    join_str_template = '%(status_color)s, '

    params = {}
    params['status_color'] = ''
    params['status_color_rdata'] = ''
    if show_color:
        params['color_reset'] = TERM_COLOR_MAP['RESET']
    else:
        params['color_reset'] = ''

    # display status, errors, and warnings
    params['status'] = _errors_warnings_str(status, warnings, errors, show_color)

    # indent based on the presence of errors and warnings
    if errors or warnings:
        params['preindent'] = ''
    else:
        params['preindent'] = ' '

    params['rdtype'] = rdtype_str
    params['indent'] = ' '*depth
    if show_color:
        params['status_color'] = TERM_COLOR_MAP[status]
    s += response_prefix % params

    rdata_set = []
    # accumulate per-rdata warnings/errors on top of the RRset-level ones
    subwarnings_all = warnings[:]
    suberrors_all = errors[:]
    for i, (substatus, subwarnings, suberrors, rdata_item) in enumerate(rdata):
        params['rdata'] = rdata_item
        # display status, errors, and warnings
        if substatus is not None:
            if show_color:
                params['status_color_rdata'] = TERM_COLOR_MAP[substatus]
            params['status_rdata'] = ' ' + _errors_warnings_str(substatus, subwarnings, suberrors, show_color)
        else:
            params['status_color_rdata'] = ''
            params['status_rdata'] = ''
        rdata_set.append(response_rdata % params)

        subwarnings_all.extend(subwarnings)
        suberrors_all.extend(suberrors)

    join_str = join_str_template % params
    s += join_str.join(rdata_set) + '\n'

    s += _errors_warnings_full(subwarnings_all, suberrors_all, ' ' + params['preindent'] + params['indent'], show_color)

    for rdtype_str_child, status_child, warnings_child, errors_child, rdata_child, children_child in children:
        s += _textualize_status_output_response(rdtype_str_child, status_child, warnings_child, errors_child, rdata_child, children_child, depth + 1, show_color)

    return s

def _textualize_status_output_name(name, zone_status, zone_warnings, zone_errors, delegation_status, delegation_warnings, delegation_errors, responses, show_color):
    """Render the header line for one domain name (zone and delegation
    statuses), its combined warning/error detail, and all of its responses."""
    s = ''

    name_template = '%(status_color)s%(name)s%(color_reset)s%(status_color_rdata)s%(status_rdata)s%(color_reset)s\n'

    params = {}
    params['status_color'] = ''
    params['status_color_rdata'] = ''
    if show_color:
        params['color_reset'] = TERM_COLOR_MAP['RESET']
    else:
        params['color_reset'] = ''

    warnings_all = zone_warnings + delegation_warnings
    errors_all = zone_errors + delegation_errors

    params['name'] = name
    params['status_rdata'] = ''
    if show_color:
        params['status_color'] = TERM_COLOR_MAP['BOLD']
        params['color_reset'] = TERM_COLOR_MAP['RESET']
    if zone_status is not None:
        params['status_rdata'] += ' ' + _errors_warnings_str(zone_status, zone_warnings, zone_errors, show_color)
        if show_color:
            params['status_color_rdata'] = TERM_COLOR_MAP[zone_status]
    if delegation_status is not None:
        params['status_rdata'] += ' ' + _errors_warnings_str(delegation_status, delegation_warnings, delegation_errors, show_color)
        if show_color:
            # NOTE(review): if both zone and delegation statuses are present,
            # the delegation color overrides the zone color here.
            params['status_color_rdata'] = TERM_COLOR_MAP[delegation_status]
    s += name_template % params

    s += _errors_warnings_full(warnings_all, errors_all, ' ', show_color)

    for rdtype_str, status, warnings, errors, rdata, children in responses:
        s += _textualize_status_output_response(rdtype_str, status, warnings, errors, rdata, children, 0, show_color)

    return s

def textualize_status_output(names, show_color):
    """Render the full text report for a sequence of per-name status tuples
    (as produced by serialize_status_simple())."""
    s = ''
    for name, zone_status, zone_warnings, zone_errors, delegation_status, delegation_warnings, delegation_errors, responses in names:
        s += _textualize_status_output_name(name, zone_status, zone_warnings, zone_errors, delegation_status, delegation_warnings, delegation_errors, responses, show_color)

    return s

def test_pygraphviz():
    """Exit with status 2 unless pygraphviz >= 1.3 is importable."""
    try:
        try:
            # pygraphviz < 1.7 used pygraphviz.release.version
            from pygraphviz import release
            version = release.version
        except ImportError:
            # pygraphviz 1.7 changed to pygraphviz.__version__
            from pygraphviz import __version__
            version = __version__
        try:
            major, minor = version.split('.')[:2]
            major = int(major)
            # strip any suffix from the minor component (e.g. '7rc1' -> 7)
            minor = int(re.sub(r'(\d+)[^\d].*', r'\1', minor))
            if (major, minor) < (1,3):
                logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version)
                sys.exit(2)
        except ValueError:
            logger.error('''pygraphviz version >= 1.3 is required, but version %s is installed.''' % version)
            sys.exit(2)
    except ImportError:
        logger.error('''pygraphviz is required, but not installed.''')
        sys.exit(2)

class PrintArgHelper:
    """Parses and validates command-line arguments for the 'print' command
    and ingests the JSON analysis input and the names to report on."""

    def __init__(self, logger):
        self.parser = None            # argparse.ArgumentParser, built lazily
        self.trusted_keys = None      # list of (name, dnskey) trust anchors
        self.names = None             # OrderedDict of names to process
        self.analysis_structured = None  # parsed JSON analysis input
        self.args = None              # parsed argparse namespace
        self._arg_mapping = None      # dest -> '-x/--xxx' strings, for messages

        self._logger = logger

    def build_parser(self, prog):
        """Construct the argparse parser for *prog* with all print options.

        NOTE(review): the empty metavar='' values below look like angle-
        bracketed placeholders (e.g. '<filename>') that were stripped by the
        archive dump -- confirm against the original source."""
        self.parser = argparse.ArgumentParser(description='Print the assessment of diagnostic DNS queries', prog=prog)

        # python3/python2 dual compatibility
        stdin_buffer = io_try_buffered(sys.stdin, 'rb', closefd=False)
        stdout_buffer = io_try_buffered(sys.stdout, 'wb', closefd=False)

        try:
            self.parser.add_argument('-f', '--names-file',
                    type=argparse.FileType('r', encoding='UTF-8'),
                    action='store', metavar='',
                    help='Read names from a file')
        except TypeError:
            # this try/except is for
            # python3/python2 dual compatibility
            self.parser.add_argument('-f', '--names-file',
                    type=argparse.FileType('r'),
                    action='store', metavar='',
                    help='Read names from a file')
        #self.parser.add_argument('-s', '--silent',
        #        const=True, default=False,
        #        action='store_const',
        #        help='Suppress error messages')
        try:
            self.parser.add_argument('-r', '--input-file',
                    type=argparse.FileType('r', encoding='UTF-8'),
                    default=stdin_buffer,
                    action='store', metavar='',
                    help='Read diagnostic queries from a file')
        except TypeError:
            # this try/except is for
            # python3/python2 dual compatibility
            self.parser.add_argument('-r', '--input-file',
                    type=argparse.FileType('r'),
                    default=stdin_buffer,
                    action='store', metavar='',
                    help='Read diagnostic queries from a file')
        try:
            self.parser.add_argument('-t', '--trusted-keys-file',
                    type=argparse.FileType('r', encoding='UTF-8'),
                    action='append', metavar='',
                    help='Use trusted keys from the designated file')
        except TypeError:
            # this try/except is for
            # python3/python2 dual compatibility
            self.parser.add_argument('-t', '--trusted-keys-file',
                    type=argparse.FileType('r'),
                    action='append', metavar='',
                    help='Use trusted keys from the designated file')
        self.parser.add_argument('-S', '--show-colors',
                const=True, default=False,
                action='store_const',
                help='Use terminal colors even if not a TTY or supported')
        self.parser.add_argument('-a', '--algorithms',
                type=self.comma_separated_ints_set,
                action='store', metavar=',[...]',
                help='Support only the specified DNSSEC algorithm(s)')
        self.parser.add_argument('-d', '--digest-algorithms',
                type=self.comma_separated_ints_set,
                action='store', metavar=',[...]',
                help='Support only the specified DNSSEC digest algorithm(s)')
        self.parser.add_argument('--ignore-rfc8624',
                const=True, default=False,
                action='store_const',
                help='Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements')
        self.parser.add_argument('--ignore-rfc9276',
                const=True, default=False,
                action='store_const',
                help='Ignore errors associated with RFC 9276, NSEC3 parameter settings')
        self.parser.add_argument('-C', '--enforce-cookies',
                const=True, default=False,
                action='store_const',
                help='Enforce DNS cookies strictly')
        self.parser.add_argument('-P', '--allow-private',
                const=True, default=False,
                action='store_const',
                help='Allow private IP addresses for authoritative DNS servers')
        self.parser.add_argument('--trust-cdnskey-cds',
                const=True, default=False,
                action='store_const',
                help='Trust all CDNSKEY/CDS records')
        self.parser.add_argument('--multi-signer',
                const=True, default=False,
                action='store_const',
                help='Don\'t issue errors for missing KSKs with DS RRs (e.g., for multi-signer setups)')
        self.parser.add_argument('-R', '--rr-types',
                type=self.comma_separated_dns_types,
                action='store', metavar=',[...]',
                help='Process queries of only the specified type(s)')
        self.parser.add_argument('-O', '--derive-filename',
                const=True, default=False,
                action='store_const',
                help='Derive the filename(s) from domain name(s)')
        self.parser.add_argument('-o', '--output-file',
                type=argparse.FileType('wb'),
                default=stdout_buffer,
                action='store', metavar='',
                help='Save the output to the specified file')
        self.parser.add_argument('domain_name', type=self.valid_domain_name,
                action='store', nargs='*', metavar='',
                help='Domain names')

        self._arg_mapping = dict([(a.dest, '/'.join(a.option_strings)) for a in self.parser._actions])

    def parse_args(self, args):
        self.args = self.parser.parse_args(args)

    @classmethod
    def comma_separated_dns_types(cls, arg):
        """Parse a comma-separated list of RR type mnemonics into rdtype ints;
        raise ArgumentTypeError on an unknown type."""
        rdtypes = []
        arg = arg.strip()
        if not arg:
            return rdtypes
        for r in arg.split(','):
            try:
                rdtypes.append(dns.rdatatype.from_text(r.strip()))
            except dns.rdatatype.UnknownRdatatype:
                raise argparse.ArgumentTypeError('Invalid resource record type: %s' % (r))
        return rdtypes

    @classmethod
    def comma_separated_ints_set(cls, arg):
        return set(cls.comma_separated_ints(arg))

    @classmethod
    def comma_separated_ints(cls, arg):
        """Parse a comma-separated list of integers; raise ArgumentTypeError
        on a non-integer token."""
        ints = []
        arg = arg.strip()
        if not arg:
            return ints
        for i in arg.split(','):
            try:
                ints.append(int(i.strip()))
            except ValueError:
                raise argparse.ArgumentTypeError('Invalid integer: %s' % (i))
        return ints

    @classmethod
    def valid_domain_name(cls, arg):
        try:
            return dns.name.from_text(arg)
        except dns.exception.DNSException:
            raise argparse.ArgumentTypeError('Invalid domain name: "%s"' % arg)

    def check_args(self):
        """Reject mutually exclusive option combinations."""
        # NOTE(review): typo in the message below ("may not supplied");
        # preserved byte-for-byte here.
        if self.args.names_file and self.args.domain_name:
            raise argparse.ArgumentTypeError('If %(names_file)s is used, then domain names may not supplied as command line arguments.' % \
                    self._arg_mapping)

        if self.args.derive_filename and self.args.output_file.fileno() != sys.stdout.fileno():
            raise argparse.ArgumentTypeError('The %(derive_filename)s and %(output_file)s options may not be used together.' % \
                    self._arg_mapping)

    def set_buffers(self):
        # This entire method is for
        # python3/python2 dual compatibility
        # Re-open each argparse-supplied file with an explicit encoding
        # (stdin/stdout are re-opened by file descriptor, other files by name).
        if self.args.input_file is not None:
            if self.args.input_file.fileno() == sys.stdin.fileno():
                filename = self.args.input_file.fileno()
            else:
                filename = self.args.input_file.name
                self.args.input_file.close()
            self.args.input_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.names_file is not None:
            if self.args.names_file.fileno() == sys.stdin.fileno():
                filename = self.args.names_file.fileno()
            else:
                filename = self.args.names_file.name
                self.args.names_file.close()
            self.args.names_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.trusted_keys_file is not None:
            trusted_keys_files = []
            for tk_file in self.args.trusted_keys_file:
                if tk_file.fileno() == sys.stdin.fileno():
                    filename = tk_file.fileno()
                else:
                    filename = tk_file.name
                    tk_file.close()
                trusted_keys_files.append(io.open(filename, 'r', encoding='utf-8'))
            self.args.trusted_keys_file = trusted_keys_files
        if self.args.output_file is not None:
            if self.args.output_file.fileno() == sys.stdout.fileno():
                filename = self.args.output_file.fileno()
            else:
                filename = self.args.output_file.name
                self.args.output_file.close()
            self.args.output_file = io.open(filename, 'wb')

    def aggregate_trusted_key_info(self):
        """Read and parse all --trusted-keys-file arguments into
        self.trusted_keys; no-op if none were given."""
        if not self.args.trusted_keys_file:
            return

        self.trusted_keys = []
        for fh in self.args.trusted_keys_file:
            tk_str = fh.read()
            try:
                self.trusted_keys.extend(get_trusted_keys(tk_str))
            except dns.exception.DNSException:
                raise argparse.ArgumentTypeError('There was an error parsing the trusted keys file: "%s"' % \
                        self._arg_mapping)

    def update_trusted_key_info(self, latest_analysis_date):
        # fall back to the built-in trust anchors valid at the analysis date
        if self.args.trusted_keys_file is None:
            self.trusted_keys = get_default_trusted_keys(latest_analysis_date)

    def ingest_input(self):
        """Read and JSON-parse the analysis input, validating its version
        against this software's supported major/minor version."""
        analysis_str = self.args.input_file.read()
        if not analysis_str:
            if self.args.input_file.fileno() != sys.stdin.fileno():
                raise AnalysisInputError('No input')
            else:
                raise AnalysisInputError()
        try:
            self.analysis_structured = json.loads(analysis_str)
        except ValueError:
            raise AnalysisInputError('There was an error parsing the JSON input: "%s"' % self.args.input_file.name)

        # check version
        if '_meta._dnsviz.' not in self.analysis_structured or 'version' not in self.analysis_structured['_meta._dnsviz.']:
            raise AnalysisInputError('No version information in JSON input: "%s"' % self.args.input_file.name)
        try:
            major_vers, minor_vers = [int(x) for x in str(self.analysis_structured['_meta._dnsviz.']['version']).split('.', 1)]
        except ValueError:
            raise AnalysisInputError('Version of JSON input is invalid: %s' % self.analysis_structured['_meta._dnsviz.']['version'])
        # ensure major version is a match and minor version is no greater
        # than the current minor version
        curr_major_vers, curr_minor_vers = [int(x) for x in str(DNS_RAW_VERSION).split('.', 1)]
        if major_vers != curr_major_vers or minor_vers > curr_minor_vers:
            raise AnalysisInputError('Version %d.%d of JSON input is incompatible with this software.' % (major_vers, minor_vers))

    def ingest_names(self):
        """Populate self.names from (in priority order) command-line names,
        the --names-file, or the names recorded in the JSON input."""
        self.names = OrderedDict()

        if self.args.domain_name:
            for name in self.args.domain_name:
                if name not in self.names:
                    self.names[name] = None
            return

        if self.args.names_file:
            args = self.args.names_file
        else:
            try:
                args = self.analysis_structured['_meta._dnsviz.']['names']
            except KeyError:
                raise AnalysisInputError('No names found in JSON input!')

        for arg in args:
            name = arg.strip()

            # python3/python2 dual compatibility
            if hasattr(name, 'decode'):
                name = name.decode('utf-8')

            try:
                name = dns.name.from_text(name)
            except UnicodeDecodeError as e:
                self._logger.error('%s: "%s"' % (e, name))
            except dns.exception.DNSException:
                self._logger.error('The domain name was invalid: "%s"' % name)
            else:
                if name not in self.names:
                    self.names[name] = None

def build_helper(logger, cmd, subcmd):
    """Create a PrintArgHelper whose parser's prog is '<cmd> <subcmd>'."""
    arghelper = PrintArgHelper(logger)
    arghelper.build_parser('%s %s' % (cmd, subcmd))
    return arghelper

def main(argv):
    """Entry point for the 'print' subcommand: parse args, ingest the JSON
    analysis, compute status for each name, and emit the textual report."""
    try:
        test_pygraphviz()

        arghelper = build_helper(logger, sys.argv[0], argv[0])
        arghelper.parse_args(argv[1:])
        logger.setLevel(logging.WARNING)

        try:
            arghelper.check_args()
            arghelper.set_buffers()
            arghelper.aggregate_trusted_key_info()
            arghelper.ingest_input()
            arghelper.ingest_names()
        except argparse.ArgumentTypeError as e:
            arghelper.parser.error(str(e))
        except AnalysisInputError as e:
            s = str(e)
            if s:
                logger.error(s)
            sys.exit(3)

        latest_analysis_date = None
        name_objs = []
        cache = {}
        for name in arghelper.names:
            name_str = lb2s(name.canonicalize().to_text())
            if name_str not in arghelper.analysis_structured or arghelper.analysis_structured[name_str].get('stub', True):
                logger.error('The analysis of "%s" was not found in the input.' % lb2s(name.to_text()))
                continue
            name_obj = deserialize(name, arghelper.analysis_structured, cache, strict_cookies=arghelper.args.enforce_cookies, allow_private=arghelper.args.allow_private)
            name_objs.append(name_obj)

            # NOTE(review): despite the variable name, this comparison keeps
            # the *earliest* analysis_end (it updates when the stored value is
            # greater) -- confirm whether '>' or '<' is intended here.
            if latest_analysis_date is None or latest_analysis_date > name_obj.analysis_end:
                latest_analysis_date = name_obj.analysis_end

        if not name_objs:
            sys.exit(4)

        arghelper.update_trusted_key_info(latest_analysis_date)

        G = DNSAuthGraph()
        ignore_qnames_rdtypes = set()
        for name_obj in name_objs:
            name_obj.populate_status(arghelper.trusted_keys, supported_algs=arghelper.args.algorithms, supported_digest_algs=arghelper.args.digest_algorithms, ignore_rfc8624=arghelper.args.ignore_rfc8624, ignore_rfc9276=arghelper.args.ignore_rfc9276, trust_cdnskey_cds=arghelper.args.trust_cdnskey_cds, multi_signer=arghelper.args.multi_signer)
            has_warnings, has_errors = name_obj.queries_with_errors_warnings()
            has_warnings_or_errors = has_warnings.union(has_errors)
            for qname, rdtype in name_obj.queries:
                if arghelper.args.rr_types is None:
                    # if rdtypes was not specified, then graph all, with some
                    # exceptions
                    if name_obj.is_zone() and rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS, dns.rdatatype.DLV):
                        continue
                    # Ignore some data types unless they are explicitly
                    # requested or there are warnings or errors associated with
                    # them.
                    if rdtype in IGNORE_UNLESS_ERRORS and (qname, rdtype) not in has_warnings_or_errors:
                        ignore_qnames_rdtypes.add((qname, rdtype))
                        continue
                else:
                    # if rdtypes was specified, then only graph rdtypes that
                    # were specified
                    if qname != name_obj.name or rdtype not in arghelper.args.rr_types:
                        continue
                G.graph_rrset_auth(name_obj, qname, rdtype)

            # Also graph the NODATA query for the zone name, if applicable
            if not name_obj.is_zone() and name_obj.zone is not None:
                z_has_warnings, z_has_errors = name_obj.zone.queries_with_errors_warnings(ExistingCovered)
                z_has_warnings_or_errors = z_has_warnings.union(z_has_errors)
                if (name_obj.zone.nxrrset_name, name_obj.zone.nxrrset_rdtype) in z_has_warnings_or_errors:
                    G.graph_rrset_auth(name_obj.zone, name_obj.zone.nxrrset_name, name_obj.zone.nxrrset_rdtype)

            if arghelper.args.rr_types is not None:
                for rdtype in arghelper.args.rr_types:
                    if (name_obj.name, rdtype) not in name_obj.queries:
                        logger.error('No query for "%s/%s" was included in the analysis.' % (lb2s(name_obj.name.to_text()), dns.rdatatype.to_text(rdtype)))

            if arghelper.args.derive_filename:
                if name_obj.name == dns.name.root:
                    name = 'root'
                else:
                    name = lb2s(name_obj.name.canonicalize().to_text()).rstrip('.')
                    name = name.replace(os.sep, '--')
                finish_graph(G, [name_obj], arghelper.args.rr_types, ignore_qnames_rdtypes, arghelper.trusted_keys, arghelper.args.algorithms, '%s.txt' % name, arghelper.args.show_colors)
                G = DNSAuthGraph()

        if not arghelper.args.derive_filename:
            finish_graph(G, name_objs, arghelper.args.rr_types, ignore_qnames_rdtypes, arghelper.trusted_keys, arghelper.args.algorithms, arghelper.args.output_file.fileno(), arghelper.args.show_colors)

    except KeyboardInterrupt:
        logger.error('Interrupted.')
        sys.exit(4)

if __name__ == "__main__":
    main(sys.argv)
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/commands/probe.py0000644000175000017500000020216215001472663016747 0ustar00caseycasey#!/usr/bin/env python
#
# This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import argparse import atexit import binascii import codecs import errno import getopt import io import json import logging import multiprocessing import multiprocessing.managers import platform import os import random import re import shutil import signal import socket import struct import subprocess import sys import tempfile import threading import time # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict # python3/python2 dual compatibility try: import urllib.parse except ImportError: import urlparse else: urlparse = urllib.parse import dns.edns, dns.exception, dns.message, dns.name, dns.rdata, dns.rdataclass, dns.rdatatype, dns.rdtypes.ANY.NS, dns.rdtypes.IN.A, dns.rdtypes.IN.AAAA, dns.resolver, dns.rrset from dnsviz.analysis import COOKIE_STANDIN, WILDCARD_EXPLICIT_DELEGATION, PrivateAnalyst, PrivateRecursiveAnalyst, OnlineDomainNameAnalysis, NetworkConnectivityException, DNS_RAW_VERSION from dnsviz.config import RESOLV_CONF import dnsviz.format as fmt from dnsviz.ipaddr import IPAddr from dnsviz.query import DiagnosticQuery, QuickDNSSECQuery, 
StandardRecursiveQueryCD from dnsviz.resolver import DNSAnswer, Resolver, ResolvConfError, PrivateFullResolver from dnsviz import transport from dnsviz.util import get_client_address, get_root_hints lb2s = fmt.latin1_binary_to_string logging.basicConfig(level=logging.WARNING, format='%(message)s') logger = logging.getLogger() # this needs to be global because of multiprocessing tm = None th_factories = None resolver = None explicit_delegations = None odd_ports = None A_ROOT_IPV4 = IPAddr('198.41.0.4') A_ROOT_IPV6 = IPAddr('2001:503:ba3e::2:30') class MissingExecutablesError(Exception): pass class ZoneFileServiceError(Exception): pass class AnalysisInputError(Exception): pass class CustomQueryMixin(object): edns_options = [] #XXX this is a hack required for inter-process sharing of dns.name.Name # instances using multiprocess def _setattr_dummy(self, name, value): return super(dns.name.Name, self).__setattr__(name, value) dns.name.Name.__setattr__ = _setattr_dummy def _raise_eof(signum, frame): # EOFError is raised instead of KeyboardInterrupt # because the multiprocessing worker doesn't handle # KeyboardInterrupt raise EOFError def _init_tm(): global tm tm = transport.DNSQueryTransportManager() def _cleanup_tm(): global tm if tm is not None: tm.close() tm = None def _init_stub_resolver(): global resolver servers = set() for rdata in explicit_delegations[(WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS)]: for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): if (rdata.target, rdtype) in explicit_delegations: servers.update([IPAddr(r.address) for r in explicit_delegations[(rdata.target, rdtype)]]) resolver = Resolver(list(servers), StandardRecursiveQueryCD, transport_manager=tm) def _init_full_resolver(): global resolver quick_query = QuickDNSSECQuery.add_mixin(CustomQueryMixin).add_server_cookie(COOKIE_STANDIN) diagnostic_query = DiagnosticQuery.add_mixin(CustomQueryMixin).add_server_cookie(COOKIE_STANDIN) # now that we have the hints, make resolver a full resolver 
instead of a stub hints = get_root_hints() for key in explicit_delegations: hints[key] = explicit_delegations[key] resolver = PrivateFullResolver(hints, query_cls=(quick_query, diagnostic_query), odd_ports=odd_ports, cookie_standin=COOKIE_STANDIN, transport_manager=tm) def _init_interrupt_handler(): signal.signal(signal.SIGINT, _raise_eof) def _init_subprocess(use_full): _init_tm() if use_full: _init_full_resolver() else: _init_stub_resolver() _init_interrupt_handler() multiprocessing.util.Finalize(None, _cleanup_tm, exitpriority=0) def _analyze(args): (cls, name, rdclass, dlv_domain, try_ipv4, try_ipv6, client_ipv4, client_ipv6, query_class_mixin, ceiling, edns_diagnostics, \ stop_at_explicit, extra_rdtypes, explicit_only, cache, cache_level, cache_lock) = args if ceiling is not None and name.is_subdomain(ceiling): c = ceiling else: c = name try: a = cls(name, rdclass=rdclass, dlv_domain=dlv_domain, try_ipv4=try_ipv4, try_ipv6=try_ipv6, client_ipv4=client_ipv4, client_ipv6=client_ipv6, query_class_mixin=query_class_mixin, ceiling=c, edns_diagnostics=edns_diagnostics, explicit_delegations=explicit_delegations, stop_at_explicit=stop_at_explicit, odd_ports=odd_ports, extra_rdtypes=extra_rdtypes, explicit_only=explicit_only, analysis_cache=cache, cache_level=cache_level, analysis_cache_lock=cache_lock, transport_manager=tm, th_factories=th_factories, resolver=resolver) return a.analyze() # re-raise a KeyboardInterrupt, as this means we've been interrupted except KeyboardInterrupt: raise # report exceptions related to network connectivity except (NetworkConnectivityException, transport.RemoteQueryTransportError) as e: logger.error('Error analyzing %s: %s' % (fmt.humanize_name(name), e)) # don't report EOFError, as that is what is raised if there is a # KeyboardInterrupt in ParallelAnalyst except EOFError: pass except: logger.exception('Error analyzing %s' % fmt.humanize_name(name)) return None class BulkAnalyst(object): analyst_cls = PrivateAnalyst use_full_resolver = 
True def __init__(self, rdclass, try_ipv4, try_ipv6, client_ipv4, client_ipv6, query_class_mixin, ceiling, edns_diagnostics, stop_at_explicit, cache_level, extra_rdtypes, explicit_only, dlv_domain): self.rdclass = rdclass self.try_ipv4 = try_ipv4 self.try_ipv6 = try_ipv6 self.client_ipv4 = client_ipv4 self.client_ipv6 = client_ipv6 self.query_class_mixin = query_class_mixin self.ceiling = ceiling self.edns_diagnostics = edns_diagnostics self.stop_at_explicit = stop_at_explicit self.cache_level = cache_level self.extra_rdtypes = extra_rdtypes self.explicit_only = explicit_only self.dlv_domain = dlv_domain self.cache = {} self.cache_lock = threading.Lock() def _name_to_args_iter(self, names): for name in names: yield (self.analyst_cls, name, self.rdclass, self.dlv_domain, self.try_ipv4, self.try_ipv6, self.client_ipv4, self.client_ipv6, self.query_class_mixin, self.ceiling, self.edns_diagnostics, self.stop_at_explicit, self.extra_rdtypes, self.explicit_only, self.cache, self.cache_level, self.cache_lock) def analyze(self, names, flush_func=None): name_objs = [] for args in self._name_to_args_iter(names): name_obj = _analyze(args) if flush_func is not None: flush_func(name_obj) else: name_objs.append(name_obj) return name_objs class RecursiveBulkAnalyst(BulkAnalyst): analyst_cls = PrivateRecursiveAnalyst use_full_resolver = False class MultiProcessAnalystMixin(object): analysis_model = OnlineDomainNameAnalysis def _finalize_analysis_proper(self, name_obj): self.analysis_cache[name_obj.name] = name_obj super(MultiProcessAnalystMixin, self)._finalize_analysis_proper(name_obj) def _finalize_analysis_all(self, name_obj): self.analysis_cache[name_obj.name] = name_obj super(MultiProcessAnalystMixin, self)._finalize_analysis_all(name_obj) def refresh_dependency_references(self, name_obj, trace=None): if trace is None: trace = [] if name_obj.name in trace: return if name_obj.parent is not None: self.refresh_dependency_references(name_obj.parent, trace+[name_obj.name]) if 
name_obj.nxdomain_ancestor is not None: self.refresh_dependency_references(name_obj.nxdomain_ancestor, trace+[name_obj.name]) if name_obj.dlv_parent is not None: self.refresh_dependency_references(name_obj.dlv_parent, trace+[name_obj.name]) # loop until all deps have been added for cname in name_obj.cname_targets: for target in name_obj.cname_targets[cname]: while name_obj.cname_targets[cname][target] is None: try: name_obj.cname_targets[cname][target] = self.analysis_cache[target] except KeyError: time.sleep(1) self.refresh_dependency_references(name_obj.cname_targets[cname][target], trace+[name_obj.name]) for signer in name_obj.external_signers: while name_obj.external_signers[signer] is None: try: name_obj.external_signers[signer] = self.analysis_cache[signer] except KeyError: time.sleep(1) self.refresh_dependency_references(name_obj.external_signers[signer], trace+[name_obj.name]) if self.follow_ns: for ns in name_obj.ns_dependencies: while name_obj.ns_dependencies[ns] is None: try: name_obj.ns_dependencies[ns] = self.analysis_cache[ns] except KeyError: time.sleep(1) self.refresh_dependency_references(name_obj.ns_dependencies[ns], trace+[name_obj.name]) if self.follow_mx: for target in name_obj.mx_targets: while name_obj.mx_targets[target] is None: try: name_obj.mx_targets[target] = self.analysis_cache[target] except KeyError: time.sleep(1) self.refresh_dependency_references(name_obj.mx_targets[target], trace+[name_obj.name]) def analyze(self): name_obj = super(MultiProcessAnalystMixin, self).analyze() if not self.trace: self.refresh_dependency_references(name_obj) return name_obj class MultiProcessAnalyst(MultiProcessAnalystMixin, PrivateAnalyst): pass class RecursiveMultiProcessAnalyst(MultiProcessAnalystMixin, PrivateRecursiveAnalyst): pass class ParallelAnalystMixin(object): analyst_cls = MultiProcessAnalyst use_full_resolver = None def __init__(self, rdclass, try_ipv4, try_ipv6, client_ipv4, client_ipv6, query_class_mixin, ceiling, edns_diagnostics, 
stop_at_explicit, cache_level, extra_rdtypes, explicit_only, dlv_domain, processes): super(ParallelAnalystMixin, self).__init__(rdclass, try_ipv4, try_ipv6, client_ipv4, client_ipv6, query_class_mixin, ceiling, edns_diagnostics, stop_at_explicit, cache_level, extra_rdtypes, explicit_only, dlv_domain) self.manager = multiprocessing.managers.SyncManager() self.manager.start() self.processes = processes self.cache = self.manager.dict() self.cache_lock = self.manager.Lock() def analyze(self, names, flush_func=None): results = [] name_objs = [] pool = multiprocessing.Pool(self.processes, _init_subprocess, (self.use_full_resolver,)) try: for args in self._name_to_args_iter(names): results.append(pool.apply_async(_analyze, (args,))) # loop instead of just joining, so we can check for interrupt at # main process for result in results: name_objs.append(result.get()) except KeyboardInterrupt: pool.terminate() raise pool.close() pool.join() return name_objs class ParallelAnalyst(ParallelAnalystMixin, BulkAnalyst): analyst_cls = MultiProcessAnalyst use_full_resolver = True class RecursiveParallelAnalyst(ParallelAnalystMixin, RecursiveBulkAnalyst): analyst_cls = RecursiveMultiProcessAnalyst use_full_resolver = False class ZoneFileToServe: _next_free_port = 50053 NAMED = 'named' NAMED_CHECKCONF = 'named-checkconf' NAMED_CONF = '%(dir)s/named.conf' NAMED_PID = '%(dir)s/named.pid' NAMED_LOG = '%(dir)s/named.log' NAMED_CONF_TEMPLATE = ''' options { directory "%(dir)s"; pid-file "%(named_pid)s"; listen-on port %(port)d { 127.0.0.1; }; listen-on-v6 port %(port)d { ::1; }; recursion no; notify no; dnssec-validation no; }; controls {}; zone "%(zone_name)s" { type master; file "%(zone_file)s"; }; logging { channel info_file { file "%(named_log)s"; severity debug; }; category default { info_file; }; category unmatched { null; }; }; ''' ZONEFILE_TEMPLATE_PRE = ''' $ORIGIN %(zone_name)s $TTL 600 @ IN SOA localhost. root.localhost. 
1 1800 900 86400 600 @ IN NS @ ''' ZONEFILE_TEMPLATE_A = '@ IN A 127.0.0.1\n' ZONEFILE_TEMPLATE_AAAA = '@ IN AAAA ::1\n' ERROR_RE = re.compile(r'permission\s+denied|error:', re.IGNORECASE) def __init__(self, domain, filename): self.domain = domain self.filename = filename self.port = self._next_free_port self.__class__._next_free_port += 1 self.working_dir = None self.pid = None @classmethod def from_mappings(cls, domain, mappings, use_ipv6_loopback): zonefile = tempfile.NamedTemporaryFile('w', prefix='dnsviz', delete=False) atexit.register(os.remove, zonefile.name) args = { 'zone_name': lb2s(domain.to_text()) } if use_ipv6_loopback: zonefile_template = cls.ZONEFILE_TEMPLATE_PRE + cls.ZONEFILE_TEMPLATE_AAAA else: zonefile_template = cls.ZONEFILE_TEMPLATE_PRE + cls.ZONEFILE_TEMPLATE_A zonefile_contents = zonefile_template % args zonefile.write(zonefile_contents) for name, rdtype in mappings: if not name.is_subdomain(domain): continue zonefile.write(mappings[(name, rdtype)].to_text() + '\n') zonefile.close() return cls(domain, zonefile.name) def _cleanup_process(self): if self.pid is not None: try: os.kill(self.pid, signal.SIGTERM) except OSError: pass else: time.sleep(1.0) try: os.kill(self.pid, signal.SIGKILL) except OSError: pass if self.working_dir is not None: shutil.rmtree(self.working_dir) def serve(self): self.working_dir = tempfile.mkdtemp(prefix='dnsviz') env = { 'PATH': '%s:/sbin:/usr/sbin:/usr/local/sbin' % (os.environ.get('PATH', '')) } args = { 'dir': self.working_dir, 'port': self.port, 'zone_name': lb2s(self.domain.to_text()), 'zone_file': os.path.abspath(self.filename) } args['named_conf'] = self.NAMED_CONF % args args['named_pid'] = self.NAMED_PID % args args['named_log'] = self.NAMED_LOG % args named_conf_contents = self.NAMED_CONF_TEMPLATE % args io.open(args['named_conf'], 'w', encoding='utf-8').write(named_conf_contents) try: p = subprocess.Popen([self.NAMED_CHECKCONF, '-z', args['named_conf']], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, 
env=env) except OSError as e: self._cleanup_process() raise MissingExecutablesError('The options used require %s. Please ensure that it is installed and in PATH (%s).' % (self.NAMED_CHECKCONF, e)) (stdout, stderr) = p.communicate() if False: #p.returncode != 0: stdout = stdout.decode('utf-8') self._cleanup_process() raise ZoneFileServiceError('There was a problem with the zone file for "%s":\n%s' % (args['zone_name'], stdout)) named_cmd_without_debug = [self.NAMED, '-c', args['named_conf']] named_cmd_with_debug = named_cmd_without_debug + ['-g'] started = False for named_cmd in (named_cmd_without_debug, named_cmd_with_debug): try: p = subprocess.Popen(named_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) except OSError as e: self._cleanup_process() raise MissingExecutablesError('The options used require %s. Please ensure that it is installed and in PATH (%s).' % (self.NAMED, e)) (stdout, stderr) = p.communicate() if p.returncode == 0: started = True break stdout = stdout.decode('utf-8') try: with io.open(args['named_log'], 'r', encoding='utf-8') as fh: log = fh.read() except IOError as e: log = '' if not log: log = stdout if log: break if not started: newlog = '' lines = log.splitlines() for line in lines: if self.ERROR_RE.search(line): newlog += line + '\n' self._cleanup_process() raise ZoneFileServiceError('There was an problem executing %s to serve the "%s" zone:\n%s' % (self.NAMED, args['zone_name'], newlog)) try: with io.open(args['named_pid'], 'r', encoding='utf-8') as fh: self.pid = int(fh.read()) except (IOError, ValueError) as e: self._cleanup_process() raise ZoneFileServiceError('There was an problem detecting the process ID for %s: %s' % (self.NAMED, e)) atexit.register(self._cleanup_process) class NameServerMappingsForDomain(object): PORT_RE = re.compile(r'^(.*):(\d+)$') BRACKETS_RE = re.compile(r'^\[(.*)\]$') DEFAULT_PORT = 53 DYN_LABEL = '_dnsviz' _allow_file = None _allow_name_only = None _allow_addr_only = None _allow_stop_at = None 
_handle_file_arg = None _resolvers_initialized = False _stub_resolver = None _full_resolver = None def __init__(self, domain, stop_at): if not (self._allow_file is not None and \ self._allow_name_only is not None and \ self._allow_addr_only is not None and \ self._allow_stop_at is not None and \ (not self._allow_file or self._handle_file_arg is not None)): raise NotImplemented if stop_at and not self._allow_stop_at: raise argparse.ArgumentTypeError('The "+" may not be specified with this option') self.domain = domain self._nsi = 1 self.delegation_mapping = {} self.stop_at = stop_at self.odd_ports = {} self.filename = None self.delegation_mapping[(self.domain, dns.rdatatype.NS)] = dns.rrset.RRset(self.domain, dns.rdataclass.IN, dns.rdatatype.NS) @classmethod def init_resolvers(cls): if not NameServerMappingsForDomain._resolvers_initialized: tm = transport.DNSQueryTransportManager() try: NameServerMappingsForDomain._stub_resolver = Resolver.from_file(RESOLV_CONF, StandardRecursiveQueryCD, transport_manager=tm) except ResolvConfError: pass NameServerMappingsForDomain._full_resolver = PrivateFullResolver(transport_manager=tm) NameServerMappingsForDomain._resolvers_initialized = True @classmethod def cleanup_resolvers(cls): NameServerMappingsForDomain._stub_resolver = None NameServerMappingsForDomain._full_resolver = None NameServerMappingsForDomain._resolvers_initialized = False @classmethod def _strip_port(cls, s): # Determine whether there is a port attached to the end match = cls.PORT_RE.search(s) if match is not None: s = match.group(1) port = int(match.group(2)) else: port = None return s, port def handle_list_arg(self, name_addr_arg): name_addr_arg = name_addr_arg.strip() # if the value is actually a path, then check it as a zone file if os.path.isfile(name_addr_arg): if not self._allow_file: raise argparse.ArgumentTypeError('A filename may not be specified with this option') self._handle_file_arg(name_addr_arg) else: self._handle_name_addr_list(name_addr_arg) 
    def _handle_name_addr_list(self, name_addr_list):
        # Process a comma-separated list of name/addr items.
        for name_addr in name_addr_list.split(','):
            self._handle_name_addr_mapping(name_addr)

    def _handle_name_no_addr(self, name, port):
        # A server name was given without an address: resolve the name to
        # its A/AAAA addresses and record them in the delegation mapping.
        resolver = None
        self.init_resolvers()
        # prefer the stub resolver (from resolv.conf); fall back to the
        # full resolver when no stub is available
        if self._stub_resolver is not None:
            resolver = self._stub_resolver
        else:
            resolver = self._full_resolver

        query_tuples = ((name, dns.rdatatype.A, dns.rdataclass.IN), (name, dns.rdatatype.AAAA, dns.rdataclass.IN))
        answer_map = resolver.query_multiple_for_answer(*query_tuples)
        found_answer = False
        for (n, rdtype, rdclass) in answer_map:
            a = answer_map[(n, rdtype, rdclass)]

            if isinstance(a, DNSAnswer):
                found_answer = True

                if (name, rdtype) not in self.delegation_mapping:
                    self.delegation_mapping[(name, rdtype)] = dns.rrset.RRset(name, dns.rdataclass.IN, rdtype)

                if rdtype == dns.rdatatype.A:
                    rdtype_cls = dns.rdtypes.IN.A.A
                else:
                    rdtype_cls = dns.rdtypes.IN.AAAA.AAAA
                for rdata in a.rrset:
                    self.delegation_mapping[(name, rdtype)].add(rdtype_cls(dns.rdataclass.IN, rdtype, rdata.address))

                    # a non-standard port is recorded per (domain, address)
                    if port is not None and port != self.DEFAULT_PORT:
                        self.odd_ports[(self.domain, IPAddr(rdata.address))] = port

            # negative responses
            elif isinstance(a, (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer)):
                pass

            # error responses
            elif isinstance(a, (dns.exception.Timeout, dns.resolver.NoNameservers)):
                pass

        if not found_answer:
            raise argparse.ArgumentTypeError('"%s" could not be resolved to an address. Please specify an address or use a name that resolves properly.'
                    % fmt.humanize_name(name))

    def _handle_name_with_addr(self, name, addr, port):
        # A server name was given with an explicit address: record the
        # appropriate A/AAAA glue record directly.
        if addr.version == 6:
            rdtype = dns.rdatatype.AAAA
            rdtype_cls = dns.rdtypes.IN.AAAA.AAAA
        else:
            rdtype = dns.rdatatype.A
            rdtype_cls = dns.rdtypes.IN.A.A
        if (name, rdtype) not in self.delegation_mapping:
            self.delegation_mapping[(name, rdtype)] = dns.rrset.RRset(name, dns.rdataclass.IN, rdtype)
        self.delegation_mapping[(name, rdtype)].add(rdtype_cls(dns.rdataclass.IN, rdtype, addr))

        if port is not None and port != self.DEFAULT_PORT:
            self.odd_ports[(self.domain, addr)] = port

    def _handle_name_addr_mapping(self, name_addr):
        # Parse a single "name", "addr", or "name=addr" item (with optional
        # port) and fold it into the delegation mapping.
        name_addr = name_addr.strip()
        name, addr, port = self._parse_name_addr(name_addr)

        if not name and not self._allow_addr_only:
            raise argparse.ArgumentTypeError('A domain name must accompany the address')

        # NOTE(review): "name name" in the message below looks like a typo.
        if not addr and not self._allow_name_only:
            raise argparse.ArgumentTypeError('An address must accompany the domain name name')

        name = self._format_name(name)
        addr = self._format_addr(addr)

        # Add the name to the NS RRset
        self.delegation_mapping[(self.domain, dns.rdatatype.NS)].add(dns.rdtypes.ANY.NS.NS(dns.rdataclass.IN, dns.rdatatype.NS, name))

        if not addr:
            self._handle_name_no_addr(name, port)
        else:
            self._handle_name_with_addr(name, addr, port)

    def _create_name(self):
        # value is an address
        # Synthesize a unique placeholder server name under the _dnsviz label.
        name = 'ns%d.%s.%s' % (self._nsi, self.DYN_LABEL, lb2s(self.domain.canonicalize().to_text()))
        self._nsi += 1
        return name

    def _format_name(self, name):
        # Convert name (or a synthesized placeholder, if None) to dns.name.Name.
        if name is None:
            name = self._create_name()
        try:
            name = dns.name.from_text(name)
        except dns.exception.DNSException:
            raise argparse.ArgumentTypeError('The domain name was invalid: "%s"' % name)
        return name

    def _format_addr(self, addr):
        # Convert addr to IPAddr, stripping surrounding brackets; IPv6
        # addresses must have been bracketed.
        if addr is not None:
            addr, num_sub = self.BRACKETS_RE.subn(r'\1', addr)
            try:
                addr = IPAddr(addr)
            except ValueError:
                raise argparse.ArgumentTypeError('The IP address was invalid: "%s"' % addr)
            if addr.version == 6 and num_sub < 1:
                raise argparse.ArgumentTypeError('Brackets are required around IPv6 addresses.')
            return addr

    def _parse_name_addr(self, name_addr):
        """Split a raw item into (name, addr, port); any may be None."""
        # 1. Strip an optional port off the end
        name_addr_orig = name_addr
        name_addr, port = self._strip_port(name_addr)

        # 2. Now determine whether the argument is a) a single value--either
        #    name or addr--or b) a name-addr mapping
        try:
            name, addr = name_addr.split('=', 1)
        except ValueError:
            # a) Argument is either a name or an address, not a mapping;
            #    Now, determine which it is.
            try:
                IPAddr(self.BRACKETS_RE.sub(r'\1', name_addr))
            except ValueError:
                # a1. It is not a valid address. Maybe. See if the address
                #     was valid with the port re-appended.
                try:
                    IPAddr(self.BRACKETS_RE.sub(r'\1', name_addr_orig))
                except ValueError:
                    # a2. Even with the port, the address is not valid, so the
                    #     must be a name instead of an address. Validity of
                    #     the name will be checked later.
                    name = name_addr
                    addr = None
                else:
                    # a3. When considering the address with the port, the
                    #     address is valid, so it is in fact an address.
                    #     Re-append the port to make the address valid, and
                    #     cancel the port.
                    name = None
                    addr = name_addr_orig
                    port = None
            else:
                # a4. Value was a valid address.
                name = None
                addr = name_addr
        else:
            # b) Argument is a name-addr mapping. Now, determine whether
            #    removing the port was the right thing.
            name = name.strip()
            addr = addr.strip()
            if port is None:
                addr_orig = addr
            else:
                addr_orig = '%s:%d' % (addr, port)
            try:
                IPAddr(self.BRACKETS_RE.sub(r'\1', addr))
            except ValueError:
                # b1. Without the port, addr is not a valid address. See if
                #     things change when we re-append the port.
                try:
                    IPAddr(self.BRACKETS_RE.sub(r'\1', addr_orig))
                except ValueError:
                    # b2. Even with the port, the address is not valid, so it
                    #     doesn't matter if we leave the port on or off;
                    #     address invalidity will be reported later.
                    pass
                else:
                    # b3. When considering the address with the port, the
                    #     address is valid, so re-append the port to make the
                    #     address valid, and cancel the port.
                    addr = addr_orig
                    port = None
            else:
                # b4. Value was a valid address, so no need to do anything
                pass

        return name, addr, port

    def _set_filename(self, filename):
        # Record a zone file to be served locally for this domain.
        self.filename = filename

    def _extract_delegation_info_from_file(self, filename):
        # if this is a file containing delegation records, then read the
        # file, create a name=value string, and call name_addrs_from_string()
        try:
            with io.open(filename, 'r', encoding='utf-8') as fh:
                file_contents = fh.read()
        except IOError as e:
            raise argparse.ArgumentTypeError('%s: "%s"' % (e.strerror, filename))

        # parse the file contents as the ANSWER section of a DNS message
        try:
            m = dns.message.from_text(str(';ANSWER\n' + file_contents))
        except dns.exception.DNSException as e:
            raise argparse.ArgumentTypeError('Error reading delegation records from %s: "%s"' % (filename, e))

        try:
            ns_rrset = m.find_rrset(m.answer, self.domain, dns.rdataclass.IN, dns.rdatatype.NS)
        except KeyError:
            raise argparse.ArgumentTypeError('No NS records for %s found in %s' % (lb2s(self.domain.canonicalize().to_text()), filename))

        for rdata in ns_rrset:
            # use in-bailiwick glue addresses when present; otherwise just
            # the server name
            a_rrsets = [r for r in m.answer if r.name == rdata.target and r.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)]
            if not a_rrsets or not rdata.target.is_subdomain(self.domain.parent()):
                name_addr = lb2s(rdata.target.canonicalize().to_text())
            else:
                # NOTE(review): only the last glue address survives this
                # loop; confirm whether each address should be handled.
                for a_rrset in a_rrsets:
                    for a_rdata in a_rrset:
                        name_addr = '%s=[%s]' % (lb2s(rdata.target.canonicalize().to_text()), a_rdata.address)
            self._handle_name_addr_mapping(name_addr)

class DelegationNameServerMappingsForDomain(NameServerMappingsForDomain):
    # Delegation (-N) servers: a file is allowed; every name must carry an
    # address; addr-only items are not allowed; "+" is not allowed.
    _allow_file = True
    _allow_name_only = False
    _allow_addr_only = False
    _allow_stop_at = False
    _handle_file_arg = NameServerMappingsForDomain._extract_delegation_info_from_file

    def __init__(self, *args, **kwargs):
        super(DelegationNameServerMappingsForDomain, self).__init__(*args, **kwargs)
        # delegation information makes no sense for the root zone
        if self.domain == dns.name.root:
            raise argparse.ArgumentTypeError('The root domain may not specified with this option.')

class AuthoritativeNameServerMappingsForDomain(NameServerMappingsForDomain):
    # Authoritative (-x) servers: file, name-only, addr-only, and "+" all OK.
    _allow_file = True
    _allow_name_only = True
_allow_addr_only = True _allow_stop_at = True _handle_file_arg = NameServerMappingsForDomain._set_filename class RecursiveServersForDomain(NameServerMappingsForDomain): _allow_file = False _allow_name_only = True _allow_addr_only = True _allow_stop_at = False _handle_file_arg = None class DSForDomain: def __init__(self, domain, stop_at): self.domain = domain if stop_at and not self._allow_stop_at: raise argparse.ArgumentTypeError('The "+" may not be specified with this option') self.delegation_mapping = {} self.delegation_mapping[(self.domain, dns.rdatatype.DS)] = dns.rrset.RRset(self.domain, dns.rdataclass.IN, dns.rdatatype.DS) def _extract_ds_info_from_file(self, filename): # if this is a file containing delegation records, then read the # file, create a name=value string, and call name_addrs_from_string() try: with io.open(filename, 'r', encoding='utf-8') as fh: file_contents = fh.read() except IOError as e: raise argparse.ArgumentTypeError('%s: "%s"' % (e.strerror, filename)) try: m = dns.message.from_text(str(';ANSWER\n' + file_contents)) except dns.exception.DNSException as e: raise argparse.ArgumentTypeError('Error reading DS records from %s: "%s"' % (filename, e)) try: ds_rrset = m.find_rrset(m.answer, self.domain, dns.rdataclass.IN, dns.rdatatype.DS) except KeyError: raise argparse.ArgumentTypeError('No DS records for %s found in %s' % (lb2s(self.domain.canonicalize().to_text()), filename)) for rdata in ds_rrset: self.delegation_mapping[(self.domain, dns.rdatatype.DS)].add(rdata) def _handle_ds(self, ds): ds = ds.strip() try: self.delegation_mapping[(self.domain, dns.rdatatype.DS)].add(dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DS, ds)) except dns.exception.DNSException as e: raise argparse.ArgumentTypeError('Error parsing DS records: %s\n%s' % (e, ds)) def _handle_ds_list(self, ds_list): for ds in ds_list.split(','): self._handle_ds(ds) def handle_list_arg(self, ds_arg): ds_arg = ds_arg.strip() # if the value is actually a path, then check it as 
a zone file if os.path.isfile(ds_arg): self._extract_ds_info_from_file(ds_arg) else: self._handle_ds_list(ds_arg) class DomainListArgHelper: STOP_RE = re.compile(r'^(.*)\+$') @classmethod def _strip_stop_marker(cls, s): match = cls.STOP_RE.search(s) if match is not None: s = match.group(1) stop_at = True else: stop_at = False return s, stop_at def _parse_domain_list(self, domain_item_list): try: domain, item_list = domain_item_list.split(':', 1) except ValueError: raise argparse.ArgumentTypeError('Option expects both a domain and servers for that domain') domain = domain.strip() domain, stop_at = self._strip_stop_marker(domain) return domain, item_list, stop_at def _handle_domain_list_arg(self, cls, domain_list_arg): domain, list_arg, stop_at = self._parse_domain_list(domain_list_arg) if domain is not None: domain = domain.strip() try: domain = dns.name.from_text(domain) except dns.exception.DNSException: raise argparse.ArgumentTypeError('The domain name was invalid: "%s"' % domain) if list_arg is not None: list_arg = list_arg.strip() obj = cls(domain, stop_at) if list_arg: obj.handle_list_arg(list_arg) return obj def _handle_list_arg(self, cls, list_arg): obj = cls(WILDCARD_EXPLICIT_DELEGATION, False) obj.handle_list_arg(list_arg) return obj def delegation_name_server_mappings(self, arg): return self._handle_domain_list_arg(DelegationNameServerMappingsForDomain, arg) def authoritative_name_server_mappings(self, arg): return self._handle_domain_list_arg(AuthoritativeNameServerMappingsForDomain, arg) def recursive_servers_for_domain(self, arg): return self._handle_list_arg(RecursiveServersForDomain, arg) def ds_for_domain(self, arg): return self._handle_domain_list_arg(DSForDomain, arg) class ArgHelper: BRACKETS_RE = re.compile(r'^\[(.*)\]$') def __init__(self, logger): self.parser = None self.odd_ports = {} self.stop_at = {} self.explicit_delegations = {} self.ceiling = None self.explicit_only = None self.try_ipv4 = None self.try_ipv6 = None self.client_ipv4 = None 
        self.client_ipv6 = None
        self.edns_diagnostics = None
        self.th_factories = None
        self.processes = None
        self.dlv_domain = None
        self.meta_only = None
        self.cache_level = None
        self.names = None
        self.analysis_structured = None

        self.args = None
        self._arg_mapping = None

        self._logger = logger
        self._zones_to_serve = []

    def build_parser(self, prog):
        """Build the argparse parser for the probe command."""
        # NOTE(review): the empty metavar strings below look like
        # placeholders (e.g. '<filename>') stripped by an angle-bracket-
        # eating extraction step -- confirm against the upstream source.
        self.parser = argparse.ArgumentParser(description='Issue diagnostic DNS queries', prog=prog)
        helper = DomainListArgHelper()

        # python3/python2 dual compatibility
        stdout_buffer = io.open(sys.stdout.fileno(), 'wb', closefd=False)

        try:
            self.parser.add_argument('-f', '--names-file',
                    type=argparse.FileType('r', encoding='utf-8'),
                    action='store', metavar='',
                    help='Read names from a file')
        except TypeError:
            # this try/except is for
            # python3/python2 dual compatibility
            self.parser.add_argument('-f', '--names-file',
                    type=argparse.FileType('r'),
                    action='store', metavar='',
                    help='Read names from a file')
        self.parser.add_argument('-d', '--debug',
                type=int, choices=range(4), default=2,
                action='store', metavar='',
                help='Set debug level')
        try:
            self.parser.add_argument('-r', '--input-file',
                    type=argparse.FileType('r', encoding='utf-8'),
                    action='store', metavar='',
                    help='Read diagnostic queries from a file')
        except TypeError:
            # this try/except is for
            # python3/python2 dual compatibility
            self.parser.add_argument('-r', '--input-file',
                    type=argparse.FileType('r'),
                    action='store', metavar='',
                    help='Read diagnostic queries from a file')
        self.parser.add_argument('-t', '--threads',
                type=self.positive_int_not_darwin, default=1,
                action='store', metavar='',
                help='Use the specified number of threads for parallel queries')
        self.parser.add_argument('-4', '--ipv4',
                const=True, default=False,
                action='store_const',
                help='Use IPv4 only')
        self.parser.add_argument('-6', '--ipv6',
                const=True, default=False,
                action='store_const',
                help='Use IPv6 only')
        self.parser.add_argument('-b', '--source-ip',
                type=self.bindable_ip, default=[],
                action='append', metavar='',
                help='Use the specified source IPv4 or IPv6 address for queries')
        self.parser.add_argument('-u', '--looking-glass-url',
                type=self.valid_url,
                action='append', metavar='',
                help='Issue queries through the DNS looking glass at the specified URL')
        self.parser.add_argument('-k', '--insecure',
                const=True, default=False,
                action='store_const',
                help='Do not verify the TLS certificate for a DNS looking glass using HTTPS')
        self.parser.add_argument('-a', '--ancestor',
                type=self.valid_domain_name, default=None,
                action='store', metavar='',
                help='Query the ancestry of each domain name through the specified ancestor')
        self.parser.add_argument('-R', '--rr-types',
                type=self.comma_separated_dns_types,
                action='store', metavar=',[...]',
                help='Issue queries for only the specified type(s) during analysis')
        self.parser.add_argument('-s', '--recursive-servers',
                type=helper.recursive_servers_for_domain, default=[],
                action='append', metavar='[,...]',
                help='Query the specified recursive server(s)')
        self.parser.add_argument('-A', '--authoritative-analysis',
                const=True, default=False,
                action='store_const',
                help='Query authoritative servers, instead of recursive servers')
        self.parser.add_argument('-x', '--authoritative-servers',
                type=helper.authoritative_name_server_mappings, default=[],
                action='append', metavar='[+]:[,...]',
                help='Query the specified authoritative servers for a domain')
        self.parser.add_argument('-N', '--delegation-information',
                type=helper.delegation_name_server_mappings, default=[],
                action='append', metavar=':[,...]',
                help='Use the specified delegation information for a domain')
        self.parser.add_argument('-D', '--ds',
                type=helper.ds_for_domain, default=[],
                action='append', metavar=':""[,""...]',
                help='Use the specified DS records for a domain')
        self.parser.add_argument('--no-nsid',
                const=True, default=False,
                action='store_const',
                help='Do not use the NSID EDNS option in queries')
        self.parser.add_argument('-e', '--client-subnet',
                type=self.ecs_option,
                action='store', metavar='[:]',
                help='Use the DNS client subnet option with the specified subnet and prefix length in queries')
        self.parser.add_argument('-c', '--cookie',
                type=self.dns_cookie_option, default=self.dns_cookie_rand(),
                action='store', metavar='',
                help='Use the specified DNS cookie value in queries')
        self.parser.add_argument('-E', '--edns',
                const=True, default=False,
                action='store_const',
                help='Issue queries to check EDNS compatibility')
        self.parser.add_argument('-o', '--output-file',
                type=argparse.FileType('wb'), default=stdout_buffer,
                action='store', metavar='',
                help='Save the output to the specified file')
        self.parser.add_argument('-p', '--pretty-output',
                const=True, default=False,
                action='store_const',
                help='Format JSON output with indentation and newlines')
        self.parser.add_argument('domain_name',
                type=self.valid_domain_name,
                action='store', nargs='*', metavar='',
                help='Domain names')

        # map each option's dest to its option strings, for error messages
        self._arg_mapping = dict([(a.dest, '/'.join(a.option_strings)) for a in self.parser._actions])

    def parse_args(self, args):
        self.args = self.parser.parse_args(args)

    @classmethod
    def positive_int_not_darwin(cls, arg):
        # Validate a thread count; multi-threading is unsupported on MacOS.
        val = cls.positive_int(arg)
        if platform.system().lower() == 'darwin':
            # NOTE(review): this rejects ANY -t value on MacOS, even 1;
            # presumably only val > 1 was meant -- confirm intent.
            msg = "Using more than one thread is not currently supported on MacOS."
raise argparse.ArgumentTypeError(msg) return val @classmethod def positive_int(cls, arg): try: val = int(arg) except ValueError: msg = "The argument must be a positive integer: %s" % val raise argparse.ArgumentTypeError(msg) else: if val < 1: msg = "The argument must be a positive integer: %d" % val raise argparse.ArgumentTypeError(msg) return val @classmethod def bindable_ip(cls, arg): try: addr = IPAddr(cls.BRACKETS_RE.sub(r'\1', arg)) except ValueError: raise argparse.ArgumentTypeError('The IP address was invalid: "%s"' % arg) if addr.version == 4: fam = socket.AF_INET else: fam = socket.AF_INET6 try: s = socket.socket(fam) s.bind((addr, 0)) except socket.error as e: if e.errno == errno.EADDRNOTAVAIL: raise argparse.ArgumentTypeError('Cannot bind to specified IP address: "%s"' % addr) finally: try: s.close() except: pass return addr @classmethod def valid_url(cls, arg): url = urlparse.urlparse(arg) if url.scheme not in ('http', 'https', 'ws', 'ssh'): raise argparse.ArgumentTypeError('Unsupported URL scheme: "%s"' % url.scheme) # check that version is >= 2.7.9 if HTTPS is requested if url.scheme == 'https': vers0, vers1, vers2 = sys.version_info[:3] if (2, 7, 9) > (vers0, vers1, vers2): raise argparse.ArgumentTypeError('Python version >= 2.7.9 is required to use a DNS looking glass with HTTPS.') elif url.scheme == 'ws': if url.hostname is not None: raise argparse.ArgumentTypeError('WebSocket URL must designate a local UNIX domain socket.') return arg @classmethod def comma_separated_dns_types(cls, arg): rdtypes = [] arg = arg.strip() if not arg: return rdtypes for r in arg.split(','): try: rdtypes.append(dns.rdatatype.from_text(r.strip())) except dns.rdatatype.UnknownRdatatype: raise argparse.ArgumentTypeError('Invalid resource record type: %s' % (r)) return rdtypes @classmethod def valid_domain_name(cls, arg): # python3/python2 dual compatibility if isinstance(arg, bytes): arg = codecs.decode(arg, sys.getfilesystemencoding()) try: return dns.name.from_text(arg) 
        except dns.exception.DNSException:
            raise argparse.ArgumentTypeError('Invalid domain name: "%s"' % arg)

    @classmethod
    def nsid_option(cls):
        # An empty NSID EDNS option.
        return dns.edns.GenericOption(dns.edns.NSID, b'')

    @classmethod
    def ecs_option(cls, arg):
        """Build an EDNS Client Subnet option from "addr[/prefix_len]"."""
        try:
            addr, prefix_len = arg.split('/', 1)
        except ValueError:
            addr = arg
            prefix_len = None

        try:
            addr = IPAddr(addr)
        except ValueError:
            raise argparse.ArgumentTypeError('The IP address was invalid: "%s"' % addr)

        if addr.version == 4:
            addrlen = 4
            family = 1
        else:
            addrlen = 16
            family = 2

        if prefix_len is None:
            # default to the full address length, in bits
            prefix_len = addrlen << 3
        else:
            try:
                prefix_len = int(prefix_len)
            except ValueError:
                raise argparse.ArgumentTypeError('The prefix length was invalid: "%s"' % prefix_len)

            if prefix_len < 0 or prefix_len > (addrlen << 3):
                raise argparse.ArgumentTypeError('The prefix length was invalid: "%d"' % prefix_len)

        bytes_masked, remainder = divmod(prefix_len, 8)

        # wire format: family, source prefix length, scope prefix length
        # (0), then only the masked address bytes
        wire = struct.pack(b'!H', family)
        wire += struct.pack(b'!B', prefix_len)
        wire += struct.pack(b'!B', 0)
        wire += addr._ipaddr_bytes[:bytes_masked]
        if remainder:
            # python3/python2 dual compatibility
            byte = addr._ipaddr_bytes[bytes_masked]
            if isinstance(addr._ipaddr_bytes, str):
                byte = ord(byte)
            # zero out the unmasked low bits of the partial byte
            mask = ~(2**(8 - remainder)-1)
            wire += struct.pack('B', mask & byte)

        return dns.edns.GenericOption(8, wire)

    @classmethod
    def dns_cookie_option(cls, arg):
        """Build a DNS COOKIE EDNS option from an 8-byte hex client cookie."""
        if not arg:
            return None

        try:
            cookie = binascii.unhexlify(arg)
        except (binascii.Error, TypeError):
            raise argparse.ArgumentTypeError('The DNS cookie provided was not valid hexadecimal: "%s"' % arg)

        if len(cookie) != 8:
            raise argparse.ArgumentTypeError('The DNS client cookie provided had a length of %d, but only a length of %d is valid .'
                    % (len(cookie), 8))

        return dns.edns.GenericOption(10, cookie)

    @classmethod
    def dns_cookie_rand(cls):
        # Generate a random 8-byte client cookie.
        r = random.getrandbits(64)
        cookie = struct.pack(b'Q', r)
        return cls.dns_cookie_option(binascii.hexlify(cookie))

    def aggregate_delegation_info(self):
        """Fold parsed server/DS options into explicit delegations, and set
        up local zones to serve user-supplied delegation data."""
        localhost = dns.name.from_text('localhost')
        # detect whether the IPv6 loopback is bindable; fall back to IPv4
        try:
            self.bindable_ip('::1')
        except argparse.ArgumentTypeError:
            use_ipv6_loopback = False
            loopback = IPAddr('127.0.0.1')
            loopback_rdtype = dns.rdatatype.A
            loopback_rdtype_cls = dns.rdtypes.IN.A.A
        else:
            use_ipv6_loopback = True
            loopback = IPAddr('::1')
            loopback_rdtype = dns.rdatatype.AAAA
            loopback_rdtype_cls = dns.rdtypes.IN.AAAA.AAAA

        self.rdclass = dns.rdataclass.IN

        for arg in self.args.recursive_servers + self.args.authoritative_servers:
            zone_name = arg.domain
            for name, rdtype in arg.delegation_mapping:
                if (name, rdtype) not in self.explicit_delegations:
                    self.explicit_delegations[(name, rdtype)] = arg.delegation_mapping[(name, rdtype)]
                else:
                    self.explicit_delegations[(name, rdtype)].update(arg.delegation_mapping[(name, rdtype)])
            self.odd_ports.update(arg.odd_ports)
            self.stop_at[arg.domain] = arg.stop_at
            if arg.filename is not None:
                # serve the supplied zone file locally and delegate the
                # zone to localhost at the served port
                zone = ZoneFileToServe(arg.domain, arg.filename)
                self._zones_to_serve.append(zone)
                self.explicit_delegations[(zone_name, dns.rdatatype.NS)].add(dns.rdtypes.ANY.NS.NS(dns.rdataclass.IN, dns.rdatatype.NS, localhost))
                self.explicit_delegations[(localhost, loopback_rdtype)] = dns.rrset.RRset(localhost, dns.rdataclass.IN, loopback_rdtype)
                self.explicit_delegations[(localhost, loopback_rdtype)].add(loopback_rdtype_cls(dns.rdataclass.IN, loopback_rdtype, loopback))
                self.odd_ports[(zone_name, loopback)] = zone.port

        # group -D/-N delegation data by parent zone
        delegation_info_by_zone = OrderedDict()
        for arg in self.args.ds + self.args.delegation_information:
            zone_name = arg.domain.parent()
            if (zone_name, dns.rdatatype.NS) in self.explicit_delegations:
                raise argparse.ArgumentTypeError('Cannot use "' + lb2s(zone_name.to_text()) + '" with %(authoritative_servers)s if a child zone is specified with
%(delegation_information)s' % self._arg_mapping)
            if zone_name not in delegation_info_by_zone:
                delegation_info_by_zone[zone_name] = {}
            for name, rdtype in arg.delegation_mapping:
                if (name, rdtype) not in delegation_info_by_zone[zone_name]:
                    delegation_info_by_zone[zone_name][(name, rdtype)] = arg.delegation_mapping[(name, rdtype)]
                else:
                    delegation_info_by_zone[zone_name][(name, rdtype)].update(arg.delegation_mapping[(name, rdtype)])

        # serve one synthesized zone per parent with the collected
        # delegation/DS data, delegated to localhost
        for zone_name in delegation_info_by_zone:
            zone = ZoneFileToServe.from_mappings(zone_name, delegation_info_by_zone[zone_name], use_ipv6_loopback)
            self._zones_to_serve.append(zone)
            self.explicit_delegations[(zone_name, dns.rdatatype.NS)] = dns.rrset.RRset(zone_name, dns.rdataclass.IN, dns.rdatatype.NS)
            self.explicit_delegations[(zone_name, dns.rdatatype.NS)].add(dns.rdtypes.ANY.NS.NS(dns.rdataclass.IN, dns.rdatatype.NS, localhost))
            self.explicit_delegations[(localhost, loopback_rdtype)] = dns.rrset.RRset(localhost, dns.rdataclass.IN, loopback_rdtype)
            self.explicit_delegations[(localhost, loopback_rdtype)].add(loopback_rdtype_cls(dns.rdataclass.IN, loopback_rdtype, loopback))
            self.odd_ports[(zone_name, loopback)] = zone.port
            self.stop_at[zone_name] = True

    def populate_recursive_servers(self):
        """When no servers were specified, populate the wildcard delegation
        from the system's resolv.conf nameservers."""
        if not self.args.authoritative_analysis and not self.args.recursive_servers:
            # NOTE(review): "tm" is not defined in this method or visibly in
            # this chunk; presumably a module-level transport manager --
            # confirm it exists at module scope.
            try:
                resolver = Resolver.from_file(RESOLV_CONF, StandardRecursiveQueryCD, transport_manager=tm)
            except ResolvConfError:
                raise ResolvConfError('If servers are not specified with the %s option, then %s must have valid nameserver entries.'
                        % \
                        (self._arg_mapping['recursive_servers'], RESOLV_CONF))

            if (WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS) not in self.explicit_delegations:
                self.explicit_delegations[(WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS)] = dns.rrset.RRset(WILDCARD_EXPLICIT_DELEGATION, dns.rdataclass.IN, dns.rdatatype.NS)
            # synthesize ns0, ns1, ... names, one per configured server
            for i, server in enumerate(resolver._servers):
                if IPAddr(server).version == 6:
                    rdtype = dns.rdatatype.AAAA
                else:
                    rdtype = dns.rdatatype.A
                name = dns.name.from_text('ns%d' % i)
                self.explicit_delegations[(WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS)].add(dns.rdtypes.ANY.NS.NS(dns.rdataclass.IN, dns.rdatatype.NS, name))
                if (name, rdtype) not in self.explicit_delegations:
                    self.explicit_delegations[(name, rdtype)] = dns.rrset.RRset(name, dns.rdataclass.IN, rdtype)
                self.explicit_delegations[(name, rdtype)].add(dns.rdata.from_text(dns.rdataclass.IN, rdtype, server))

    def check_args(self):
        """Validate constraints between options; raise ArgumentTypeError."""
        if not self.args.names_file and not self.args.domain_name and not self.args.input_file:
            raise argparse.ArgumentTypeError('If no domain names are supplied as command-line arguments, then either %(input_file)s or %(names_file)s must be used.' % \
                    self._arg_mapping)

        if self.args.names_file and self.args.domain_name:
            raise argparse.ArgumentTypeError('If %(names_file)s is used, then domain names may not supplied as command line arguments.' % \
                    self._arg_mapping)

        if self.args.authoritative_analysis and self.args.recursive_servers:
            raise argparse.ArgumentTypeError('If %(authoritative_analysis)s is used, then %(recursive_servers)s cannot be used.' % \
                    self._arg_mapping)

        if self.args.authoritative_servers and not self.args.authoritative_analysis:
            raise argparse.ArgumentTypeError('%(authoritative_servers)s may only be used in conjunction with %(authoritative_analysis)s.' % \
                    self._arg_mapping)

        if self.args.delegation_information and not self.args.authoritative_analysis:
            raise argparse.ArgumentTypeError('%(delegation_information)s may only be used in conjunction with %(authoritative_analysis)s.'
                    % \
                    self._arg_mapping)

        if self.args.ds and not self.args.delegation_information:
            raise argparse.ArgumentTypeError('%(ds)s may only be used in conjunction with %(delegation_information)s.' % \
                    self._arg_mapping)

    def set_kwargs(self):
        """Derive analysis keyword parameters from the parsed arguments."""
        # analysis ceiling: explicit ancestor > none (authoritative) > root
        if self.args.ancestor is not None:
            self.ceiling = self.args.ancestor
        elif self.args.authoritative_analysis:
            self.ceiling = None
        else:
            self.ceiling = dns.name.root

        if self.args.rr_types is not None:
            self.explicit_only = True
        else:
            self.explicit_only = False

        # if both are specified or neither is specified, then they're both tried
        if (self.args.ipv4 and self.args.ipv6) or \
                (not self.args.ipv4 and not self.args.ipv6):
            self.try_ipv4 = True
            self.try_ipv6 = True
        # if one or the other is specified, then only the one specified is
        # tried
        else:
            if self.args.ipv4:
                self.try_ipv4 = True
                self.try_ipv6 = False
            else: # self.args.ipv6
                self.try_ipv4 = False
                self.try_ipv6 = True

        for ip in self.args.source_ip:
            if ip.version == 4:
                self.client_ipv4 = ip
            else:
                self.client_ipv6 = ip

        # build one transport handler factory per looking-glass URL
        if self.args.looking_glass_url:
            self.th_factories = []
            for looking_glass_url in self.args.looking_glass_url:
                url = urlparse.urlparse(looking_glass_url)
                if url.scheme in ('http', 'https'):
                    self.th_factories.append(transport.DNSQueryTransportHandlerHTTPFactory(looking_glass_url, insecure=self.args.insecure))
                elif url.scheme == 'ws':
                    self.th_factories.append(transport.DNSQueryTransportHandlerWebSocketServerFactory(url.path))
                elif url.scheme == 'ssh':
                    self.th_factories.append(transport.DNSQueryTransportHandlerRemoteCmdFactory(looking_glass_url))
        else:
            self.th_factories = None

        # the following options are not documented in usage, because they don't
        # apply to most users
        #if args.dlv is not None:
        #    dlv_domain = args.dlv
        #else:
        #    dlv_domain = None
        #try:
        #    cache_level = int(opts['-C'])
        #except (KeyError, ValueError):
        #    cache_level = None
        self.dlv_domain = None
        self.cache_level = None
        self.meta_only = None

        # register the EDNS options to include in queries
        if self.args.client_subnet:
            CustomQueryMixin.edns_options.append(self.args.client_subnet)
        if not self.args.no_nsid:
            CustomQueryMixin.edns_options.append(self.nsid_option())
        if self.args.cookie:
            CustomQueryMixin.edns_options.append(self.args.cookie)

    def set_buffers(self):
        # This entire method is for
        # python3/python2 dual compatibility
        # Re-open each file argument with an explicit encoding/mode; stdin
        # and stdout are re-opened by descriptor, named files by name.
        if self.args.input_file is not None:
            if self.args.input_file.fileno() == sys.stdin.fileno():
                filename = self.args.input_file.fileno()
            else:
                filename = self.args.input_file.name
                self.args.input_file.close()
            self.args.input_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.names_file is not None:
            if self.args.names_file.fileno() == sys.stdin.fileno():
                filename = self.args.names_file.fileno()
            else:
                filename = self.args.names_file.name
                self.args.names_file.close()
            self.args.names_file = io.open(filename, 'r', encoding='utf-8')
        if self.args.output_file is not None:
            if self.args.output_file.fileno() == sys.stdout.fileno():
                filename = self.args.output_file.fileno()
            else:
                filename = self.args.output_file.name
                self.args.output_file.close()
            self.args.output_file = io.open(filename, 'wb')

    def check_network_connectivity(self):
        # Warn (do not fail) when global connectivity appears absent.
        if self.args.authoritative_analysis:
            if self.try_ipv4 and get_client_address(A_ROOT_IPV4) is None:
                self._logger.warning('No global IPv4 connectivity detected')
            if self.try_ipv6 and get_client_address(A_ROOT_IPV6) is None:
                self._logger.warning('No global IPv6 connectivity detected')

    def get_log_level(self):
        # Map the -d debug level to a logging level.
        if self.args.debug > 2:
            return logging.DEBUG
        elif self.args.debug > 1:
            return logging.INFO
        elif self.args.debug > 0:
            return logging.WARNING
        else:
            return logging.ERROR

    def ingest_input(self):
        """Load and version-check JSON analysis input (-r), if supplied."""
        if not self.args.input_file:
            return

        analysis_str = self.args.input_file.read()
        if not analysis_str:
            if self.args.input_file.fileno() != sys.stdin.fileno():
                raise AnalysisInputError('No input')
            else:
                raise AnalysisInputError()
        try:
            self.analysis_structured = json.loads(analysis_str)
        except ValueError:
            raise AnalysisInputError('There was an error
parsing the JSON input: "%s"' % self.args.input_file.name)

        # check version
        if '_meta._dnsviz.' not in self.analysis_structured or 'version' not in self.analysis_structured['_meta._dnsviz.']:
            raise AnalysisInputError('No version information in JSON input: "%s"' % self.args.input_file.name)
        try:
            major_vers, minor_vers = [int(x) for x in str(self.analysis_structured['_meta._dnsviz.']['version']).split('.', 1)]
        except ValueError:
            raise AnalysisInputError('Version of JSON input is invalid: %s' % self.analysis_structured['_meta._dnsviz.']['version'])
        # ensure major version is a match and minor version is no greater
        # than the current minor version
        curr_major_vers, curr_minor_vers = [int(x) for x in str(DNS_RAW_VERSION).split('.', 1)]
        if major_vers != curr_major_vers or minor_vers > curr_minor_vers:
            raise AnalysisInputError('Version %d.%d of JSON input is incompatible with this software.' % (major_vers, minor_vers))

    def ingest_names(self):
        """Build the ordered, de-duplicated set of names to analyze from
        positional arguments, a names file, or the JSON input."""
        self.names = OrderedDict()

        if self.args.domain_name:
            for name in self.args.domain_name:
                if name not in self.names:
                    self.names[name] = None
            return

        if self.args.names_file:
            args = self.args.names_file
        else:
            try:
                args = self.analysis_structured['_meta._dnsviz.']['names']
            except KeyError:
                raise AnalysisInputError('No names found in JSON input!')

        for arg in args:
            name = arg.strip()

            # python3/python2 dual compatibility
            if hasattr(name, 'decode'):
                name = name.decode('utf-8')

            try:
                name = dns.name.from_text(name)
            except UnicodeDecodeError as e:
                self._logger.error('%s: "%s"' % (e, name))
            except dns.exception.DNSException:
                self._logger.error('The domain name was invalid: "%s"' % name)
            else:
                if name not in self.names:
                    self.names[name] = None

    def serve_zones(self):
        # Start a local name server for each zone collected earlier.
        for zone in self._zones_to_serve:
            zone.serve()

def build_helper(logger, cmd, subcmd):
    """Create an ArgHelper whose parser is named "<cmd> <subcmd>"."""
    arghelper = ArgHelper(logger)
    arghelper.build_parser('%s %s' % (cmd, subcmd))
    return arghelper

def main(argv):
    global th_factories
    global explicit_delegations
    global odd_ports

    try:
        arghelper =
                build_helper(logger, sys.argv[0], argv[0])
        arghelper.parse_args(argv[1:])
        logger.setLevel(arghelper.get_log_level())

        try:
            arghelper.check_args()
            arghelper.set_kwargs()
            arghelper.set_buffers()
            arghelper.check_network_connectivity()
            arghelper.aggregate_delegation_info()
            arghelper.populate_recursive_servers()
            arghelper.ingest_input()
            arghelper.ingest_names()
            arghelper.serve_zones()
        except argparse.ArgumentTypeError as e:
            arghelper.parser.error(str(e))
        except ResolvConfError as e:
            # mimic argparse's usage+error output; exit code 5
            arghelper.parser.print_usage(sys.stderr)
            sys.stderr.write("%(prog)s: error: %(errmsg)s\n" % { 'prog': arghelper.parser.prog, 'errmsg': str(e) })
            sys.exit(5)
        except (ZoneFileServiceError, MissingExecutablesError) as e:
            s = str(e)
            if s:
                logger.error(s)
            sys.exit(1)
        except AnalysisInputError as e:
            s = str(e)
            if s:
                logger.error(s)
            sys.exit(3)

        th_factories = arghelper.th_factories
        explicit_delegations = arghelper.explicit_delegations
        odd_ports = arghelper.odd_ports

        # choose the analyst class: authoritative vs. recursive,
        # parallel (threads > 1) vs. bulk
        if arghelper.args.authoritative_analysis:
            if arghelper.args.threads > 1:
                cls = ParallelAnalyst
            else:
                cls = BulkAnalyst
        else:
            if arghelper.args.threads > 1:
                cls = RecursiveParallelAnalyst
            else:
                cls = RecursiveBulkAnalyst

        if arghelper.args.pretty_output:
            kwargs = { 'indent': 4, 'separators': (',', ': ') }
        else:
            kwargs = {}

        dnsviz_meta = { 'version': DNS_RAW_VERSION, 'names': [lb2s(n.to_text()) for n in arghelper.names] }

        NameServerMappingsForDomain.cleanup_resolvers()

        _init_tm()
        name_objs = []
        if arghelper.args.input_file:
            # deserialize previously-captured analysis instead of querying
            cache = {}
            for name in arghelper.names:
                if name.canonicalize().to_text() not in arghelper.analysis_structured:
                    logger.error('The domain name was not found in the analysis input: "%s"' % name.to_text())
                    continue
                name_objs.append(OnlineDomainNameAnalysis.deserialize(name, arghelper.analysis_structured, cache))
        else:
            if arghelper.args.threads > 1:
                a = cls(arghelper.rdclass, arghelper.try_ipv4, arghelper.try_ipv6, arghelper.client_ipv4, arghelper.client_ipv6, CustomQueryMixin, arghelper.ceiling, arghelper.args.edns,
                        arghelper.stop_at, arghelper.cache_level, arghelper.args.rr_types, arghelper.explicit_only, arghelper.dlv_domain, arghelper.args.threads)
            else:
                if cls.use_full_resolver:
                    _init_full_resolver()
                else:
                    _init_stub_resolver()
                a = cls(arghelper.rdclass, arghelper.try_ipv4, arghelper.try_ipv6, arghelper.client_ipv4, arghelper.client_ipv6, CustomQueryMixin, arghelper.ceiling, arghelper.args.edns, arghelper.stop_at, arghelper.cache_level, arghelper.args.rr_types, arghelper.explicit_only, arghelper.dlv_domain)
            name_objs = a.analyze(arghelper.names)
        _cleanup_tm()

        name_objs = [x for x in name_objs if x is not None]

        if not name_objs:
            sys.exit(4)

        # serialize the analysis, plus meta information, to JSON
        d = OrderedDict()
        for name_obj in name_objs:
            name_obj.serialize(d, arghelper.meta_only)
        d['_meta._dnsviz.'] = dnsviz_meta

        try:
            arghelper.args.output_file.write(json.dumps(d, ensure_ascii=False, **kwargs).encode('utf-8'))
        except IOError as e:
            logger.error('Error writing analysis: %s' % e)
            sys.exit(3)

    except KeyboardInterrupt:
        logger.error('Interrupted.')
        sys.exit(4)

if __name__ == "__main__":
    main(sys.argv)

# NOTE(review): the following line is tar-archive member-boundary residue
# from the extraction; the next member is dnsviz-0.11.1/dnsviz/commands/query.py.
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/commands/query.py0000644000175000017500000003623315001472663017011 0ustar00caseycasey#!/usr/bin/env python
#
# This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring,
# analysis, and visualization.
# Created by Casey Deccio (casey@deccio.net)
#
# Copyright 2015-2016 VeriSign, Inc.
#
# Copyright 2016-2021 Casey Deccio
#
# DNSViz is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# DNSViz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import getopt import socket import subprocess import sys import dns.name, dns.rdataclass, dns.rdatatype from dnsviz.ipaddr import IPAddr from dnsviz import resolver as Resolver def _get_nameservers_for_name(addr): nameservers = [] try: addrinfo = socket.getaddrinfo(addr, 53, 0, 0, socket.IPPROTO_TCP) except socket.gaierror: sys.stderr.write('Unable to resolve "%s"\n' % addr) else: for item in addrinfo: nameservers.append(IPAddr(item[4][0])) return nameservers def usage(err=None): if err is not None: err += '\n\n' else: err = '' sys.stderr.write('''%sUsage: %s %s [@global-server] [domain] [q-type] [q-class] {q-opt} {global-d-opt} host [@local-server] {local-d-opt} [ host [@local-server] {local-d-opt} [...]] Where: domain is in the Domain Name System q-class is one of (in...) [default: in] q-type is one of (a,mx,ns,soa,txt,...) [default:a] q-opt is one of: -x dot-notation (shortcut for reverse lookups) -b address (bind to source address) -q name (specify query name) -t type (specify query type) -c class (specify query class) -4 (use IPv4 query transport only) -6 (use IPv6 query transport only) d-opt is of the form +keyword[=value], where keyword is: +[no]trace (Trace delegation down from root [+dnssec]) +trusted-key=#### (filename containing Trusted Key when chasing DNSSEC sigs) global d-opts and servers (before host name) affect all queries. local d-opts and servers (after host name) affect only that lookup. 
-h (print help and exit) ''' % (err, sys.argv[0], __name__.split('.')[-1])) class DVCommandLineQuery: def __init__(self, qname, rdtype, rdclass): self.qname = qname self.rdtype = rdtype self.rdclass = rdclass self.nameservers = [] self.query_options = [] self.trace = False self.trusted_keys_file = None def process_query_options(self, global_options): for arg in global_options + self.query_options: if arg == '+trace': self.trace = True elif arg == '+notrace': self.trace = False elif arg.startswith('+trusted-key') and \ (len(arg) <= 12 or arg[12] == '='): try: opt, arg = arg.split('=') if not arg: raise ValueError() except ValueError: sys.stderr.write('+trusted-key requires a filename argument.\n') sys.exit(1) else: self.trusted_keys_file = arg else: sys.stderr.write('Option "%s" not recognized.\n' % arg) sys.exit(1) def process_nameservers(self, nameservers, use_ipv4, use_ipv6): processed_nameservers = [] for addr in self.nameservers: processed_nameservers.extend(_get_nameservers_for_name(addr)) if not use_ipv4: processed_nameservers = [x for x in processed_nameservers if x.version != 4] if not use_ipv6: processed_nameservers = [x for x in processed_nameservers if x.version != 6] self.nameservers = nameservers + processed_nameservers def _get_rdtype(self, options): if self.rdtype is None: return options['rdtype'] else: return self.rdtype def _get_rdclass(self, options): if self.rdclass is None: return options['rdclass'] else: return self.rdclass def query_and_display(self, options): dnsget_args = ['dnsviz', 'probe'] dnsviz_args = ['dnsviz', 'print'] dnsget_args.extend(['-d', '1', '-a', '.']) if options['use_ipv4'] and not options['use_ipv6']: dnsget_args.append('-4') if options['use_ipv6'] and not options['use_ipv4']: dnsget_args.append('-6') if options['client_ipv4'] is not None: dnsget_args.extend(['-b', options['client_ipv4']]) if options['client_ipv6'] is not None: dnsget_args.extend(['-b', options['client_ipv6']]) dnsget_args.extend(['-R', 
dns.rdatatype.to_text(self._get_rdtype(options))]) if self.trace: dnsget_args.append('-A') else: if self.nameservers[0].version == 6: dnsget_args.extend(['-s', '[%s]' % (self.nameservers[0])]) else: dnsget_args.extend(['-s', self.nameservers[0]]) dnsget_args.append(self.qname) if self.trusted_keys_file is not None: dnsviz_args.extend(['-t', self.trusted_keys_file]) dnsviz_args.extend(['-R', dns.rdatatype.to_text(self._get_rdtype(options))]) try: dnsget_p = subprocess.Popen(dnsget_args, stdout=subprocess.PIPE) dnsviz_p = subprocess.Popen(dnsviz_args, stdin=dnsget_p.stdout) except OSError as e: sys.stderr.write('error: %s\n' % e) return False else: dnsget_p.stdout.close() dnsviz_p.communicate() return dnsget_p.returncode == 0 and dnsviz_p.returncode == 0 class DVCommandLine: def __init__(self, args): self.args = args self.arg_index = 0 self.options = { 'rdtype': None, 'rdclass': None, 'use_ipv4': None, 'use_ipv6': None, 'client_ipv4': None, 'client_ipv6': None, } self.nameservers = [] self.global_query_options = [] self.queries = [] self._process_args() self._process_network() self._process_nameservers() if not self.queries: self.queries.append(DVCommandLineQuery('.', dns.rdatatype.NS, dns.rdataclass.IN)) for q in self.queries: q.process_nameservers(self.nameservers, self.options['use_ipv4'], self.options['use_ipv6']) q.process_query_options(self.global_query_options) if not q.nameservers and not q.trace: sys.stderr.write('No nameservers to query\n') sys.exit(1) if self.options['rdtype'] is None: self.options['rdtype'] = dns.rdatatype.A if self.options['rdclass'] is None: self.options['rdclass'] = dns.rdataclass.IN def query_and_display(self): ret = True for q in self.queries: if not q.query_and_display(self.options): ret = False return ret def _get_arg(self, has_arg): try: if len(self.args[self.arg_index]) > 2: if not has_arg: sys.stderr.write('"%s" option does not take arguments\n' % self.args[self.arg_index][:2]) sys.exit(1) return self.args[self.arg_index][2:] 
else: if not has_arg: return None else: self.arg_index += 1 if self.arg_index >= len(self.args): sys.stderr.write('"%s" option requires an argument\n' % self.args[self.arg_index - 1]) sys.exit(1) return self.args[self.arg_index] finally: self.arg_index += 1 def _add_server_to_options(self, query): addr = self.args[self.arg_index][1:] self.arg_index += 1 if query is None: self.nameservers.append(addr) else: query.nameservers.append(addr) def _add_reverse_query(self): arg = self._get_arg(True) try: addr = IPAddr(arg) except ValueError: sys.stderr.write('Invalid IP address: "%s"\n' % arg) sys.exit(1) else: qname = addr.arpa_name() return DVCommandLineQuery(qname, dns.rdatatype.PTR, dns.rdataclass.IN) def _add_qname_from_opt(self): qname = self._get_arg(True) return DVCommandLineQuery(qname, None, None) def _add_default_option(self): if self.options['rdclass'] is None: try: self.options['rdclass'] = dns.rdataclass.from_text(self.args[self.arg_index]) except dns.rdataclass.UnknownRdataclass: pass else: self.arg_index += 1 return True if self.options['rdtype'] is None: try: self.options['rdtype'] = dns.rdatatype.from_text(self.args[self.arg_index]) except dns.rdatatype.UnknownRdatatype: pass else: self.arg_index += 1 return True return False def _add_qname(self): qname = self.args[self.arg_index] self.arg_index += 1 # check for optional type try: rdtype = dns.rdatatype.from_text(self.args[self.arg_index]) except (IndexError, dns.rdatatype.UnknownRdatatype): # no type detected; use default rdtype/rdclass rdtype = None rdclass = None else: self.arg_index += 1 # now check for optional class try: rdclass = dns.rdataclass.from_text(self.args[self.arg_index]) except (IndexError, dns.rdataclass.UnknownRdataclass): # no class detected; use default rdclass rdclass = None else: self.arg_index += 1 return DVCommandLineQuery(qname, rdtype, rdclass) def _add_option(self): if self.args[self.arg_index].startswith('-h'): usage() sys.exit(0) elif 
self.args[self.arg_index].startswith('-b'): arg = self._get_arg(True) try: addr = IPAddr(arg) except ValueError: sys.stderr.write('Invalid IP address: "%s"\n' % arg) sys.exit(1) if addr.version == 6: family = socket.AF_INET6 else: family = socket.AF_INET try: s = socket.socket(family) s.bind((addr, 0)) except socket.error as e: if e.errno == errno.EADDRNOTAVAIL: sys.stderr.write('Cannot bind to specified IP address: "%s"\n' % addr) sys.exit(1) else: del s if addr.version == 6: self.options['client_ipv6'] = addr else: self.options['client_ipv4'] = addr elif self.args[self.arg_index].startswith('-c'): arg = self._get_arg(True) try: self.options['rdclass'] = dns.rdataclass.from_text(arg) except dns.rdataclass.UnknownRdataclass: sys.stderr.write('Unknown class: "%s".\n' % arg) sys.exit(1) elif self.args[self.arg_index].startswith('-t'): arg = self._get_arg(True) try: self.options['rdtype'] = dns.rdatatype.from_text(arg) except dns.rdatatype.UnknownRdatatype: sys.stderr.write('Unknown type: "%s".\n' % arg) sys.exit(1) elif self.args[self.arg_index].startswith('-6'): self._get_arg(False) self.options['use_ipv6'] = True elif self.args[self.arg_index].startswith('-4'): self._get_arg(False) self.options['use_ipv4'] = True else: sys.stderr.write('Option "%s" not recognized.\n' % self.args[self.arg_index][:2]) sys.exit(1) def _add_query_option(self, query): if query is None: self.global_query_options.append(self.args[self.arg_index]) else: query.query_options.append(self.args[self.arg_index]) self.arg_index += 1 def _process_args(self): query = None while self.arg_index < len(self.args): # server address if self.args[self.arg_index][0] == '@': self._add_server_to_options(query) # reverse lookup elif self.args[self.arg_index].startswith('-x'): query = self._add_reverse_query() self.queries.append(query) # forward lookup (with -q) elif self.args[self.arg_index].startswith('-q'): query = self._add_qname_from_opt() self.queries.append(query) # options elif 
self.args[self.arg_index][0] == '-': self._add_option() # query options elif self.args[self.arg_index][0] == '+': self._add_query_option(query) # global query class/type elif query is None and self._add_default_option(): pass # name to be queried else: query = self._add_qname() self.queries.append(query) def _process_network(self): if self.options['use_ipv4'] is None and self.options['use_ipv6'] is None: self.options['use_ipv4'] = True self.options['use_ipv6'] = True if not self.options['use_ipv4']: self.options['use_ipv4'] = False if not self.options['use_ipv6']: self.options['use_ipv6'] = False def _process_nameservers(self): if not self.nameservers: processed_nameservers = Resolver.get_standard_resolver()._servers else: processed_nameservers = [] for addr in self.nameservers: processed_nameservers.extend(_get_nameservers_for_name(addr)) if not self.options['use_ipv4']: processed_nameservers = [x for x in processed_nameservers if x.version != 4] if not self.options['use_ipv6']: processed_nameservers = [x for x in processed_nameservers if x.version != 6] self.nameservers = processed_nameservers def main(argv): try: q = DVCommandLine(argv[1:]) if q.query_and_display(): sys.exit(0) else: sys.exit(1) except KeyboardInterrupt: sys.exit(1) if __name__ == "__main__": main(sys.argv) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253948.0 dnsviz-0.11.1/dnsviz/config.py.in0000644000175000017500000000257015001473074015707 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2014-2016 Verisign, Inc. # # Copyright 2016-2021 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import os import sys _prefix = '__DNSVIZ_INSTALL_PREFIX__' if (hasattr(sys, 'real_prefix') or hasattr(sys, 'base_prefix')) and \ not _prefix: DNSVIZ_INSTALL_PREFIX = sys.prefix else: DNSVIZ_INSTALL_PREFIX = _prefix DNSVIZ_SHARE_PATH = os.getenv('DNSVIZ_SHARE_PATH', os.path.join(DNSVIZ_INSTALL_PREFIX, 'share', 'dnsviz')) JQUERY_PATH = __JQUERY_PATH__ JQUERY_UI_PATH = __JQUERY_UI_PATH__ JQUERY_UI_CSS_PATH = __JQUERY_UI_CSS_PATH__ RAPHAEL_PATH = __RAPHAEL_PATH__ RESOLV_CONF = __RESOLV_CONF__ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/crypto.py0000644000175000017500000003240215001472663015355 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import atexit import base64 import binascii import logging import struct import hashlib import os import re from cryptography.hazmat.backends import openssl as OpenSSL from cryptography.hazmat.primitives.asymmetric import dsa as DSA from cryptography.hazmat.primitives.asymmetric import ec as EC from cryptography.hazmat.primitives.asymmetric import ed25519 as ED25519 from cryptography.hazmat.primitives.asymmetric import ed448 as ED448 from cryptography.hazmat.primitives.asymmetric import rsa as RSA from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import utils from cryptography.hazmat.primitives import hashes from cryptography.exceptions import InvalidSignature from . import format as fmt lb2s = fmt.latin1_binary_to_string logger = logging.getLogger(__name__) ALG_TYPE_DNSSEC = 0 ALG_TYPE_DIGEST = 1 ALG_TYPE_NSEC3 = 2 ALG_TYPE_DNSSEC_TEXT = [ 'algorithm', 'digest algorithm', 'NSEC3 algorithm', ] _crypto_sources = { 'M2Crypto >= 0.24.0 and openssl >= 1.1.0 plus the OpenSSL GOST Engine': (set([12]), set([3]), set()), } _logged_modules = (set(), set(), set()) _supported_algs = set([1,3,5,6,7,8,10,13,14,15,16]) _supported_digest_algs = set([1,2,4]) _supported_nsec3_algs = set([1]) GOST_PREFIX = b'\x30\x63\x30\x1c\x06\x06\x2a\x85\x03\x02\x02\x13\x30\x12\x06\x07\x2a\x85\x03\x02\x02\x23\x01\x06\x07\x2a\x85\x03\x02\x02\x1e\x01\x03\x43\x00\x04\x40' GOST_ENGINE_NAME = b'gost' GOST_DIGEST_NAME = b'GOST R 34.11-94' # For backwards compatibility with cryptography < 36.0. 
# See https://cryptography.io/en/latest/faq/#faq-missing-backend backend = OpenSSL.backend # python3/python2 dual compatibility if not isinstance(GOST_ENGINE_NAME, str): GOST_ENGINE_NAME = lb2s(GOST_ENGINE_NAME) GOST_DIGEST_NAME = lb2s(GOST_DIGEST_NAME) try: # available from python 3.1 base64encodebytes = base64.encodebytes except AttributeError: # available until python 3.8 base64encodebytes = base64.encodestring EC_NOCOMPRESSION = b'\x04' def _init_dynamic(): try: Engine.load_dynamic() except Engine.EngineError: pass else: atexit.register(Engine.cleanup) def _check_gost_support(): _gost_init() try: md = EVP.MessageDigest(GOST_DIGEST_NAME) except ValueError: pass else: _supported_algs.add(12) _supported_digest_algs.add(3) finally: _gost_cleanup() def alg_is_supported(alg): return alg in _supported_algs def digest_alg_is_supported(alg): return alg in _supported_digest_algs def nsec3_alg_is_supported(alg): return alg in _supported_nsec3_algs def _log_unsupported_alg(alg, alg_type): for mod in _crypto_sources: if alg in _crypto_sources[mod][alg_type]: if mod not in _logged_modules[alg_type]: _logged_modules[alg_type].add(mod) logger.warning('Warning: Without the installation of %s, cryptographic validation of DNSSEC %s %d (and possibly others) is not supported.' 
% (mod, ALG_TYPE_DNSSEC_TEXT[alg_type], alg)) return def _gost_init(): try: gost = Engine.Engine(GOST_ENGINE_NAME) gost.init() gost.set_default() except ValueError: pass def _gost_cleanup(): from M2Crypto import Engine try: gost = Engine.Engine(GOST_ENGINE_NAME) except ValueError: pass else: gost.finish() try: from M2Crypto import Engine, EVP, m2 _init_dynamic() _check_gost_support() except: pass def validate_ds_digest(digest_alg, digest, dnskey_msg): mydigest = get_ds_digest(digest_alg, dnskey_msg) if mydigest is None: return None else: return mydigest == digest def get_ds_digest(digest_alg, dnskey_msg): if not digest_alg_is_supported(digest_alg): _log_unsupported_alg(digest_alg, ALG_TYPE_DIGEST) return None if digest_alg == 1: md = hashes.Hash(hashes.SHA1(), backend) md.update(dnskey_msg) return md.finalize() elif digest_alg == 2: md = hashes.Hash(hashes.SHA256(), backend) md.update(dnskey_msg) return md.finalize() elif digest_alg == 3: _gost_init() try: md = EVP.MessageDigest(GOST_DIGEST_NAME) md.update(dnskey_msg) return md.final() finally: _gost_cleanup() elif digest_alg == 4: md = hashes.Hash(hashes.SHA384(), backend) md.update(dnskey_msg) return md.finalize() def _dnskey_to_dsa(key): # get T t = key[0] # python3/python2 dual compatibility if not isinstance(t, int): t = ord(t) offset = 1 # get Q new_offset = offset+20 # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): q = int.from_bytes(key[offset:new_offset], 'big') else: q = int(key[offset:new_offset].encode('hex'), 16) offset = new_offset # get P new_offset = offset+64+(t<<3) # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): p = int.from_bytes(key[offset:new_offset], 'big') else: p = int(key[offset:new_offset].encode('hex'), 16) offset = new_offset # get G new_offset = offset+64+(t<<3) # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): g = int.from_bytes(key[offset:new_offset], 'big') else: g = int(key[offset:new_offset].encode('hex'), 16) offset = 
new_offset # get Y new_offset = offset+64+(t<<3) # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): y = int.from_bytes(key[offset:new_offset], 'big') else: y = int(key[offset:new_offset].encode('hex'), 16) offset = new_offset # create the DSA public key param_nums = DSA.DSAParameterNumbers(p, q, g) dsa = DSA.DSAPublicNumbers(y, param_nums) try: return dsa.public_key(backend) except ValueError: return None def _dnskey_to_rsa(key): try: # get the exponent length e_len = key[0] except IndexError: return None # python3/python2 dual compatibility if not isinstance(e_len, int): e_len = ord(e_len) offset = 1 if e_len == 0: e_len, = struct.unpack(b'!H',key[1:3]) offset = 3 if len(key) < offset + e_len: return None # get the exponent # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): e = int.from_bytes(key[offset:offset+e_len], 'big') else: e = int(key[offset:offset+e_len].encode('hex'), 16) offset += e_len if len(key) <= offset: return None # get the modulus # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): n = int.from_bytes(key[offset:], 'big') else: n = int(key[offset:].encode('hex'), 16) # create the RSA public key rsa = RSA.RSAPublicNumbers(e, n) try: return rsa.public_key(backend) except ValueError: return None def _dnskey_to_gost(key): der = GOST_PREFIX + key pem = b'-----BEGIN PUBLIC KEY-----\n'+base64encodebytes(der)+b'-----END PUBLIC KEY-----' return EVP.load_key_string_pubkey(pem) def _dnskey_to_ed(alg, key): if alg == 15: try: return ED25519.Ed25519PublicKey.from_public_bytes(key) except ValueError: return None elif alg == 16: try: return ED448.Ed448PublicKey.from_public_bytes(key) except ValueError: return None else: raise ValueError('Algorithm not supported') def _dnskey_to_ec(alg, key): if alg == 13: curve = EC.SECP256R1() elif alg == 14: curve = EC.SECP384R1() else: raise ValueError('Algorithm not supported') try: return EC.EllipticCurvePublicKey.from_encoded_point(curve, EC_NOCOMPRESSION + key) except 
ValueError: return None def _validate_rrsig_rsa(alg, sig, msg, key): pubkey = _dnskey_to_rsa(key) # if the key is invalid, then the signature is also invalid if pubkey is None: return False if alg in (1,): hsh = hashes.MD5() elif alg in (5,7): hsh = hashes.SHA1() elif alg in (8,): hsh = hashes.SHA256() elif alg in (10,): hsh = hashes.SHA512() else: raise ValueError('RSA Algorithm unknown.') try: pubkey.verify(sig, msg, padding.PKCS1v15(), hsh) except InvalidSignature: return False else: return True def _validate_rrsig_dsa(alg, sig, msg, key): pubkey = _dnskey_to_dsa(key) # if the key is invalid, then the signature is also invalid if pubkey is None: return False # get T t = sig[0] # python3/python2 dual compatibility if not isinstance(t, int): t = ord(t) offset = 1 # get R new_offset = offset+20 # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): r = int.from_bytes(sig[offset:new_offset], 'big') else: r = int(sig[offset:new_offset].encode('hex'), 16) offset = new_offset # get S new_offset = offset+20 # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): s = int.from_bytes(sig[offset:new_offset], 'big') else: s = int(sig[offset:new_offset].encode('hex'), 16) offset = new_offset sig = utils.encode_dss_signature(r, s) try: pubkey.verify(sig, msg, hashes.SHA1()) except InvalidSignature: return False else: return True def _validate_rrsig_gost(alg, sig, msg, key): _gost_init() try: pubkey = _dnskey_to_gost(key) # if the key is invalid, then the signature is also invalid if pubkey is None: return False pubkey.md = m2.get_digestbyname(GOST_DIGEST_NAME) pubkey.verify_init() pubkey.verify_update(msg) return pubkey.verify_final(sig) == 1 finally: _gost_cleanup() def _validate_rrsig_ec(alg, sig, msg, key): pubkey = _dnskey_to_ec(alg, key) # if the key is invalid, then the signature is also invalid if pubkey is None: return False if alg in (13,): alg = EC.ECDSA(hashes.SHA256()) sigsize = 64 elif alg in (14,): alg = EC.ECDSA(hashes.SHA384()) sigsize 
= 96 else: raise ValueError('EC hash algorithm unknown!') if sigsize != len(sig): return False offset = 0 # get R new_offset = offset+sigsize//2 # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): r = int.from_bytes(sig[offset:new_offset], 'big') else: r = int(sig[offset:new_offset].encode('hex'), 16) offset = new_offset # get S new_offset = offset+sigsize//2 # python3/python2 dual compatibility if hasattr(int, 'from_bytes'): s = int.from_bytes(sig[offset:new_offset], 'big') else: s = int(sig[offset:new_offset].encode('hex'), 16) offset = new_offset sig = utils.encode_dss_signature(r, s) try: pubkey.verify(sig, msg, alg) except InvalidSignature: return False else: return True def _validate_rrsig_ed(alg, sig, msg, key): pubkey = _dnskey_to_ed(alg, key) # if the key is invalid, then the signature is also invalid if pubkey is None: return False try: pubkey.verify(sig, msg) except InvalidSignature: return False else: return True def validate_rrsig(alg, sig, msg, key): if not alg_is_supported(alg): _log_unsupported_alg(alg, ALG_TYPE_DNSSEC) return None # create an RSA key object for RSA keys if alg in (1,5,7,8,10): return _validate_rrsig_rsa(alg, sig, msg, key) elif alg in (3,6): return _validate_rrsig_dsa(alg, sig, msg, key) elif alg in (12,): return _validate_rrsig_gost(alg, sig, msg, key) elif alg in (13,14): return _validate_rrsig_ec(alg, sig, msg, key) elif alg in (15,16): return _validate_rrsig_ed(alg, sig, msg, key) def get_digest_for_nsec3(val, salt, alg, iterations): if not nsec3_alg_is_supported(alg): _log_unsupported_alg(alg, ALG_TYPE_NSEC3) return None if alg == 1: hash_func = hashlib.sha1 for i in range(iterations + 1): val = hash_func(val + salt).digest() return val ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/format.py0000644000175000017500000001302315001472663015323 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, 
and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2021 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import calendar import codecs import datetime import re import time import dns.name, dns.rdatatype DNSKEY_FLAGS = {'ZONE': 0x0100, 'SEP': 0x0001, 'revoke': 0x0080} DNSKEY_PROTOCOLS = { 3: 'DNSSEC' } DNSKEY_ALGORITHMS = { 1: 'RSA/MD5', 2: 'Diffie-Hellman', 3: 'DSA/SHA1', 5: 'RSA/SHA-1', 6: 'DSA-NSEC3-SHA1', 7: 'RSASHA1-NSEC3-SHA1', \ 8: 'RSA/SHA-256', 10: 'RSA/SHA-512', 12: 'GOST R 34.10-2001', 13: 'ECDSA Curve P-256 with SHA-256', 14: 'ECDSA Curve P-384 with SHA-384', 15: 'Ed25519', 16: 'Ed448' } DS_DIGEST_TYPES = { 1: 'SHA-1', 2: 'SHA-256', 3: 'GOST 34.11-94', 4: 'SHA-384' } NSEC3_FLAGS = {'OPTOUT': 0x01} DNS_FLAG_DESCRIPTIONS = { 32768: 'Query Response', 1024: 'Authoritative Answer', 512: 'Truncated Response', 256: 'Recursion Desired', 128: 'Recursion Available', 32: 'Authentic Data', 16: 'Checking Disabled' } EDNS_FLAG_DESCRIPTIONS = { 32768: 'DNSSEC answer OK' } EDNS_OPT_DESCRIPTIONS = { 3: 'NSID', 8: 'edns-client-subnet', 10: 'COOKIE' } FMT_MS = '%Y-%m-%d %H:%M:%S.%f %Z' FMT_NO_MS = '%Y-%m-%d %H:%M:%S %Z' ZERO = 
datetime.timedelta(0) class UTC(datetime.tzinfo): '''UTC''' def utcoffset(self, dt): return ZERO def tzname(self, dt): # python3/python2 dual compatibility if type(b'') is str: return b'UTC' else: return 'UTC' def dst(self, dt): return ZERO utc = UTC() ################# # Timestamp conversions def timestamp_to_datetime(timestamp, tz=utc): return datetime.datetime.fromtimestamp(timestamp, tz) def datetime_to_timestamp(dt): return calendar.timegm(dt.timetuple()) + dt.microsecond/1.0e6 def str_to_datetime(s, tz=utc): return timestamp_to_datetime(str_to_timestamp(s), tz) def str_to_timestamp(s): try: return calendar.timegm(time.strptime(s, FMT_NO_MS)) except ValueError: return calendar.timegm(time.strptime(s, FMT_MS)) def datetime_to_str(dt): if dt.microsecond: return dt.strftime(FMT_MS) else: return dt.strftime(FMT_NO_MS) def timestamp_to_str(timestamp): return datetime_to_str(timestamp_to_datetime(timestamp)) ################# # Human representation of time def humanize_time(seconds, days=None): if days is None: days, remainder = divmod(seconds, 86400) else: remainder = seconds hours, remainder = divmod(remainder, 3600) minutes, seconds = divmod(remainder, 60) output = '' if days > 0: if days != 1: plural = 's' else: plural = '' output += '%d day%s' % (days, plural) else: if hours > 0: if hours != 1: plural = 's' else: plural = '' output += '%d hour%s' % (hours, plural) if minutes > 0: if output: output += ', ' if minutes != 1: plural = 's' else: plural = '' output += '%d minute%s' % (minutes, plural) if not output: if seconds != 1: plural = 's' else: plural = '' output += '%d second%s' % (seconds, plural) return output def format_diff(date_now, date_relative): if date_now > date_relative: diff = date_now - date_relative suffix = 'in the past' else: diff = date_relative - date_now suffix = 'in the future' return '%s %s' % (humanize_time(diff.seconds, diff.days), suffix) ################# # Human representation of DNS names def format_nsec3_name(name): return 
lb2s(dns.name.from_text(name.labels[0].upper(), name.parent().canonicalize()).to_text()) def format_nsec3_rrset_text(nsec3_rrset_text): return re.sub(r'^(\d+\s+\d+\s+\d+\s+\S+\s+)([0-9a-zA-Z]+)', lambda x: '%s%s' % (x.group(1), x.group(2).upper()), nsec3_rrset_text).rstrip('.') def humanize_name(name, idn=False, canonicalize=True): if canonicalize: name = name.canonicalize() if idn: try: name = name.to_unicode() except UnicodeError: name = lb2s(name.to_text()) else: name = lb2s(name.to_text()) if name == '.': return name return name.rstrip('.') def latin1_binary_to_string(s): # python3/python2 dual compatibility #XXX In places where this method wraps calls to dns.name.Name.to_text(), # this is no longer needed with dnspython 1.15.0 if isinstance(s, bytes): return codecs.decode(s, 'latin1') return s lb2s = latin1_binary_to_string ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/ipaddr.py0000644000175000017500000000656715001472663015315 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . 
#

from __future__ import unicode_literals

import binascii
import codecs
import re
import socket

# matches a trailing zone-id/interface suffix (e.g., "%eth0") on an
# IPv6 link-local address
INTERFACE_RE = re.compile(r'%[a-z0-9]+$')


class LinkLocalAddress(ValueError):
    '''Raised when a link-local address with an interface suffix is supplied.'''
    pass


class IPAddr(str):
    '''A str subclass representing a normalized IPv4 or IPv6 address.

    The textual value is canonicalized through inet_pton()/inet_ntop();
    comparisons and hashing use the packed binary form.'''

    def __new__(cls, string):
        # python 2/3 compatibility
        if isinstance(string, bytes):
            string = codecs.decode(string, 'latin1')
        if ':' in string:
            af = socket.AF_INET6
            vers = 6
            if INTERFACE_RE.search(string) is not None:
                raise LinkLocalAddress()
        else:
            af = socket.AF_INET
            vers = 4
        try:
            ipaddr_bytes = socket.inet_pton(af, string)
        except socket.error:
            raise ValueError('Invalid value for IP address: %s' % string)
        # normalize the textual form by round-tripping through inet_ntop()
        obj = super(IPAddr, cls).__new__(cls, socket.inet_ntop(af, ipaddr_bytes))
        obj._ipaddr_bytes = ipaddr_bytes
        obj.version = vers
        return obj

    def _check_class_for_cmp(self, other):
        if self.__class__ != other.__class__:
            raise TypeError('Cannot compare IPAddr to %s!' % other.__class__.__name__)

    def __lt__(self, other):
        # IPv4 (4 bytes) sorts before IPv6 (16 bytes); within a family,
        # compare the packed binary form
        self._check_class_for_cmp(other)
        if len(self._ipaddr_bytes) < len(other._ipaddr_bytes):
            return True
        elif len(self._ipaddr_bytes) > len(other._ipaddr_bytes):
            return False
        else:
            return self._ipaddr_bytes < other._ipaddr_bytes

    def __eq__(self, other):
        if other is None:
            return False
        if isinstance(other, IPAddr):
            return self._ipaddr_bytes == other._ipaddr_bytes
        else:
            # fixed: previously compared the super() proxy object itself
            # (super(IPAddr, self) == other), which was always False;
            # delegate to str equality so comparison against a plain
            # string of the same text succeeds
            return super(IPAddr, self).__eq__(other)

    def __hash__(self):
        return hash(self._ipaddr_bytes)

    def arpa_name(self):
        '''Return the reverse-DNS (.arpa) name for this address, with
        trailing dot.'''
        if self.version == 6:
            # fixed: decode the hexlify() result so iteration yields
            # one-character strings on python 3 as well; iterating bytes
            # on python 3 yields ints, which str.join() cannot handle
            nibbles = list(codecs.decode(binascii.hexlify(self._ipaddr_bytes), 'latin1'))
            nibbles.reverse()
            name = '.'.join(nibbles)
            name += '.ip6.arpa.'
        else:
            octets = self.split('.')
            octets.reverse()
            name = '.'.join(octets)
            name += '.in-addr.arpa.'
        return name


LOOPBACK_IPV4_RE = re.compile(r'^127')
# fixed: the dots separating the IPv4 octets are now escaped; previously
# "." matched any character, so strings like "::1234567" matched too
IPV4_MAPPED_IPV6_RE = re.compile(r'^::(ffff:)?\d+\.\d+\.\d+\.\d+$', re.IGNORECASE)
LOOPBACK_IPV6 = IPAddr('::1')
RFC_1918_RE = re.compile(r'^(0?10|172\.0?(1[6-9]|2[0-9]|3[0-1])|192\.168)\.')
LINK_LOCAL_RE = re.compile(r'^fe[89ab][0-9a-f]:', re.IGNORECASE)
UNIQ_LOCAL_RE = re.compile(r'^fd[0-9a-f]{2}:', re.IGNORECASE)
ZERO_SLASH8_RE = re.compile(r'^0\.')
ANY_IPV6 = IPAddr('::')
ANY_IPV4 = IPAddr('0.0.0.0')

# --- original archive license header for dnsviz-0.11.1/dnsviz/query.py ---
# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2021 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see .
#

from __future__ import unicode_literals

import binascii
import bisect
import copy
import errno
import io
import socket
import struct
import time

# minimal support for python2.6
try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict

# python3/python2 dual compatibility
try:
    import queue
except ImportError:
    import Queue as queue

import dns.edns, dns.exception, dns.flags, dns.message, dns.rcode, \
        dns.rdataclass, dns.rdatatype

from .ipaddr import *
from .response import *
from . import transport
from .format import latin1_binary_to_string as lb2s

# Causes of retries (shared numbering with response error codes where the
# two concepts coincide)
RETRY_CAUSE_NETWORK_ERROR = RESPONSE_ERROR_NETWORK_ERROR = 1
RETRY_CAUSE_FORMERR = RESPONSE_ERROR_FORMERR = 2
RETRY_CAUSE_TIMEOUT = RESPONSE_ERROR_TIMEOUT = 3
RETRY_CAUSE_OTHER = RESPONSE_ERROR_OTHER = 4
RETRY_CAUSE_TC_SET = 5
RETRY_CAUSE_RCODE = RESPONSE_ERROR_INVALID_RCODE = 6
RETRY_CAUSE_DIAGNOSTIC = 7
retry_causes = {
        RETRY_CAUSE_NETWORK_ERROR: 'NETWORK_ERROR',
        RETRY_CAUSE_FORMERR: 'FORMERR',
        RETRY_CAUSE_TIMEOUT: 'TIMEOUT',
        RETRY_CAUSE_OTHER: 'ERROR',
        RETRY_CAUSE_TC_SET: 'TC',
        RETRY_CAUSE_RCODE: 'INVALID_RCODE',
        RETRY_CAUSE_DIAGNOSTIC: 'DIAGNOSTIC'
}
retry_cause_codes = {
        'NETWORK_ERROR': RETRY_CAUSE_NETWORK_ERROR,
        'FORMERR': RETRY_CAUSE_FORMERR,
        'TIMEOUT': RETRY_CAUSE_TIMEOUT,
        'ERROR': RETRY_CAUSE_OTHER,
        'TC': RETRY_CAUSE_TC_SET,
        'INVALID_RCODE': RETRY_CAUSE_RCODE,
        'DIAGNOSTIC': RETRY_CAUSE_DIAGNOSTIC,
}
response_errors = {
        RESPONSE_ERROR_NETWORK_ERROR: retry_causes[RETRY_CAUSE_NETWORK_ERROR],
        RESPONSE_ERROR_FORMERR: retry_causes[RETRY_CAUSE_FORMERR],
        RESPONSE_ERROR_TIMEOUT: retry_causes[RETRY_CAUSE_TIMEOUT],
        RESPONSE_ERROR_OTHER: retry_causes[RETRY_CAUSE_OTHER],
        RESPONSE_ERROR_INVALID_RCODE: retry_causes[RETRY_CAUSE_RCODE]
}
response_error_codes = {
        retry_causes[RETRY_CAUSE_NETWORK_ERROR]: RESPONSE_ERROR_NETWORK_ERROR,
        retry_causes[RETRY_CAUSE_FORMERR]: RESPONSE_ERROR_FORMERR,
        retry_causes[RETRY_CAUSE_TIMEOUT]: RESPONSE_ERROR_TIMEOUT,
        retry_causes[RETRY_CAUSE_OTHER]: RESPONSE_ERROR_OTHER,
        retry_causes[RETRY_CAUSE_RCODE]: RESPONSE_ERROR_INVALID_RCODE
}

# Actions that can be taken on retry
RETRY_ACTION_NO_CHANGE = 1
RETRY_ACTION_USE_TCP = 2
RETRY_ACTION_USE_UDP = 3
RETRY_ACTION_SET_FLAG = 4
RETRY_ACTION_CLEAR_FLAG = 5
RETRY_ACTION_DISABLE_EDNS = 6
RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD = 7
RETRY_ACTION_SET_EDNS_FLAG = 8
RETRY_ACTION_CLEAR_EDNS_FLAG = 9
RETRY_ACTION_ADD_EDNS_OPTION = 10
RETRY_ACTION_REMOVE_EDNS_OPTION = 11
RETRY_ACTION_CHANGE_SPORT = 12
RETRY_ACTION_CHANGE_EDNS_VERSION = 13
RETRY_ACTION_UPDATE_DNS_COOKIE = 14
retry_actions = {
        RETRY_ACTION_NO_CHANGE: 'NO_CHANGE',
        RETRY_ACTION_USE_TCP: 'USE_TCP', # implies CHANGE_SPORT
        RETRY_ACTION_USE_UDP: 'USE_UDP', # implies CHANGE_SPORT
        RETRY_ACTION_SET_FLAG: 'SET_FLAG', # implies CHANGE_SPORT
        RETRY_ACTION_CLEAR_FLAG: 'CLEAR_FLAG', # implies CHANGE_SPORT
        RETRY_ACTION_DISABLE_EDNS: 'DISABLE_EDNS', # implies CHANGE_SPORT
        RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD: 'CHANGE_UDP_MAX_PAYLOAD', # implies USE_UDP, CHANGE_SPORT
        RETRY_ACTION_SET_EDNS_FLAG: 'SET_EDNS_FLAG', # implies CHANGE_SPORT
        RETRY_ACTION_CLEAR_EDNS_FLAG: 'CLEAR_EDNS_FLAG', # implies CHANGE_SPORT
        RETRY_ACTION_ADD_EDNS_OPTION: 'ADD_EDNS_OPTION', # implies CHANGE_SPORT
        RETRY_ACTION_REMOVE_EDNS_OPTION: 'REMOVE_EDNS_OPTION', # implies CHANGE_SPORT
        RETRY_ACTION_CHANGE_SPORT: 'CHANGE_SPORT',
        RETRY_ACTION_CHANGE_EDNS_VERSION: 'CHANGE_EDNS_VERSION', # implies CHANGE_SPORT
        RETRY_ACTION_UPDATE_DNS_COOKIE: 'UPDATE_DNS_COOKIE', # implies CHANGE_SPORT
}
retry_action_codes = {
        'NO_CHANGE': RETRY_ACTION_NO_CHANGE,
        'USE_TCP': RETRY_ACTION_USE_TCP,
        'USE_UDP': RETRY_ACTION_USE_UDP,
        'SET_FLAG': RETRY_ACTION_SET_FLAG,
        'CLEAR_FLAG': RETRY_ACTION_CLEAR_FLAG,
        'DISABLE_EDNS': RETRY_ACTION_DISABLE_EDNS,
        'CHANGE_UDP_MAX_PAYLOAD': RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD,
        'SET_EDNS_FLAG': RETRY_ACTION_SET_EDNS_FLAG,
        'CLEAR_EDNS_FLAG': RETRY_ACTION_CLEAR_EDNS_FLAG,
        'ADD_EDNS_OPTION': RETRY_ACTION_ADD_EDNS_OPTION,
        'REMOVE_EDNS_OPTION': RETRY_ACTION_REMOVE_EDNS_OPTION,
        'CHANGE_SPORT': RETRY_ACTION_CHANGE_SPORT,
        'CHANGE_EDNS_VERSION': RETRY_ACTION_CHANGE_EDNS_VERSION,
        'UPDATE_DNS_COOKIE': RETRY_ACTION_UPDATE_DNS_COOKIE,
}

# DNS Cookie (RFC 7873) status values
DNS_COOKIE_NO_COOKIE = 0
DNS_COOKIE_CLIENT_COOKIE_ONLY = 1
DNS_COOKIE_SERVER_COOKIE_FRESH = 2
DNS_COOKIE_SERVER_COOKIE_STATIC = 3
DNS_COOKIE_SERVER_COOKIE_BAD = 4
DNS_COOKIE_IMPROPER_LENGTH = 5

MIN_QUERY_TIMEOUT = 0.1
MAX_CNAME_REDIRECTION = 40


class AcceptResponse(Exception):
    '''An exception raised to stop the process of retrying DNS queries when
    an acceptable response or error condition has been satisfied.'''
    pass


class BindError(Exception):
    '''An error resulting from unsuccessfully trying to bind to an address
    or port.'''
    pass


class SourceAddressBindError(BindError):
    '''An error resulting from unsuccessfully trying to bind to an address.'''
    pass


class PortBindError(BindError):
    '''An error resulting from unsuccessfully trying to bind to a port.'''
    pass


class NoValidServersToQuery(Exception):
    '''An exception raised when a query is executed and the collective
    transport handlers designated don't have the proper network capabilities
    to issue queries to all the servers.'''
    pass


class DNSQueryRetryAttempt:
    '''A failed attempt at a DNS query that invokes a subsequent retry.'''

    def __init__(self, response_time, cause, cause_arg, action, action_arg):
        self.response_time = response_time
        self.cause = cause
        self.cause_arg = cause_arg
        self.action = action
        self.action_arg = action_arg

    def __repr__(self):
        # fixed: the format string had a single placeholder for two
        # arguments, so repr() raised a TypeError; restored the
        # "cause -> action" form
        return '<DNSQueryRetryAttempt: %s -> %s>' % (retry_causes[self.cause], retry_actions[self.action])

    def serialize(self):
        '''Return a serialized version of the query.'''
        d = OrderedDict()
        d['time_elapsed'] = int(self.response_time * 1000)
        d['cause'] = retry_causes.get(self.cause, 'UNKNOWN')
        if self.cause_arg is not None:
            if self.cause == RETRY_CAUSE_NETWORK_ERROR:
                # serialize errno values symbolically (e.g. "ECONNREFUSED");
                # unknown errno values are omitted
                errno_name = errno.errorcode.get(self.cause_arg, None)
                if errno_name is not None:
                    d['cause_arg'] = errno_name
            else:
                d['cause_arg'] = self.cause_arg
        d['action'] = retry_actions.get(self.action, 'UNKNOWN')
        if self.action_arg is not None:
            d['action_arg'] = self.action_arg
        return d

    @classmethod
    def deserialize(cls, d):
        '''Return an instance built from a serialized version of the
        DNSQueryRetryAttempt.'''
        # compatibility with version 1.0
        if 'response_time' in d:
            response_time = d['response_time']
        else:
            response_time = d['time_elapsed']/1000.0
        cause = retry_cause_codes[d['cause']]
        if 'cause_arg' in d:
            if cause == RETRY_CAUSE_NETWORK_ERROR:
                # compatibility with version 1.0, which stored raw errno ints
                if isinstance(d['cause_arg'], int):
                    cause_arg = d['cause_arg']
                else:
                    if hasattr(errno, d['cause_arg']):
                        cause_arg = getattr(errno, d['cause_arg'])
                    else:
                        cause_arg = None
            else:
                cause_arg = d['cause_arg']
        else:
            cause_arg = None
        action = retry_action_codes[d['action']]
        if 'action_arg' in d:
            action_arg = d['action_arg']
        else:
            action_arg = None
        return DNSQueryRetryAttempt(response_time, cause, cause_arg, action, action_arg)


class DNSResponseHandlerFactory(object):
    '''A factory class that holds arguments to create a DNSResponseHandler
    instance.'''

    def __init__(self, cls, *args, **kwargs):
        self._cls = cls
        self._args = args
        self._kwargs = kwargs

    def build(self):
        '''Instantiate a DNSResponseHandler with the args and kwargs saved
        with the initialization of this factory.'''
        obj = self._cls.__new__(self._cls, *self._args, __instantiate=True, **self._kwargs)
        obj.__init__(*self._args, **self._kwargs)
        return obj


class DNSResponseHandler(object):
    '''A base class for handling DNS responses (or exceptions) arising from
    a query attempt.'''

    def __new__(cls, *args, **kwargs):
        '''Redirect the instantiation of a DNSResponseHandler to create
        instead a Factory, from which a DNSResponseHandler in turn is
        built.'''
        if kwargs.pop('__instantiate', None):
            return super(DNSResponseHandler, cls).__new__(cls)
        return DNSResponseHandlerFactory(cls, *args, **kwargs)

    def set_context(self, params, history, request):
        '''Set local parameters pertaining to DNS query.'''
        self._params = params
        self._history = history
        self._request = request

    def handle(self, response_wire, response, response_time):
        '''Handle a DNS response.  The response might be an actual DNS
        message or some type of exception that was raised during query.'''
        # fixed: "raise NotImplemented" raises TypeError (NotImplemented is
        # not an exception); raise the proper exception type instead
        raise NotImplementedError

    def _get_retry_qty(self, cause):
        '''Return the number of retries associated with the DNS query,
        optionally limited to those with a given cause.  Only the
        most recent consecutive run of retries with that cause is counted.'''
        if cause is None:
            return len(self._history)
        total = 0
        for i in range(len(self._history) - 1, -1, -1):
            if self._history[i].cause == cause:
                total += 1
            else:
                break
        return total

    def _get_num_timeouts(self, response):
        '''Return the number of retries attributed to timeouts.'''
        if isinstance(response, dns.exception.Timeout):
            return self._get_retry_qty(RETRY_CAUSE_TIMEOUT) + 1
        return 0

    def _get_num_network_errors(self, response):
        '''Return the number of retries attributed to network errors.'''
        if isinstance(response, (socket.error, EOFError)):
            return self._get_retry_qty(RETRY_CAUSE_NETWORK_ERROR) + 1
        return 0


class ActionIndependentDNSResponseHandler(DNSResponseHandler):
    '''A DNSResponseHandler that is consulted regardless of whether or not
    the response was "handled" previously by another handler.'''
    pass


class RetryOnNetworkErrorHandler(DNSResponseHandler):
    '''Retry the query after some exponentially growing wait period upon a
    network error.'''

    def __init__(self, max_errors):
        self._max_errors = max_errors

    def handle(self, response_wire, response, response_time):
        errors = self._get_num_network_errors(response)
        if errors >= self._max_errors:
            raise AcceptResponse()
        if isinstance(response, (socket.error, EOFError)):
            if hasattr(response, 'errno'):
                errno1 = response.errno
            else:
                errno1 = None
            # exponential backoff before the next attempt
            self._params['wait'] = 0.2*(2**errors)
            if self._params['tcp']:
                action = RETRY_ACTION_CHANGE_SPORT
            else:
                action = RETRY_ACTION_NO_CHANGE
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_NETWORK_ERROR, errno1, action, None)
class UseTCPOnTCFlagHandler(DNSResponseHandler):
    '''Retry with TCP if the TC flag is set in the response.'''

    def handle(self, response_wire, response, response_time):
        if response_wire is None:
            return None
        # python3/python2 dual compatibility: indexing bytes yields ints on
        # python 3 but single-character strings on python 2
        third_octet = response_wire[2]
        if isinstance(response_wire, str):
            third_octet = ord(third_octet)
        if third_octet & 0x02:
            self._params['tcp'] = True
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TC_SET, len(response_wire), RETRY_ACTION_USE_TCP, None)
        return None


class DisableEDNSOnFormerrHandler(DNSResponseHandler):
    '''Disable EDNS if there was some type of issue parsing the message.
    Some servers don't handle EDNS appropriately.'''

    def handle(self, response_wire, response, response_time):
        parse_failed = isinstance(response, (struct.error, dns.exception.FormError))
        if parse_failed and self._request.edns >= 0:
            self._request.use_edns(False)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_FORMERR, None, RETRY_ACTION_DISABLE_EDNS, None)
        return None


class ReduceUDPMaxPayloadOnTimeoutHandler(DNSResponseHandler):
    '''Reduce the EDNS UDP max payload after a given number of timeouts.
    Some servers attempt to send payloads that exceed their PMTU.'''

    def __init__(self, reduced_payload, timeouts):
        self._reduced_payload = reduced_payload
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        over_udp = not self._params['tcp']
        if over_udp and timeouts >= self._timeouts and self._request.payload > self._reduced_payload:
            self._request.use_edns(self._request.edns, self._request.ednsflags, self._reduced_payload, options=self._request.options)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, self._reduced_payload)
        return None


class ClearEDNSFlagOnTimeoutHandler(DNSResponseHandler):
    '''Clear an EDNS flag after a given number of timeouts.'''

    def __init__(self, flag, timeouts):
        self._flag = flag
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        over_udp = not self._params['tcp']
        if over_udp and timeouts >= self._timeouts and (self._request.ednsflags & self._flag):
            self._request.ednsflags &= ~(self._flag & 0xffff)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CLEAR_EDNS_FLAG, self._flag)
        return None


class ChangeEDNSVersionOnTimeoutHandler(DNSResponseHandler):
    '''Change EDNS version after a given number of timeouts.'''

    def __init__(self, edns, timeouts):
        self._edns = edns
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        over_udp = not self._params['tcp']
        if over_udp and timeouts >= self._timeouts and self._request.edns != self._edns:
            self._request.use_edns(self._edns, self._request.ednsflags, self._request.payload, options=self._request.options)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CHANGE_EDNS_VERSION, self._edns)
        return None


class RemoveEDNSOptionOnTimeoutHandler(DNSResponseHandler):
    '''Remove EDNS option after a given number of timeouts.'''

    def __init__(self, timeouts):
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        over_udp = not self._params['tcp']
        if over_udp and timeouts >= self._timeouts and self._request.options:
            # drop the first remaining option and re-issue
            removed = self._request.options[0]
            self._request.use_edns(self._request.edns, self._request.ednsflags, self._request.payload, options=self._request.options[1:])
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_REMOVE_EDNS_OPTION, removed.otype)
        return None


class DisableEDNSOnTimeoutHandler(DNSResponseHandler):
    '''Disable EDNS after a given number of timeouts.  Some servers don't
    respond to EDNS queries.'''

    def __init__(self, timeouts):
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        over_udp = not self._params['tcp']
        if over_udp and timeouts >= self._timeouts and self._request.edns >= 0:
            self._request.use_edns(False)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_DISABLE_EDNS, None)
        return None


class SetFlagOnRcodeHandler(DNSResponseHandler):
    '''Set a flag when a given rcode is returned.  One example of the use of
    this class is to determine if the cause of the SERVFAIL is related to
    DNSSEC validation failure by retrying with the CD flag.'''

    def __init__(self, flag, rcode):
        self._flag = flag
        self._rcode = rcode

    def handle(self, response_wire, response, response_time):
        if not isinstance(response, dns.message.Message):
            return None
        if response.rcode() == self._rcode and not self._request.flags & self._flag:
            self._request.flags |= self._flag
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_RCODE, self._rcode, RETRY_ACTION_SET_FLAG, self._flag)
        return None


class DisableEDNSOnRcodeHandler(DNSResponseHandler):
    '''Disable EDNS if the RCODE in the response indicates that the server
    doesn't implement EDNS.'''

    def handle(self, response_wire, response, response_time):
        if not isinstance(response, dns.message.Message):
            return None
        if response.rcode() in (dns.rcode.NOTIMP, dns.rcode.FORMERR, dns.rcode.SERVFAIL) and self._request.edns >= 0:
            self._request.use_edns(False)
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_RCODE, response.rcode(), RETRY_ACTION_DISABLE_EDNS, None)
        return None


class RemoveEDNSOptionOnRcodeHandler(DNSResponseHandler):
    '''Remove an EDNS option if the RCODE in the response indicates that the
    server didn't handle the request properly.'''

    def __init__(self, rcode):
        self._rcode = rcode

    def handle(self, response_wire, response, response_time):
        if not isinstance(response, dns.message.Message):
            return None
        if response.rcode() == self._rcode and self._request.options:
            removed = self._request.options[0]
            self._request.use_edns(self._request.edns, self._request.ednsflags, self._request.payload, options=self._request.options[1:])
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_RCODE, response.rcode(), RETRY_ACTION_REMOVE_EDNS_OPTION, removed.otype)
        return None


class AddServerCookieOnBADCOOKIE(DNSResponseHandler):
    '''Update the DNS Cookie EDNS option with the server cookie when a
    BADCOOKIE rcode is received.'''

    def _add_server_cookie(self, response):
        # locate the COOKIE option (option code 10) in both the request and
        # the response
        client_opt = None
        for o in self._request.options:
            if o.otype == 10:
                client_opt = o
                break
        if client_opt is None:
            return False
        server_opt = None
        for o in response.options:
            if o.otype == 10:
                server_opt = o
                break
        if server_opt is None:
            return False
        client_cookie = client_opt.data[:8]
        server_cookie1 = client_opt.data[8:]
        server_cookie2 = server_opt.data[8:]
        # nothing to do if we already hold this server cookie
        if server_cookie1 == server_cookie2:
            return False
        client_opt.data = client_cookie + server_cookie2
        return True

    def handle(self, response_wire, response, response_time):
        # rcode 23 is BADCOOKIE
        if isinstance(response, dns.message.Message) and response.rcode() == 23:
            if self._add_server_cookie(response):
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_RCODE, response.rcode(), RETRY_ACTION_UPDATE_DNS_COOKIE, None)
        return None


class UseUDPOnTimeoutHandler(DNSResponseHandler):
    '''Revert to UDP if TCP connectivity fails.'''

    def __init__(self, timeouts):
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        if timeouts >= self._timeouts and self._params['tcp']:
            self._params['tcp'] = False
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_USE_UDP, None)
        return None


class UseUDPOnNetworkErrorHandler(DNSResponseHandler):
    '''Retry the query after some exponentially growing wait period upon a
    network error; revert to UDP once the error threshold is reached.'''

    def __init__(self, max_errors):
        self._max_errors = max_errors

    def handle(self, response_wire, response, response_time):
        errors = self._get_num_network_errors(response)
        if errors >= self._max_errors and self._params['tcp']:
            err_code = getattr(response, 'errno', None)
            self._params['tcp'] = False
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_NETWORK_ERROR, err_code, RETRY_ACTION_USE_UDP, None)
        if isinstance(response, (socket.error, EOFError)):
            err_code = getattr(response, 'errno', None)
            # exponential backoff before the next attempt
            self._params['wait'] = 0.2*(2**errors)
            if self._params['tcp']:
                action = RETRY_ACTION_CHANGE_SPORT
            else:
                action = RETRY_ACTION_NO_CHANGE
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_NETWORK_ERROR, err_code, action, None)
        return None
class PMTUBoundingHandler(DNSResponseHandler):
    '''Probe for the effective path-MTU-limited EDNS UDP payload size by
    driving a small state machine across successive retries: reduce the
    payload after timeouts, establish a lower bound from a successful or
    truncated response, verify an upper bound over TCP, then binary-search
    (PICKLE) between the bounds.'''

    # define states
    START = 1
    # if TIMEOUT -> reduce payload -> REDUCED_PAYLOAD
    # else -> return (pass through to other handlers)
    REDUCED_PAYLOAD = 2
    # if TIMEOUT -> return
    # if TC -> set lower bound, use TCP -> TCP_FOR_TRUNCATE
    # if error -> return
    # else -> set lower bound (msg size), use TCP -> TCP_FOR_UPPER_BOUND
    USE_TCP = 3
    # if TIMEOUT -> return
    # if error -> return
    # else -> set upper bound, set increase payload (msg payload - 1) -> TCP_MINUS_ONE
    TCP_MINUS_ONE = 5
    # if TIMEOUT -> reduce payload (upper - lower)/2 -> PICKLE
    # if errors of some sort (maybe with subhandlers?) -> return
    # else -> keep upper bound, return
    PICKLE = 6
    # if upper - lower <= 1 -> use TCP -> TCP_FINAL
    # if TIMEOUT -> set upper bound, reduce payload ((upper - lower)/2 - lower)/2, PICKLE
    # -> TC???
    # if error -> return
    # else -> set lower bound, increase payload (upper - (upper - lower)/2)/2, PICKLE
    TCP_FINAL = 7
    INVALID = 8

    def __init__(self, reduced_payload, initial_timeouts, max_timeouts, bounding_timeout):
        self._reduced_payload = reduced_payload
        self._initial_timeouts = initial_timeouts
        self._max_timeouts = max_timeouts
        self._bounding_timeout = bounding_timeout

        self._lower_bound = None
        self._upper_bound = None
        self._water_mark = None
        self._state = self.START

    def handle(self, response_wire, response, response_time):
        if self._state == self.INVALID:
            return

        # python3/python2 dual compatibility: indexing bytes yields ints on
        # python 3 but single-character strings on python 2
        if isinstance(response_wire, str):
            map_func = lambda x: ord(x)
        else:
            map_func = lambda x: x

        timeouts = self._get_num_timeouts(response)
        is_timeout = isinstance(response, dns.exception.Timeout)
        is_valid = isinstance(response, dns.message.Message) and response.rcode() in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN)
        is_truncated = response_wire is not None and map_func(response_wire[2]) & 0x02
        if response_wire is not None:
            response_len = len(response_wire)
        else:
            response_len = None

        # only meaningful for EDNS queries that timed out, succeeded, or
        # were truncated; anything else invalidates the probe
        if self._request.edns >= 0 and \
                (is_timeout or is_valid or is_truncated):
            pass
        else:
            self._state = self.INVALID
            return

        if self._state == self.START:
            if timeouts >= self._initial_timeouts:
                self._lower_bound = self._reduced_payload
                self._upper_bound = self._request.payload - 1
                self._request.use_edns(self._request.edns, self._request.ednsflags, self._reduced_payload, options=self._request.options)
                self._state = self.REDUCED_PAYLOAD
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, self._reduced_payload)

        elif self._state == self.REDUCED_PAYLOAD:
            if timeouts >= self._max_timeouts:
                # fixed: this was "self._state == self.INVALID", a no-op
                # comparison, so the handler never actually transitioned to
                # INVALID after exhausting its timeouts
                self._state = self.INVALID
                return None

            if not is_timeout:
                if is_truncated or is_valid:
                    # a response was received: its size is our lower bound;
                    # switch to TCP to learn the true response size
                    self._lower_bound = self._water_mark = response_len
                    self._params['timeout'] = self._bounding_timeout
                    self._params['tcp'] = True
                    self._state = self.USE_TCP
                    if is_truncated:
                        return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TC_SET, response_len, RETRY_ACTION_USE_TCP, None)
                    else:
                        return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, response_len, RETRY_ACTION_USE_TCP, None)

        elif self._state == self.USE_TCP:
            if not is_timeout and is_valid:
                #XXX this is cheating because we're not reporting the change to UDP
                self._params['tcp'] = False
                # probe UDP with a payload one byte smaller than the full
                # TCP response
                payload = response_len - 1
                self._request.use_edns(self._request.edns, self._request.ednsflags, payload, options=self._request.options)
                self._state = self.TCP_MINUS_ONE
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, response_len, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, payload)

        elif self._state == self.TCP_MINUS_ONE:
            if is_timeout:
                self._upper_bound = self._request.payload - 1
                payload = self._lower_bound + (self._upper_bound + 1 - self._lower_bound)//2
                self._request.use_edns(self._request.edns, self._request.ednsflags, payload, options=self._request.options)
                self._state = self.PICKLE
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, payload)
            # if the size of the message is less than the watermark, then perhaps we were rate limited
            elif response_wire is not None and response_len < self._water_mark:
                # but if this isn't the first time, just quit.  it could be that
                # the server simply has some wonky way of determining how/where to truncate.
                if self._history[-1].cause == RETRY_CAUSE_DIAGNOSTIC and self._history[-1].action == RETRY_ACTION_CHANGE_SPORT:
                    self._params['tcp'] = True
                    self._state = self.TCP_FINAL
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, None, RETRY_ACTION_USE_TCP, None)
                else:
                    self._params['wait'] = 1.0
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, None, RETRY_ACTION_CHANGE_SPORT, None)
            # if the response was truncated, then the size of the payload
            # received via TCP is the largest we can receive
            elif is_truncated:
                self._params['tcp'] = True
                self._state = self.TCP_FINAL
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TC_SET, response_len, RETRY_ACTION_USE_TCP, None)

        elif self._state == self.PICKLE:
            if self._upper_bound - self._lower_bound <= 1:
                # the bounds have converged; finish over TCP
                self._params['tcp'] = True
                self._state = self.TCP_FINAL
                if is_truncated:
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TC_SET, response_len, RETRY_ACTION_USE_TCP, None)
                elif is_timeout:
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_USE_TCP, None)
                elif not is_valid:
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, None, RETRY_ACTION_USE_TCP, None)
            elif is_timeout:
                # timeout: current payload is too large; lower the upper bound
                self._upper_bound = self._request.payload - 1
                payload = self._lower_bound + (self._upper_bound + 1 - self._lower_bound)//2
                self._request.use_edns(self._request.edns, self._request.ednsflags, payload, options=self._request.options)
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, payload)
            # if the size of the message is less than the watermark, then perhaps we were rate limited
            elif response_len < self._water_mark:
                # but if this isn't the first time, just quit.  it could be that
                # the server simply has some wonky way of determining how/where to truncate.
                if self._history[-1].cause == RETRY_CAUSE_DIAGNOSTIC and self._history[-1].action == RETRY_ACTION_CHANGE_SPORT:
                    self._params['tcp'] = True
                    self._state = self.TCP_FINAL
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, None, RETRY_ACTION_USE_TCP, None)
                else:
                    self._params['wait'] = 1.0
                    return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, None, RETRY_ACTION_CHANGE_SPORT, None)
            elif is_valid:
                # success: current payload fits; raise the lower bound
                self._lower_bound = self._request.payload
                payload = self._lower_bound + (self._upper_bound + 1 - self._lower_bound)//2
                self._request.use_edns(self._request.edns, self._request.ednsflags, payload, options=self._request.options)
                return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_DIAGNOSTIC, response_len, RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD, payload)

        elif self._state == self.TCP_FINAL:
            pass

        elif self._state == self.INVALID:
            pass


class ChangeTimeoutOnTimeoutHandler(ActionIndependentDNSResponseHandler):
    '''Modify timeout value when a certain number of timeouts is reached.'''

    def __init__(self, timeout, timeouts):
        self._timeout = timeout
        self._timeouts = timeouts

    def handle(self, response_wire, response, response_time):
        timeouts = self._get_num_timeouts(response)
        if isinstance(response, dns.exception.Timeout) and timeouts == self._timeouts:
            self._params['timeout'] = self._timeout


class RetryOnTimeoutHandler(DNSResponseHandler):
    '''Retry with no change when a query times out.'''

    def handle(self, response_wire, response, response_time):
        if isinstance(response, dns.exception.Timeout):
            if self._params['tcp']:
                action = RETRY_ACTION_CHANGE_SPORT
            else:
                action = RETRY_ACTION_NO_CHANGE
            return DNSQueryRetryAttempt(response_time, RETRY_CAUSE_TIMEOUT, None, action, None)


class DefaultAcceptHandler(DNSResponseHandler):
    '''Accept the response if there was no other reason to not accept it.'''

    def handle(self, response_wire, response, response_time):
        raise AcceptResponse()


class LifetimeHandler(ActionIndependentDNSResponseHandler):
    '''Stop handling and retrying if the designated lifetime has been
    exceeded.'''

    def __init__(self, lifetime):
        self._lifetime = lifetime
        self._start = time.time()

    def handle(self, response_wire, response, response_time):
        if self.time_remaining() <= 0:
            raise AcceptResponse()

    def time_remaining(self):
        return max(self._start + self._lifetime - time.time(), 0)
designated lifetime has been exceeded.''' def __init__(self, lifetime): self._lifetime = lifetime self._start = time.time() def handle(self, response_wire, response, response_time): if self.time_remaining() <= 0: raise AcceptResponse() def time_remaining(self): return max(self._start + self._lifetime - time.time(), 0) class MaxTimeoutsHandler(ActionIndependentDNSResponseHandler): '''Stop handling and retrying if the maximum number of timeouts has been exceeded.''' def __init__(self, max_timeouts): self._max_timeouts = max_timeouts def handle(self, response_wire, response, response_time): if self._get_num_timeouts(response) >= self._max_timeouts: raise AcceptResponse() class DNSQueryHandler: '''A handler associated with a DNS query to a server.''' def __init__(self, query, request, server_cookie, server_cookie_status, params, response_handlers, server, client): self.query = query self.request = request self.params = params self.server_cookie = server_cookie self.server_cookie_status = server_cookie_status self._response_handlers = response_handlers self.history = [] self._server = server self._client = client for handler in self._response_handlers: handler.set_context(self.params, self.history, self.request) if query.lifetime is not None: self._expiration = time.time() + query.lifetime else: self._expiration = None self._set_query_time() def _set_query_time(self): self.query_time = time.time() + self.params['wait'] def _reset_wait(self): self.params['wait'] = 0 def get_query_transport_meta(self): return transport.DNSQueryTransportMeta(self.request.to_wire(), self._server, self.params['tcp'], self.get_timeout(), \ self.query.odd_ports.get(self._server, self.query.port), src=self._client, sport=self.params['sport']) def get_remaining_lifetime(self): if self._expiration is None: # send arbitrarily high value return 86400 return max(self._expiration - time.time(), 0) def get_timeout(self): if self._expiration is None: return self.params['timeout'] timeout = 
min(self.params['timeout'], self.get_remaining_lifetime()) if timeout < MIN_QUERY_TIMEOUT: return MIN_QUERY_TIMEOUT return timeout def handle_response(self, response_wire, response, response_time, client, sport): retry_action = None try: for handler in self._response_handlers: if retry_action is None: retry_action = handler.handle(response_wire, response, response_time) if retry_action is not None: if retry_action.action == RETRY_ACTION_NO_CHANGE: self.params['sport'] = sport else: self.params['sport'] = None elif isinstance(handler, ActionIndependentDNSResponseHandler): handler.handle(response_wire, response, response_time) if retry_action is not None: # If we were unable to bind to the source address, then this is # our fault if retry_action.cause == RETRY_CAUSE_NETWORK_ERROR and retry_action.cause_arg == errno.EADDRNOTAVAIL: raise AcceptResponse # If there is no client-side connectivity, then simply return. # #XXX (Note that this only catches the case when a client IP has # not been explicitly specified (i.e., self._client is None). # Explicitly specifying a client IP that cannot connect to a # given destination (e.g., because it is of the wrong address # scope) will result in a regular network failure with # EHOSTUNREACH or ENETUNREACH, as there is no scope comparison # in this code.) 
# (continuation of DNSQueryHandler.handle_response from above)
                if retry_action.cause == RETRY_CAUSE_NETWORK_ERROR and retry_action.cause_arg in (errno.EHOSTUNREACH, errno.ENETUNREACH, errno.EAFNOSUPPORT) and client is None:
                    raise AcceptResponse

                # if this error was our fault, don't add it to the history
                if retry_action.cause == RETRY_CAUSE_NETWORK_ERROR and retry_action.cause_arg == errno.EMFILE:
                    pass
                else:
                    self.history.append(retry_action)

                self._set_query_time()
                self._reset_wait()

        except AcceptResponse:
            # a handler decided this response is final
            return response

class AggregateDNSResponse(object):
    '''Aggregation of DNS responses, bucketed by disposition (answer, NODATA,
    NXDOMAIN, referral, truncated, or error).'''

    # when True, TTLs are significant when comparing RRsets for equality
    ttl_cmp = False

    def __init__(self):
        self.answer_info = []
        self.nodata_info = []
        self.nxdomain_info = []
        self.referral_info = []
        self.truncated_info = []
        self.error_info = []

    def _aggregate_response(self, server, client, response, qname, rdtype, rdclass, bailiwick):
        # Classify a single response and file it into the proper bucket.
        if response.is_valid_response():
            if response.is_complete_response():
                is_referral = response.is_referral(qname, rdtype, rdclass, bailiwick)
                self._aggregate_answer(server, client, response, is_referral, qname, rdtype, rdclass)
            else:
                # valid but incomplete, i.e., truncated
                truncated_info = TruncatedResponse(response.message.to_wire())
                DNSResponseComponent.insert_into_list(truncated_info, self.truncated_info, server, client, response)
        else:
            self._aggregate_error(server, client, response)

    def _aggregate_answer(self, server, client, response, referral, qname, rdtype, rdclass):
        # Aggregate a complete, valid response: follow CNAME/DNAME chains in
        # the answer section (bounded by MAX_CNAME_REDIRECTION); fall back to
        # referral or negative-response bookkeeping when no answer RRset is
        # present.
        msg = response.message

        # sort with the most specific DNAME infos first
        dname_rrsets = [x for x in msg.answer if x.rdtype == dns.rdatatype.DNAME and x.rdclass == rdclass]
        dname_rrsets.sort(reverse=True)

        qname_sought = qname
        try:
            i = 0
            while i < MAX_CNAME_REDIRECTION:

                # synthesize a CNAME from a DNAME, if possible
                synthesized_cname_info = None
                for dname_rrset in dname_rrsets:
                    if qname_sought.parent().is_subdomain(dname_rrset.name):
                        synthesized_cname_info = RRsetInfo(cname_from_dname(qname_sought, dname_rrset), self.ttl_cmp, RRsetInfo(dname_rrset, self.ttl_cmp))
                        break

                try:
                    rrset_info = self._aggregate_answer_rrset(server, client, response, qname_sought, rdtype, rdclass, referral)

                    # if there was a synthesized CNAME, add it to the rrset_info
                    if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass and synthesized_cname_info is not None:
                        synthesized_cname_info = rrset_info.create_or_update_cname_from_dname_info(synthesized_cname_info, server, client, response, rdclass)
                        synthesized_cname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral)

                except KeyError:
                    # no RRset found for qname_sought; if a CNAME could be
                    # synthesized from a DNAME, use it, otherwise propagate
                    if synthesized_cname_info is None:
                        raise

                    synthesized_cname_info = DNSResponseComponent.insert_into_list(synthesized_cname_info, self.answer_info, server, client, response)
                    synthesized_cname_info.dname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral)
                    rrset_info = synthesized_cname_info

                # if the answer was a CNAME, then continue following the chain
                if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass:
                    qname_sought = rrset_info.rrset[0].target
                else:
                    break

                i += 1

        except KeyError:
            # no answer RRset at all
            if referral and rdtype != dns.rdatatype.DS:
                # add referrals
                try:
                    rrset = [x for x in msg.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.NS and x.rdclass == rdclass][0]
                except IndexError:
                    pass
                else:
                    referral_info = ReferralResponse(rrset.name)
                    DNSResponseComponent.insert_into_list(referral_info, self.referral_info, server, client, response)

                # with referrals, don't do any further processing
                return

            # don't store no answer or NXDOMAIN info for names other than qname
            # if recursion is not desired and available
            if qname_sought != qname and not response.recursion_desired_and_available():
                return

            if msg.rcode() == dns.rcode.NXDOMAIN:
                neg_response_info_list = self.nxdomain_info
            else:
                neg_response_info_list = self.nodata_info

            neg_response_info = NegativeResponseInfo(qname_sought, rdtype, self.ttl_cmp)
            neg_response_info = DNSResponseComponent.insert_into_list(neg_response_info, neg_response_info_list, server, client, response)
            neg_response_info.create_or_update_nsec_info(server, client, response, rdclass, referral)
# (continuation of AggregateDNSResponse._aggregate_answer from above)
            neg_response_info.create_or_update_soa_info(server, client, response, rdclass, referral)

    def _aggregate_answer_rrset(self, server, client, response, qname, rdtype, rdclass, referral):
        # Find the answer RRset for qname (or its CNAME) and record it, along
        # with any covering RRSIGs.  Raises KeyError if neither the requested
        # type nor a CNAME is present.
        msg = response.message

        try:
            rrset = msg.find_rrset(msg.answer, qname, rdclass, rdtype)
        except KeyError:
            rrset = msg.find_rrset(msg.answer, qname, rdclass, dns.rdatatype.CNAME)

        rrset_info = RRsetInfo(rrset, self.ttl_cmp)
        rrset_info = DNSResponseComponent.insert_into_list(rrset_info, self.answer_info, server, client, response)
        rrset_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral)

        return rrset_info

    def _aggregate_error(self, server, client, response):
        # Record an invalid response: either a transport-level error (no
        # message at all) or a message carrying an error rcode.
        msg = response.message
        if msg is None:
            error_info = DNSResponseError(response.error, response.errno)
        else:
            error_info = DNSResponseError(RESPONSE_ERROR_INVALID_RCODE, msg.rcode())
        error_info = DNSResponseComponent.insert_into_list(error_info, self.error_info, server, client, response)

class DNSQuery(object):
    '''An simple DNS Query and its responses.'''

    def __init__(self, qname, rdtype, rdclass, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp):

        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.flags = flags
        self.edns = edns
        self.edns_max_udp_payload = edns_max_udp_payload
        self.edns_flags = edns_flags
        self.edns_options = edns_options
        self.tcp = tcp

        # responses, indexed by server, then by client address
        self.responses = {}

    def copy(self, bailiwick_map, default_bailiwick, with_responses=True):
        '''Return a clone of the current DNSQuery instance.  Parameters are
        passed by reference rather than copied.  Note: if it turns out that
        these member variables might be modified somehow by other instances in
        future use, then these will need to be copies.'''

        clone = DNSQuery(self.qname, self.rdtype, self.rdclass, self.flags, self.edns, self.edns_max_udp_payload, self.edns_flags, self.edns_options, self.tcp)
        if with_responses:
            for server in self.responses:
                bailiwick = bailiwick_map.get(server, default_bailiwick)
                for client, response in self.responses[server].items():
                    response_clone = response.copy()
                    response_clone.query = clone
                    clone.add_response(server, client, response_clone, bailiwick)
        return clone

    def join(self, query, bailiwick_map, default_bailiwick):
        # Merge the responses of another DNSQuery with identical parameters
        # into a new clone of this one.
        if not (isinstance(query, DNSQuery)):
            raise ValueError('A DNSQuery instance can only be joined with another DNSQuery instance.')
        if not (self.qname.to_text() == query.qname.to_text() and self.rdtype == query.rdtype and \
                self.rdclass == query.rdclass and self.flags == query.flags and \
                self.edns == query.edns and self.edns_max_udp_payload == query.edns_max_udp_payload and \
                self.edns_flags == query.edns_flags and self.edns_options == query.edns_options and \
                self.tcp == query.tcp):
            raise ValueError('DNS query parameters for DNSQuery instances being joined must be the same.')

        clone = self.copy(bailiwick_map, default_bailiwick)
        for server in query.responses:
            bailiwick = bailiwick_map.get(server, default_bailiwick)
            for client, response in query.responses[server].items():
                response_clone = response.copy()
                response_clone.query = clone
                clone.add_response(server, client, response_clone, bailiwick)
        return clone

    def project(self, servers, bailiwick_map, default_bailiwick):
        # Return a clone containing only the responses from the given
        # (already-queried) servers.
        if servers.difference(self.responses):
            raise ValueError('A DNSQuery can only project responses from servers that have been queried.')

        clone = self.copy(bailiwick_map, default_bailiwick, with_responses=False)
        for server in servers:
            bailiwick = bailiwick_map.get(server, default_bailiwick)
            for client, response in self.responses[server].items():
                response_clone = response.copy()
                response_clone.query = clone
                clone.add_response(server, client, response_clone, bailiwick)
        return clone

    def add_response(self, server, client, response, bailiwick):
        # Associate a response from (server, client) with this query.  A
        # response object may belong to only one query, and only one response
        # per server/client pair is allowed.
        if server not in self.responses:
            self.responses[server] = {}
        if response.query is not None and response.query is not self:
            raise ValueError('Response for %s/%s is already associated with a query.' % (self.qname, dns.rdatatype.to_text(self.rdtype)))
        if client in self.responses[server]:
            raise ValueError('Response for %s/%s from server %s to client %s already exists.' % (self.qname, dns.rdatatype.to_text(self.rdtype), server, client))
        response.query = self
        self.responses[server][client] = response

    def is_authoritative_answer_all(self):
        # True if every valid, complete response is an authoritative answer
        # (and at least one such response exists).
        val = None
        for server in self.responses:
            for response in self.responses[server].values():
                if not (response.is_valid_response() and response.is_complete_response()):
                    continue
                if response.is_authoritative() and response.is_answer(self.qname, self.rdtype):
                    val = True
                else:
                    return False
        if val is None:
            val = False
        return val

    def is_answer_any(self):
        # True if any valid, complete response answers the question.
        for server in self.responses:
            for response in self.responses[server].values():
                if not (response.is_valid_response() and response.is_complete_response()):
                    continue
                if response.is_answer(self.qname, self.rdtype):
                    return True
        return False

    def is_nxdomain_all(self):
        # True if every valid, complete response is NXDOMAIN (and at least
        # one such response exists).
        val = None
        for server in self.responses:
            for response in self.responses[server].values():
                if not (response.is_valid_response() and response.is_complete_response()):
                    continue
                if response.is_nxdomain(self.qname, self.rdtype):
                    if val is None:
                        val = True
                else:
                    return False
        if val is None:
            val = False
        return val

    def is_not_delegation_all(self):
        # True if no valid, complete response is a delegation (and at least
        # one such response exists).
        val = None
        for server in self.responses:
            for response in self.responses[server].values():
                if not (response.is_valid_response() and response.is_complete_response()):
                    continue
                if response.not_delegation(self.qname, self.rdtype):
                    if val is None:
                        val = True
                else:
                    return False
        if val is None:
            val = False
        return val

    def is_valid_complete_response_any(self):
        for server in self.responses:
            for response in self.responses[server].values():
                if response.is_valid_response() and response.is_complete_response():
                    return True
        return False

    def is_valid_complete_authoritative_response_any(self):
        for server in self.responses:
            for response in self.responses[server].values():
                if response.is_valid_response() and response.is_complete_response() and response.is_authoritative():
                    return True
        return False

    def servers_with_valid_complete_response(self, bailiwick_map, default_bailiwick):
        # Return the set of (server, client) tuples that yielded a valid,
        # complete, non-referral response.
        servers_clients = set()
        for server in self.responses:
            bailiwick = bailiwick_map.get(server, default_bailiwick)
            for client, response in self.responses[server].items():
                if response.is_valid_response() and response.is_complete_response() and not response.is_referral(self.qname, self.rdtype, self.rdclass, bailiwick):
                    servers_clients.add((server, client))
        return servers_clients

    def is_nxdomain_any(self):
        for server in self.responses:
            for response in self.responses[server].values():
                if not (response.is_valid_response() and response.is_complete_response()):
                    continue
                if response.is_nxdomain(self.qname, self.rdtype):
                    return True
        return False

    def serialize(self, meta_only=False):
        # Serialize the query parameters and all responses into an
        # OrderedDict suitable for JSON output.
        d = OrderedDict((
            ('qname', lb2s(self.qname.to_text())),
            ('qclass', dns.rdataclass.to_text(self.rdclass)),
            ('qtype', dns.rdatatype.to_text(self.rdtype)),
        ))
        d['options'] = OrderedDict((
            ('flags', self.flags),
        ))
        if self.edns >= 0:
            d['options']['edns_version'] = self.edns
            d['options']['edns_max_udp_payload'] = self.edns_max_udp_payload
            d['options']['edns_flags'] = self.edns_flags
            d['options']['edns_options'] = []
            for o in self.edns_options:
                # each EDNS option is serialized as (otype, hex-encoded data)
                s = io.BytesIO()
                o.to_wire(s)
                d['options']['edns_options'].append((o.otype, lb2s(binascii.hexlify(s.getvalue()))))
            d['options']['tcp'] = self.tcp

        d['responses'] = OrderedDict()
        servers = list(self.responses.keys())
        servers.sort()
        for server in servers:
            d['responses'][server] = OrderedDict()
            clients = list(self.responses[server].keys())
            clients.sort()
            for client in clients:
                if meta_only:
                    d['responses'][server][client] = self.responses[server][client].serialize_meta()
                else:
                    d['responses'][server][client] = self.responses[server][client].serialize()

        return d

    @classmethod
    def deserialize(self, d, bailiwick_map, default_bailiwick, cookie_jar_map, default_cookie_jar, cookie_standin, cookie_bad):
        # Rebuild a DNSQuery (and its responses) from its serialized form.
        # (Historical quirk: the first parameter of this classmethod is named
        # self rather than cls.)
        qname = dns.name.from_text(d['qname'])
        rdclass = dns.rdataclass.from_text(d['qclass'])
        rdtype = dns.rdatatype.from_text(d['qtype'])

        d1 = d['options']

        flags = d1['flags']
        if 'edns_version' in d1:
            edns = d1['edns_version']
            edns_max_udp_payload = d1['edns_max_udp_payload']
            edns_flags = d1['edns_flags']
            edns_options = []
            for otype, data in d1['edns_options']:
                edns_options.append(dns.edns.GenericOption(otype, binascii.unhexlify(data)))
        else:
            edns = None
            edns_max_udp_payload = None
            edns_flags = None
            edns_options = []

        tcp = d1['tcp']

        q = DNSQuery(qname, rdtype, rdclass, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp)

        # Determine the DNS cookie status of the query from the serialized
        # COOKIE option (EDNS option type 10).
        server_cookie = None
        server_cookie_status = DNS_COOKIE_NO_COOKIE
        if edns >= 0:
            try:
                cookie_opt = [o for o in edns_options if o.otype == 10][0]
            except IndexError:
                pass
            else:
                if len(cookie_opt.data) == 8:
                    server_cookie_status = DNS_COOKIE_CLIENT_COOKIE_ONLY
                elif len(cookie_opt.data) >= 16 and len(cookie_opt.data) <= 40:
                    if cookie_opt.data[8:] == cookie_standin:
                        # initially assume that there is a cookie for the server;
                        # change the value later if there isn't
                        server_cookie_status = DNS_COOKIE_SERVER_COOKIE_FRESH
                    elif cookie_opt.data[8:] == cookie_bad:
                        server_cookie_status = DNS_COOKIE_SERVER_COOKIE_BAD
                    else:
                        server_cookie_status = DNS_COOKIE_SERVER_COOKIE_STATIC
                else:
                    server_cookie_status = DNS_COOKIE_IMPROPER_LENGTH

        for server in d['responses']:
            server_ip = IPAddr(server)
            bailiwick = bailiwick_map.get(server_ip, default_bailiwick)
            cookie_jar = cookie_jar_map.get(server_ip, default_cookie_jar)
            server_cookie = cookie_jar.get(server_ip, None)
            status = server_cookie_status
            if status == DNS_COOKIE_SERVER_COOKIE_FRESH and server_cookie is None:
                status = DNS_COOKIE_CLIENT_COOKIE_ONLY
            for client in d['responses'][server]:
                q.add_response(server_ip, IPAddr(client), DNSResponse.deserialize(d['responses'][server][client], q, server_cookie, status), bailiwick)
        return q

class DNSQueryAggregateDNSResponse(DNSQuery, AggregateDNSResponse):
    '''A DNSQuery that aggregates its responses as they are added.'''

    def __init__(self, qname, rdtype, rdclass, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp):
        DNSQuery.__init__(self, qname, rdtype, rdclass, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp)
        AggregateDNSResponse.__init__(self)

    def add_response(self, server, client, response, bailiwick):
        super(DNSQueryAggregateDNSResponse, self).add_response(server, client, response, bailiwick)
        self._aggregate_response(server, client, response, self.qname, self.rdtype, self.rdclass, bailiwick)

class MultiQuery(object):
    '''An simple DNS Query and its responses.'''

    def __init__(self, qname, rdtype, rdclass):
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass

        # constituent queries, indexed by their distinguishing parameters
        self.queries = {}

    def add_query(self, query, bailiwick_map, default_bailiwick):
        # Add (or join with an existing equivalent) a query having the same
        # question as this MultiQuery.
        if not (self.qname == query.qname and self.rdtype == query.rdtype and self.rdclass == query.rdclass):
            raise ValueError('DNS query information must be the same as that to which query is being joined.')

        edns_options_str = b''
        for o in query.edns_options:
            s = io.BytesIO()
            o.to_wire(s)
            edns_options_str += struct.pack(b'!H', o.otype) + s.getvalue()

        params = (query.qname.to_text(), query.flags, query.edns, query.edns_max_udp_payload, query.edns_flags, edns_options_str, query.tcp)
        if params in self.queries:
            self.queries[params] = self.queries[params].join(query, bailiwick_map, default_bailiwick)
        else:
            self.queries[params] = query

    def project(self, servers, bailiwick_map, default_bailiwick):
        query = self.__class__(self.qname, self.rdtype, self.rdclass)

        for params in self.queries:
            # NOTE(review): add_query() requires bailiwick_map and
            # default_bailiwick as positional arguments, but only the
            # projected query is passed here — this would raise TypeError if
            # exercised.  Confirm intended behavior before fixing.
            query.add_query(self.queries[params].project(servers,
# (continuation of MultiQuery.project from above)
                bailiwick_map, default_bailiwick))
        return query

    def is_nxdomain_all(self):
        # True only if every constituent query is all-NXDOMAIN.
        for params in self.queries:
            if not self.queries[params].is_nxdomain_all():
                return False
        return True

    def is_valid_complete_authoritative_response_any(self):
        for params in self.queries:
            if self.queries[params].is_valid_complete_authoritative_response_any():
                return True
        return False

class MultiQueryAggregateDNSResponse(MultiQuery, AggregateDNSResponse):
    '''A MultiQuery that aggregates the responses of queries as they are added.'''

    def __init__(self, qname, rdtype, rdclass):
        MultiQuery.__init__(self, qname, rdtype, rdclass)
        AggregateDNSResponse.__init__(self)

    def add_query(self, query, bailiwick_map, default_bailiwick):
        super(MultiQueryAggregateDNSResponse, self).add_query(query, bailiwick_map, default_bailiwick)
        for server in query.responses:
            bailiwick = bailiwick_map.get(server, default_bailiwick)
            for client, response in query.responses[server].items():
                self._aggregate_response(server, client, response, self.qname, self.rdtype, self.rdclass, bailiwick)

class TTLDistinguishingMultiQueryAggregateDNSResponse(MultiQueryAggregateDNSResponse):
    # TTLs are significant when comparing RRsets in this aggregation
    ttl_cmp = True

class ExecutableDNSQuery(DNSQuery):
    '''An executable DNS Query.'''

    default_th_factory = transport.DNSQueryTransportHandlerDNSPrivateFactory()

    def __init__(self, qname, rdtype, rdclass, servers, bailiwick, client_ipv4, client_ipv6, port, odd_ports, cookie_jar, cookie_standin, cookie_bad, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp, response_handlers, query_timeout, max_attempts, lifetime):
        super(ExecutableDNSQuery, self).__init__(qname, rdtype, rdclass, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp)

        # normalize servers to a set
        if not isinstance(servers, set):
            if isinstance(servers, (list, tuple)):
                servers = set(servers)
            else:
                servers = set([servers])
        if not servers:
            raise ValueError("At least one server must be specified for an ExecutableDNSQuery")

        self.servers = servers
        self.bailiwick = bailiwick
        self.client_ipv4 = client_ipv4
        self.client_ipv6 = client_ipv6
        self.port = port
        if odd_ports is None:
            odd_ports = {}
        self.odd_ports = odd_ports
        if cookie_jar is None:
            cookie_jar = {}
        self.cookie_jar = cookie_jar
        self.cookie_standin = cookie_standin
        self.cookie_bad = cookie_bad
        self.response_handlers = response_handlers

        self.query_timeout = query_timeout

        if lifetime is None and max_attempts is None:
            raise ValueError("At least one of lifetime or max_attempts must be specified for an ExecutableDNSQuery instance.")
        self.max_attempts = max_attempts
        self.lifetime = lifetime

        self._executed = False

    def get_query_handler(self, server):
        # Build the per-server request message and its DNSQueryHandler,
        # substituting any known server cookie for the standin value in the
        # COOKIE option (EDNS option type 10).
        edns_options = copy.deepcopy(self.edns_options)
        server_cookie = None
        server_cookie_status = DNS_COOKIE_NO_COOKIE
        if self.edns >= 0:
            try:
                cookie_opt = [o for o in edns_options if o.otype == 10][0]
            except IndexError:
                pass
            else:
                if len(cookie_opt.data) == 8:
                    server_cookie_status = DNS_COOKIE_CLIENT_COOKIE_ONLY
                elif len(cookie_opt.data) >= 16 and len(cookie_opt.data) <= 40:
                    if cookie_opt.data[8:] == self.cookie_standin:
                        if server in self.cookie_jar:
                            # if there is a cookie for this server,
                            # then add it
                            server_cookie = self.cookie_jar[server]
                            cookie_opt.data = cookie_opt.data[:8] + server_cookie
                            server_cookie_status = DNS_COOKIE_SERVER_COOKIE_FRESH
                        else:
                            # otherwise, send just the client cookie.
# (continuation of ExecutableDNSQuery.get_query_handler from above)
                            cookie_opt.data = cookie_opt.data[:8]
                            server_cookie_status = DNS_COOKIE_CLIENT_COOKIE_ONLY
                    elif cookie_opt.data[8:] == self.cookie_bad:
                        server_cookie_status = DNS_COOKIE_SERVER_COOKIE_BAD
                    else:
                        server_cookie_status = DNS_COOKIE_SERVER_COOKIE_STATIC
                else:
                    server_cookie_status = DNS_COOKIE_IMPROPER_LENGTH

        # build the request message
        request = dns.message.Message()
        request.flags = self.flags
        request.find_rrset(request.question, self.qname, self.rdclass, self.rdtype, create=True, force_unique=True)
        request.use_edns(self.edns, self.edns_flags, self.edns_max_udp_payload, options=edns_options)

        # choose the client address matching the server's address family
        if server.version == 6:
            client = self.client_ipv6
        else:
            client = self.client_ipv4

        params = { 'tcp': self.tcp, 'sport': None, 'wait': 0, 'timeout': self.query_timeout }

        response_handlers = [RetryOnNetworkErrorHandler(3).build()] + [h.build() for h in self.response_handlers] + \
                [RetryOnTimeoutHandler().build(), DefaultAcceptHandler().build()]

        if self.max_attempts is not None:
            response_handlers.append(MaxTimeoutsHandler(self.max_attempts).build())
        if self.lifetime is not None:
            response_handlers.append(LifetimeHandler(self.lifetime).build())

        return DNSQueryHandler(self, request, server_cookie, server_cookie_status, params, response_handlers, server, client)

    @classmethod
    def execute_queries(cls, *queries, **kwargs):
        '''Execute the query to a given server, and handle it appropriately.'''

        tm = kwargs.get('tm', None)
        if tm is None:
            # this starts a thread that stops when tm goes out of scope
            tm = transport.DNSQueryTransportManager()
        th_factories = kwargs.get('th_factories', None)
        if th_factories is None:
            th_factories = (cls.default_th_factory,)

        request_list = []
        response_queue = queue.Queue()

        ignore_queryid = kwargs.get('ignore_queryid', True)
        response_wire_map = {}

        query_handlers = {}
        query_time = None
        for th_factory in th_factories:
            if not th_factory.cls.singleton:
                # a non-singleton transport handler carries all transactions
                th = th_factory.build(processed_queue=response_queue)

            for query in queries:
                qtm_for_server = False
                for server in query.servers:
                    # skip servers this transport handler is not allowed to query
                    if not th_factory.cls.allow_loopback_query and (LOOPBACK_IPV4_RE.match(server) or server == LOOPBACK_IPV6):
                        continue
                    if not th_factory.cls.allow_private_query and (RFC_1918_RE.match(server) or LINK_LOCAL_RE.match(server) or UNIQ_LOCAL_RE.match(server)):
                        continue

                    qtm_for_server = True
                    qh = query.get_query_handler(server)
                    qtm = qh.get_query_transport_meta()
                    query_handlers[qtm] = qh
                    if th_factory.cls.singleton:
                        # one transport handler per query transaction
                        th = th_factory.build(processed_queue=response_queue)
                        th.add_qtm(qtm)
                        th.init_req()
                        bisect.insort(request_list, (qh.query_time, th))
                    else:
                        # find the maximum query time
                        if query_time is None or qh.query_time > query_time:
                            query_time = qh.query_time
                        th.add_qtm(qtm)

                if not qtm_for_server:
                    raise NoValidServersToQuery('No valid servers to query!')

            if not th_factory.cls.singleton:
                th.init_req()
                bisect.insort(request_list, (query_time, th))

        while query_handlers:
            # send all requests whose scheduled query time has arrived
            while request_list and time.time() >= request_list[0][0]:
                tm.handle_msg_nowait(request_list.pop(0)[1])

            t = time.time()
            if request_list and t < request_list[0][0]:
                timeout = max(request_list[0][0] - t, 0)
            else:
                timeout = None

            try:
                # pull a response from the queue
                th = response_queue.get(timeout=timeout)
            except queue.Empty:
                continue
            th.finalize()

            # transport handler for any retries resulting from this batch
            newth = th.factory.build(processed_queue=response_queue)
            query_time = None
            for qtm in th.qtms:
                # find its matching query meta information
                qh = query_handlers.pop(qtm)
                query = qh.query

                # define response as either a Message created from parsing
                # the wire response or an Exception
                if qtm.err is not None:
                    response = qtm.err
                else:
                    wire_zero_queryid = b'\x00\x00' + qtm.res[2:]
                    if wire_zero_queryid in response_wire_map:
                        response = response_wire_map[wire_zero_queryid]
                    else:
                        try:
                            response = dns.message.from_wire(qtm.res)
                        except Exception as e:
                            response = e
                        if ignore_queryid:
                            # cache parse results keyed on the wire form with
                            # the query ID zeroed out
                            response_wire_map[wire_zero_queryid] = response
                if qtm.res:
                    msg_size = len(qtm.res)
                else:
                    msg_size = None
                response_time = round(qtm.end_time - qtm.start_time, 3)
                response = qh.handle_response(qtm.res, response, response_time, qtm.src, qtm.sport)

                # if no response was returned, then resubmit the modified query
                if response is None:
                    qtm = qh.get_query_transport_meta()
                    # find the maximum query time
                    if query_time is None or qh.query_time > query_time:
                        query_time = qh.query_time
                    query_handlers[qtm] = qh
                    newth.add_qtm(qtm)
                    continue

                # otherwise store away the response (or error), history, and response time
                if isinstance(response, dns.message.Message):
                    msg = response
                    err = None
                    errno1 = None
                else:
                    msg = None
                    if isinstance(response, dns.exception.Timeout):
                        err = RESPONSE_ERROR_TIMEOUT
                    elif isinstance(response, (socket.error, EOFError)):
                        err = RESPONSE_ERROR_NETWORK_ERROR
                    elif isinstance(response, (struct.error, dns.exception.FormError)):
                        err = RESPONSE_ERROR_FORMERR
                    #XXX need to determine how to handle non-parsing
                    # validation errors with dnspython (e.g., signature with
                    # no keyring)
                    else:
                        err = RESPONSE_ERROR_OTHER
                    if hasattr(response, 'errno'):
                        errno1 = response.errno
                    else:
                        errno1 = None
                response_obj = DNSResponse(msg, msg_size, err, errno1, qh.history, response_time, query, qh.server_cookie, qh.server_cookie_status)

                # if client IP is not specified, and there is a socket
                # failure, then src might be None
                if qtm.src is not None:
                    src = IPAddr(qtm.src)
                else:
                    src = qtm.src

                # If this was a network error, determine if it was a binding
                # error
                if err == RESPONSE_ERROR_NETWORK_ERROR:
                    if errno1 == errno.EADDRNOTAVAIL and qh._client is not None:
                        raise SourceAddressBindError('Unable to bind to local address %s (%s)' % (qh._client, errno.errorcode[errno1]))
                    elif errno1 == errno.EADDRINUSE or \
                            (errno1 == errno.EACCES and qtm.src is None):
                        # Address/port in use (EADDRINUSE) or insufficient
                        # permissions to bind to port
                        if qh.params['sport'] is not None:
                            raise PortBindError('Unable to bind to local port %d (%s)' % (qh.params['sport'], errno.errorcode[errno1]))
                        else:
                            raise PortBindError('Unable to bind to local port (%s)' % (errno.errorcode[errno1]))
                    elif qtm.src is None and errno1 not in (errno.EHOSTUNREACH, errno.ENETUNREACH, errno.EAFNOSUPPORT, errno.EADDRNOTAVAIL):
                        # If source is None it didn't bind properly. There are several sub-cases:
                        # 1. If the bind() failed and resulted in an errno
                        #    value of EHOSTUNREACH, it is because there was no
                        #    proper IPv4 or IPv6 connectivity; the error for
                        #    this is handled elsewhere).
                        # 2. If socket() failed and resulted in an errno value
                        #    of EAFNOSUPPORT, then there is no IPv6 support.
                        # 3. If connect() failed and resulted in an errno value
                        #    of EADDRNOTAVAIL, then there is no IPv6 support.
                        # In other cases, it was something unknown, so
                        # raise an error.
                        raise BindError('Unable to bind to local address (%s)' % (errno.errorcode.get(errno1, "unknown")))

                # if src is None, then it is a connectivity issue on our
                # side, so don't record it in the responses
                if src is not None:
                    query.add_response(qh._server, src, response_obj, query.bailiwick)

                # This query is now executed, at least in part
                query._executed = True

            if newth.qtms:
                newth.init_req()
                bisect.insort(request_list, (query_time, newth))

    def require_executed(func):
        # Decorator: assert that the query has been executed before calling.
        # (NOTE: assert is stripped under python -O.)
        def _func(self, *args, **kwargs):
            assert self._executed == True, "ExecutableDNSQuery has not been executed."
            return func(self, *args, **kwargs)
        return _func

    def require_not_executed(func):
        # Decorator: assert that the query has not yet been executed.
        def _func(self, *args, **kwargs):
            assert self._executed == False, "ExecutableDNSQuery has already been executed."
# (continuation of ExecutableDNSQuery.require_not_executed from above)
            return func(self, *args, **kwargs)
        return _func

    def add_response(self, server, client, response, bailiwick):
        super(ExecutableDNSQuery, self).add_response(server, client, response, bailiwick)
        # once all servers have a response, the query is considered executed
        if not self.servers.difference(self.responses):
            self._executed = True

    @require_not_executed
    def execute(self, ignore_queryid=True, tm=None, th_factories=None):
        # Execute this query alone (see execute_queries for the mechanics).
        self.execute_queries(self, ignore_queryid=ignore_queryid, tm=tm, th_factories=th_factories)

    # these DNSQuery methods are only meaningful after execution
    join = require_executed(DNSQuery.join)
    project = require_executed(DNSQuery.project)
    is_authoritative_answer_all = require_executed(DNSQuery.is_authoritative_answer_all)
    is_nxdomain_all = require_executed(DNSQuery.is_nxdomain_all)
    is_not_delegation_all = require_executed(DNSQuery.is_not_delegation_all)
    is_nxdomain_any = require_executed(DNSQuery.is_nxdomain_any)

class DNSQueryFactory(object):
    '''A simple, extensible class interface for instantiating DNSQuery objects.'''

    # query parameter defaults; subclasses override these
    flags = 0
    edns = -1
    edns_max_udp_payload = 4096
    edns_flags = 0
    edns_options = []
    tcp = False

    query_timeout = 3.0
    max_attempts = 5
    lifetime = 15.0

    response_handlers = []

    def __new__(cls, qname, rdtype, rdclass, servers, bailiwick=None,
            client_ipv4=None, client_ipv6=None, port=53, odd_ports=None,
            cookie_jar=None, cookie_standin=None, cookie_bad=None,
            query_timeout=None, max_attempts=None, lifetime=None,
            executable=True):

        # per-call overrides fall back to class-level defaults
        if query_timeout is None:
            query_timeout = cls.query_timeout
        if max_attempts is None:
            max_attempts = cls.max_attempts
        if lifetime is None:
            lifetime = cls.lifetime

        if executable:
            return ExecutableDNSQuery(qname, rdtype, rdclass, servers, bailiwick, client_ipv4, client_ipv6, port, odd_ports, cookie_jar, cookie_standin, cookie_bad, cls.flags, cls.edns, cls.edns_max_udp_payload, cls.edns_flags, cls.edns_options, cls.tcp, cls.response_handlers, query_timeout, max_attempts, lifetime)
        else:
            return DNSQuery(qname, rdtype, rdclass, cls.flags, cls.edns, cls.edns_max_udp_payload, cls.edns_flags, cls.edns_options, cls.tcp)

    def __init__(self, *args, **kwargs):
        # NOTE(review): NotImplemented is a value, not an exception class;
        # calling it raises TypeError at runtime.  NotImplementedError was
        # presumably intended — confirm before changing.
        raise NotImplemented()

    @classmethod
    def add_mixin(cls, mixin_cls):
        # Derive a new factory class with the mixin's flags, EDNS flags, and
        # EDNS options merged in.
        class _foo(cls):
            flags = cls.flags | getattr(mixin_cls, 'flags', 0)
            edns_flags = cls.edns_flags | getattr(mixin_cls, 'edns_flags', 0)
            edns_options = cls.edns_options + copy.deepcopy(getattr(mixin_cls, 'edns_options', []))
        return _foo

    @classmethod
    def get_cookie_opt(cls):
        # Return the EDNS COOKIE option (option type 10), if present.
        try:
            return [o for o in cls.edns_options if o.otype == 10][0]
        except IndexError:
            return None

    @classmethod
    def add_server_cookie(cls, server_cookie):
        # Append a server cookie to an existing client-only COOKIE option.
        cookie_opt = cls.get_cookie_opt()
        if cookie_opt is not None:
            if len(cookie_opt.data) != 8:
                raise TypeError('COOKIE option must have length of 8.')
            cookie_opt.data += server_cookie
        return cls

    @classmethod
    def remove_cookie_option(cls):
        cookie_opt = cls.get_cookie_opt()
        if cookie_opt is not None:
            cls.edns_options.remove(cookie_opt)
        return cls

class SimpleDNSQuery(DNSQueryFactory):
    '''A simple query, no frills.'''

    pass

class RecursiveDNSQuery(SimpleDNSQuery):
    '''A simple recursive query.'''

    flags = SimpleDNSQuery.flags | dns.flags.RD

class StandardQuery(SimpleDNSQuery):
    '''A standard old-school DNS query that handles truncated packets.'''

    response_handlers = \
            SimpleDNSQuery.response_handlers + \
            [UseTCPOnTCFlagHandler()]

class StandardRecursiveQuery(StandardQuery, RecursiveDNSQuery):
    '''A standard old-school recursive DNS query that handles truncated packets.'''

    pass

class StandardRecursiveQueryCD(StandardRecursiveQuery):
    '''A recursive DNS query that retries with checking disabled if the
    response code is SERVFAIL.'''

    response_handlers = \
            StandardRecursiveQuery.response_handlers + \
            [SetFlagOnRcodeHandler(dns.flags.CD, dns.rcode.SERVFAIL)]

class EDNS0Query(StandardQuery):
    '''A standard query with EDNS0.'''

    edns = 0

class RecursiveEDNS0Query(EDNS0Query, RecursiveDNSQuery):
    '''A standard recursive query with EDNS0.'''

    pass

class DNSSECQuery(EDNS0Query):
    '''A standard query requesting DNSSEC records.'''

    edns_flags = EDNS0Query.edns_flags | dns.flags.DO

class \
RecursiveDNSSECQuery(DNSSECQuery, RecursiveDNSQuery): '''A standard recursive query requesting DNSSEC records.''' pass class QuickDNSSECQuery(DNSSECQuery): '''A standard DNSSEC query, designed for quick turnaround.''' response_handlers = DNSSECQuery.response_handlers + \ [ AddServerCookieOnBADCOOKIE(), RemoveEDNSOptionOnRcodeHandler(dns.rcode.FORMERR), DisableEDNSOnFormerrHandler(), DisableEDNSOnRcodeHandler() ] query_timeout = 1.0 max_attempts = 1 lifetime = 3.0 class DiagnosticQuery(DNSSECQuery): '''A robust query with a number of handlers, designed to detect common DNS compatibility and connectivity issues.''' response_handlers = DNSSECQuery.response_handlers + \ [ AddServerCookieOnBADCOOKIE(), RemoveEDNSOptionOnRcodeHandler(dns.rcode.FORMERR), DisableEDNSOnFormerrHandler(), DisableEDNSOnRcodeHandler(), ReduceUDPMaxPayloadOnTimeoutHandler(512, 4), RemoveEDNSOptionOnTimeoutHandler(6), ClearEDNSFlagOnTimeoutHandler(dns.flags.DO, 10), DisableEDNSOnTimeoutHandler(11), ChangeTimeoutOnTimeoutHandler(2.0, 2), ChangeTimeoutOnTimeoutHandler(1.0, 4), ChangeTimeoutOnTimeoutHandler(2.0, 5), ChangeTimeoutOnTimeoutHandler(1.0, 6), ] # For timeouts: # 1 - no change # 2 - change timeout to 2 seconds # 3 - no change # 4 - reduce udp max payload to 512; change timeout to 1 second # 5 - change timeout to 2 seconds # 6 - remove EDNS option (if any); change timeout to 1 second # 7 - remove EDNS option (if any) # 8 - remove EDNS option (if any) # 9 - remove EDNS option (if any) # 10 - clear DO flag; # 11 - disable EDNS # 12 - return (give up) query_timeout = 1.0 max_attempts = 12 lifetime = 16.0 class RecursiveDiagnosticQuery(RecursiveDNSSECQuery): '''A robust query to a cache with a number of handlers, designed to detect common DNS compatibility and connectivity issues.''' response_handlers = DNSSECQuery.response_handlers + \ [ AddServerCookieOnBADCOOKIE(), RemoveEDNSOptionOnRcodeHandler(dns.rcode.FORMERR), DisableEDNSOnFormerrHandler(), SetFlagOnRcodeHandler(dns.flags.CD, 
class TCPDiagnosticQuery(DNSSECQuery):
    '''A robust query with a number of handlers, designed to detect common DNS
    compatibility and connectivity issues over TCP.'''

    tcp = True

    response_handlers = \
            [
                RemoveEDNSOptionOnRcodeHandler(dns.rcode.FORMERR),
                DisableEDNSOnFormerrHandler(),
                DisableEDNSOnRcodeHandler(),
                ChangeTimeoutOnTimeoutHandler(4.0, 2)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 4 seconds
    # 3 - return

    query_timeout = 2.0
    max_attempts = 3
    lifetime = 10.0
class PMTUDiagnosticQuery(DNSSECQuery):
    '''A diagnostic query that additionally probes for path-MTU problems:
    PMTUBoundingHandler binary-searches the largest usable UDP payload.
    Unlike DiagnosticQuery there is no ReduceUDPMaxPayloadOnTimeoutHandler;
    payload reduction is driven by the bounding handler instead.'''

    response_handlers = \
            [PMTUBoundingHandler(512, 4, 6, 1.0)] + \
            DNSSECQuery.response_handlers + \
            [
                AddServerCookieOnBADCOOKIE(),
                RemoveEDNSOptionOnRcodeHandler(dns.rcode.FORMERR),
                DisableEDNSOnFormerrHandler(),
                DisableEDNSOnRcodeHandler(),
                RemoveEDNSOptionOnTimeoutHandler(6),
                ClearEDNSFlagOnTimeoutHandler(dns.flags.DO, 10),
                DisableEDNSOnTimeoutHandler(11),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(1.0, 4),
                ChangeTimeoutOnTimeoutHandler(2.0, 5),
                ChangeTimeoutOnTimeoutHandler(1.0, 6),
            ]
    # Per-attempt behavior on timeouts:
    #  1 - no change
    #  2 - change timeout to 2 seconds
    #  3 - no change
    #  4 - reduce udp max payload to 512; change timeout to 1 second
    #  5 - change timeout to 2 seconds
    #  6 - remove EDNS option (if any); change timeout to 1 second
    #  7 - remove EDNS option (if any)
    #  8 - remove EDNS option (if any)
    #  9 - remove EDNS option (if any)
    # 10 - clear DO flag
    # 11 - disable EDNS
    # 12 - return (give up)

    query_timeout = 1.0
    max_attempts = 12
    lifetime = 22.0 # set this a little longer due to pickle stage
class TruncationDiagnosticQuery(DNSSECQuery):
    '''A simple query to test the results of a query with capabilities of
    only receiving back a small (512 byte) payload.  Note that the handler
    list is standalone (it does not extend DNSSECQuery.response_handlers),
    so truncated responses are deliberately NOT retried over TCP.'''

    response_handlers = \
            [
                AddServerCookieOnBADCOOKIE(),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(4.0, 3)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 2 seconds
    # 3 - change timeout to 4 seconds

    edns_max_udp_payload = 512

    query_timeout = 1.0
    max_attempts = 4
    lifetime = 8.0


class RecursiveTruncationDiagnosticQuery(DNSSECQuery, RecursiveDNSQuery):
    '''A simple recursive query to test the results of a query with
    capabilities of only receiving back a small (512 byte) payload.'''

    response_handlers = \
            [
                AddServerCookieOnBADCOOKIE(),
                SetFlagOnRcodeHandler(dns.flags.CD, dns.rcode.SERVFAIL),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(4.0, 3),
                ChangeTimeoutOnTimeoutHandler(8.0, 4)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 2 seconds
    # 3 - change timeout to 4 seconds
    # 4 - change timeout to 8 seconds

    edns_max_udp_payload = 512

    query_timeout = 1.0
    max_attempts = 5
    lifetime = 18.0
class EDNSOptDiagnosticQuery(SimpleDNSQuery):
    '''A query designed to test unknown EDNS option compatibility.  Sends a
    generic (unassigned) EDNS option, code 100, with empty data.'''

    edns = 0
    edns_max_udp_payload = 512
    edns_options = [dns.edns.GenericOption(100, b'')]

    response_handlers = \
            SimpleDNSQuery.response_handlers + \
            [
                AddServerCookieOnBADCOOKIE(),
                RemoveEDNSOptionOnTimeoutHandler(4),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(1.0, 4)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 2 seconds
    # 3 - no change
    # 4 - remove EDNS option (if any); change timeout to 1 second
    # 5 - remove EDNS option (if any)
    # 6 - remove EDNS option (if any)
    # 7 - remove EDNS option (if any)
    # 8 - return

    query_timeout = 1.0
    max_attempts = 8
    lifetime = 11.0


class EDNSFlagDiagnosticQuery(SimpleDNSQuery):
    '''A query designed to test unknown EDNS flag compatibility.  Sets an
    unassigned EDNS flag bit (0x80).'''

    edns = 0
    edns_max_udp_payload = 512
    edns_flags = SimpleDNSQuery.edns_flags | 0x80

    response_handlers = \
            SimpleDNSQuery.response_handlers + \
            [
                AddServerCookieOnBADCOOKIE(),
                RemoveEDNSOptionOnTimeoutHandler(4),
                ClearEDNSFlagOnTimeoutHandler(0x80, 8),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(1.0, 4)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 2 seconds
    # 3 - no change
    # 4 - remove EDNS option (if any); change timeout to 1 second
    # 5 - remove EDNS option (if any)
    # 6 - remove EDNS option (if any)
    # 7 - remove EDNS option (if any)
    # 8 - clear EDNS flag
    # 9 - return

    query_timeout = 1.0
    max_attempts = 9
    lifetime = 12.0
class RecursiveEDNSOptDiagnosticQuery(SimpleDNSQuery):
    '''A query designed to test unknown EDNS option compatibility on
    recursive servers.  Like EDNSOptDiagnosticQuery, but with RD set, a
    CD retry on SERVFAIL, and a longer timeout ramp suited to caches.'''

    flags = dns.flags.RD
    edns = 0
    edns_max_udp_payload = 512
    edns_options = [dns.edns.GenericOption(100, b'')]

    response_handlers = \
            SimpleDNSQuery.response_handlers + \
            [
                AddServerCookieOnBADCOOKIE(),
                SetFlagOnRcodeHandler(dns.flags.CD, dns.rcode.SERVFAIL),
                RemoveEDNSOptionOnTimeoutHandler(5),
                ChangeTimeoutOnTimeoutHandler(2.0, 2),
                ChangeTimeoutOnTimeoutHandler(4.0, 3),
                ChangeTimeoutOnTimeoutHandler(8.0, 4),
                ChangeTimeoutOnTimeoutHandler(1.0, 5)
            ]
    # Per-attempt behavior on timeouts:
    # 1 - no change
    # 2 - change timeout to 2 seconds
    # 3 - change timeout to 4 seconds
    # 4 - change timeout to 8 seconds
    # 5 - remove EDNS option (if any); change timeout to 1 second
    # 6 - remove EDNS option (if any)
    # 7 - remove EDNS option (if any)
    # 8 - remove EDNS option (if any)
    # 9 - return

    query_timeout = 1.0
    max_attempts = 9
    lifetime = 21.0
def main():
    '''Command-line driver: issue a (possibly recursive) diagnostic query
    for NAME/TYPE against one or more servers and print the responses,
    either human-readable or as JSON (-j).'''
    import json
    import sys
    import getopt

    def usage():
        sys.stderr.write('Usage: %s [-r] [-j] [...]\n' % (sys.argv[0]))
        sys.exit(1)

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'rj')
        opts = dict(opts)
    except getopt.error:
        usage()

    if len(args) < 3:
        usage()

    # -r selects the recursive (cache-oriented) variant of the query
    query_factory = RecursiveDiagnosticQuery if '-r' in opts else DiagnosticQuery

    d = query_factory(dns.name.from_text(args[0]), dns.rdatatype.from_text(args[1]),
            dns.rdataclass.IN, [IPAddr(x) for x in args[2:]])
    d.execute()

    if '-j' in opts:
        print(json.dumps(d.serialize(), indent=4, separators=(',', ': ')))
    else:
        print('Responses for %s/%s:' % (args[0], args[1]))
        for server in d.responses:
            for client, response in d.responses[server].items():
                if response.message is not None:
                    print(' from %s: %s (%d bytes in %dms)' % (server, repr(response.message), len(response.message.to_wire()), int(response.response_time*1000)))
                else:
                    print(' from %s: (ERR: %s) (%dms)' % (server, repr(response.error), int(response.response_time*1000)))
                print(' (src: %s)' % (client))
                if response.history:
                    print(' (history: %s)' % (response.history))

if __name__ == '__main__':
    main()
# # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import bisect import io import logging import math import random import threading import time from .config import RESOLV_CONF from . import query from .ipaddr import * from . import response as Response from . import transport from . import util import dns.rdataclass, dns.exception, dns.message, dns.rcode, dns.resolver MAX_CNAME_REDIRECTION = 20 logger = logging.getLogger(__name__) class ResolvConfError(Exception): pass _r = None def get_standard_resolver(): global _r if _r is None: _r = Resolver.from_file(RESOLV_CONF, query.StandardRecursiveQuery) return _r _rd = None def get_dnssec_resolver(): global _rd if _rd is None: _rd = Resolver.from_file(RESOLV_CONF, query.RecursiveDNSSECQuery) return _rd class DNSAnswer: '''An answer to a DNS query, including the full DNS response message, the RRset requested, and the server.''' def __init__(self, qname, rdtype, response, server): self.response = response self.server = server self.rrset = None self._handle_nxdomain(response) i = 0 qname_sought = qname while i < MAX_CNAME_REDIRECTION: try: self.rrset = response.find_rrset(response.answer, qname_sought, dns.rdataclass.IN, rdtype) i = MAX_CNAME_REDIRECTION except KeyError: try: rrset = response.find_rrset(response.answer, qname_sought, dns.rdataclass.IN, dns.rdatatype.CNAME) qname_sought = rrset[0].target except KeyError: 
class DNSAnswer:
    '''An answer to a DNS query, including the full DNS response message, the
    RRset requested, and the server.'''

    def __init__(self, qname, rdtype, response, server):
        self.response = response
        self.server = server
        self.rrset = None

        self._handle_nxdomain(response)

        # Walk any CNAME chain in the answer section (bounded by
        # MAX_CNAME_REDIRECTION) until the requested RRset is found.
        i = 0
        qname_sought = qname
        while i < MAX_CNAME_REDIRECTION:
            try:
                self.rrset = response.find_rrset(response.answer, qname_sought, dns.rdataclass.IN, rdtype)
                # found it: force loop exit
                i = MAX_CNAME_REDIRECTION
            except KeyError:
                try:
                    rrset = response.find_rrset(response.answer, qname_sought, dns.rdataclass.IN, dns.rdatatype.CNAME)
                    qname_sought = rrset[0].target
                except KeyError:
                    break
            i += 1

        self._handle_noanswer()

    def _handle_nxdomain(self, response):
        # Raise if the server reported that the name does not exist.
        if response.rcode() == dns.rcode.NXDOMAIN:
            raise dns.resolver.NXDOMAIN()

    def _handle_noanswer(self):
        # Raise if no RRset of the requested type was found.
        if self.rrset is None:
            raise dns.resolver.NoAnswer()

class DNSAnswerNoAnswerAllowed(DNSAnswer):
    '''An answer to a DNS query, including the full DNS response message, the
    RRset requested, and the server.  An empty answer is tolerated.'''

    def _handle_noanswer(self):
        pass

class Resolver:
    '''A simple stub DNS resolver.'''

    def __init__(self, servers, query_cls, timeout=1.0, max_attempts=5,
            lifetime=15.0, shuffle=False, client_ipv4=None, client_ipv6=None,
            port=53, transport_manager=None, th_factories=None):
        # Without either bound the retry loop could never terminate.
        if lifetime is None and max_attempts is None:
            raise ValueError("At least one of lifetime or max_attempts must be specified for a Resolver instance.")

        self._servers = servers
        self._query_cls = query_cls
        self._timeout = timeout
        self._max_attempts = max_attempts
        self._lifetime = lifetime
        self._shuffle = shuffle
        self._client_ipv4 = client_ipv4
        self._client_ipv6 = client_ipv6
        self._port = port
        self._transport_manager = transport_manager
        self._th_factories = th_factories
% (resolv_conf)) except ValueError: pass except IOError as e: raise ResolvConfError('Unable to open %s: %s' % (resolv_conf, str(e))) if not servers: raise ResolvConfError('No servers found in %s' % (resolv_conf)) return Resolver(servers, query_cls, **kwargs) def query(self, qname, rdtype, rdclass=dns.rdataclass.IN, accept_first_response=False, continue_on_servfail=True): return list(self.query_multiple((qname, rdtype, rdclass), accept_first_response=accept_first_response, continue_on_servfail=continue_on_servfail).values())[0] def query_for_answer(self, qname, rdtype, rdclass=dns.rdataclass.IN, allow_noanswer=False): answer = list(self.query_multiple_for_answer((qname, rdtype, rdclass), allow_noanswer=allow_noanswer).values())[0] if isinstance(answer, DNSAnswer): return answer else: raise answer def query_multiple_for_answer(self, *query_tuples, **kwargs): if kwargs.pop('allow_noanswer', False): answer_cls = DNSAnswerNoAnswerAllowed else: answer_cls = DNSAnswer responses = self.query_multiple(*query_tuples, accept_first_response=False, continue_on_servfail=True) answers = {} for query_tuple, (server, response) in responses.items(): # no servers were queried if response is None: answers[query_tuple] = dns.resolver.NoNameservers() # response was valid elif response.is_complete_response() and response.is_valid_response(): try: answers[query_tuple] = answer_cls(query_tuple[0], query_tuple[1], response.message, server) except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN) as e: answers[query_tuple] = e # response was timeout or network error elif response.error in (query.RESPONSE_ERROR_TIMEOUT, query.RESPONSE_ERROR_NETWORK_ERROR): answers[query_tuple] = dns.exception.Timeout() # there was a response, but it was invalid for some reason else: answers[query_tuple] = dns.resolver.NoNameservers() return answers def query_multiple(self, *query_tuples, **kwargs): valid_servers = {} responses = {} last_responses = {} attempts = {} accept_first_response = 
kwargs.get('accept_first_response', False) continue_on_servfail = kwargs.get('continue_on_servfail', True) query_tuples = set(query_tuples) for query_tuple in query_tuples: attempts[query_tuple] = 0 valid_servers[query_tuple] = set(self._servers) if self._shuffle: servers = self._servers[:] random.shuffle(servers) else: servers = self._servers tuples_to_query = query_tuples.difference(last_responses) start = time.time() while tuples_to_query and (self._lifetime is None or time.time() - start < self._lifetime): now = time.time() queries = {} for query_tuple in tuples_to_query: if not valid_servers[query_tuple]: try: last_responses[query_tuple] = responses[query_tuple] except KeyError: last_responses[query_tuple] = None, None continue while query_tuple not in queries: cycle_num, server_index = divmod(attempts[query_tuple], len(servers)) # if we've exceeded our maximum attempts, then break out if cycle_num >= self._max_attempts: try: last_responses[query_tuple] = responses[query_tuple] except KeyError: last_responses[query_tuple] = None, None break server = servers[server_index] if server in valid_servers[query_tuple]: if self._lifetime is not None: timeout = min(self._timeout, max((start + self._lifetime) - now, 0)) else: timeout = self._timeout q = self._query_cls(query_tuple[0], query_tuple[1], query_tuple[2], server, None, client_ipv4=self._client_ipv4, client_ipv6=self._client_ipv6, port=self._port, query_timeout=timeout, max_attempts=1) queries[query_tuple] = q attempts[query_tuple] += 1 query.ExecutableDNSQuery.execute_queries(*list(queries.values()), tm=self._transport_manager, th_factories=self._th_factories) for query_tuple, q in queries.items(): # no response means we didn't even try because we don't have # proper connectivity if not q.responses: server = list(q.servers)[0] valid_servers[query_tuple].remove(server) if not valid_servers[query_tuple]: last_responses[query_tuple] = server, None continue server, client_response = list(q.responses.items())[0] 
class CacheEntry:
    '''One cached lookup result: the RRset (None for a negative answer), a
    SRC_* ranking of where it came from, its absolute expiration time, the
    response code, and any SOA RRset that accompanied a negative answer.'''

    def __init__(self, rrset, source, expiration, rcode, soa_rrset):
        self.rrset = rrset
        self.source = source
        self.expiration = expiration
        self.rcode = rcode
        self.soa_rrset = soa_rrset


class ServFail(Exception):
    '''Raised internally when resolution cannot produce a usable answer.'''
    pass
class FullResolver:
    '''A full iterative DNS resolver, following hints.'''

    # Source rankings: lower values are more authoritative and are preferred
    # by the cache (cache_put keeps the lower-source entry).
    SRC_PRIMARY_ZONE = 0
    SRC_SECONDARY_ZONE = 1
    SRC_AUTH_ANS = 2
    SRC_AUTH_AUTH = 3
    SRC_GLUE_PRIMARY_ZONE = 4
    SRC_GLUE_SECONDARY_ZONE = 5
    SRC_NONAUTH_ANS = 6
    SRC_ADDITIONAL = 7
    # NOTE(review): SRC_NONAUTH_AUTH shares value 7 with SRC_ADDITIONAL in
    # the original source — looks intentional (same trust tier) but confirm.
    SRC_NONAUTH_AUTH = 7

    MIN_TTL = 60
    MAX_CHAIN = 20

    default_th_factory = transport.DNSQueryTransportHandlerDNSFactory()

    def __init__(self, hints=None,
            query_cls=(query.QuickDNSSECQuery, query.DiagnosticQuery),
            client_ipv4=None, client_ipv6=None, odd_ports=None,
            cookie_standin=None, transport_manager=None, th_factories=None,
            max_ttl=None):
        # BUG FIX: the original used hints=util.get_root_hints() as the
        # default, which is evaluated once at class-definition time and
        # shared (mutably) by every instance; fetch a fresh default lazily
        # instead.  Passing hints explicitly behaves exactly as before.
        if hints is None:
            hints = util.get_root_hints()
        self._hints = hints
        self._query_cls = query_cls
        self._client_ipv4 = client_ipv4
        self._client_ipv6 = client_ipv6
        if odd_ports is None:
            odd_ports = {}
        self._odd_ports = odd_ports
        self._transport_manager = transport_manager
        if th_factories is None:
            self._th_factories = (self.default_th_factory,)
        else:
            self._th_factories = th_factories
        # Loopback/private queries are allowed only if every transport
        # handler factory allows them.
        self.allow_loopback_query = not bool([x for x in self._th_factories if not x.cls.allow_loopback_query])
        self.allow_private_query = not bool([x for x in self._th_factories if not x.cls.allow_private_query])
        self._max_ttl = max_ttl
        self._cookie_standin = cookie_standin
        self._cookie_jar = {}
        self._cache = {}
        self._expirations = []
        self._cache_lock = threading.Lock()

    def _allow_server(self, server):
        '''Return False for servers excluded by policy (loopback, private,
        link-local, unique-local, or 0.0.0.0/8 addresses).'''
        if not self.allow_loopback_query and (LOOPBACK_IPV4_RE.search(server) is not None or server == LOOPBACK_IPV6):
            return False
        if not self.allow_private_query and (RFC_1918_RE.search(server) is not None or LINK_LOCAL_RE.search(server) is not None or UNIQ_LOCAL_RE.search(server) is not None):
            return False
        if ZERO_SLASH8_RE.search(server) is not None:
            return False
        return True

    def flush_cache(self):
        '''Discard all cached entries and their expiration index.'''
        with self._cache_lock:
            self._cache = {}
            self._expirations = []

    def expire_cache(self):
        '''Remove all cache entries whose expiration time has passed.'''
        t = time.time()
        with self._cache_lock:
            if self._expirations and self._expirations[0][0] > t:
                return
            # _expirations is kept sorted by (expiration, key); everything
            # before future_index has expired
            future_index = bisect.bisect_right(self._expirations, (t, None))
            for i in range(future_index):
                cache_key = self._expirations[i][1]
                del self._cache[cache_key]
            self._expirations = self._expirations[future_index:]

    def cache_put(self, name, rdtype, rrset, source, rcode, soa_rrset, ttl):
        '''Insert (name, rdtype) -> CacheEntry, deriving the TTL from the
        RRset, the negative-answer SOA, or the explicit ttl (clamped to
        MIN_TTL/_max_ttl).  An existing entry with a better (lower) source
        ranking is kept in preference to the new one.'''
        t = time.time()
        if rrset is not None:
            ttl = max(rrset.ttl, self.MIN_TTL)
        elif soa_rrset is not None:
            # negative answer: RFC 2308 negative TTL (min of SOA TTL and
            # SOA minimum field)
            ttl = max(min(soa_rrset.ttl, soa_rrset[0].minimum), self.MIN_TTL)
        elif ttl is not None:
            ttl = max(ttl, self.MIN_TTL)
        else:
            ttl = self.MIN_TTL
        if self._max_ttl is not None and ttl > self._max_ttl:
            ttl = self._max_ttl
        expiration = math.ceil(t) + ttl
        key = (name, rdtype)
        new_entry = CacheEntry(rrset, source, expiration, rcode, soa_rrset)
        with self._cache_lock:
            try:
                old_entry = self._cache[key]
            except KeyError:
                pass
            else:
                # keep the existing entry if it is at least as authoritative
                if new_entry.source >= old_entry.source:
                    return
                # remove the old entry from expirations
                old_index = bisect.bisect_left(self._expirations, (old_entry.expiration, key))
                old_key = self._expirations.pop(old_index)[1]
                assert old_key == key, "Old key doesn't match new key!"
            self._cache[key] = new_entry
            bisect.insort(self._expirations, (expiration, key))
self._max_ttl expiration = math.ceil(t) + ttl key = (name, rdtype) new_entry = CacheEntry(rrset, source, expiration, rcode, soa_rrset) with self._cache_lock: try: old_entry = self._cache[key] except KeyError: pass else: if new_entry.source >= old_entry.source: return # remove the old entry from expirations old_index = bisect.bisect_left(self._expirations, (old_entry.expiration, key)) old_key = self._expirations.pop(old_index)[1] assert old_key == key, "Old key doesn't match new key!" self._cache[key] = new_entry bisect.insort(self._expirations, (expiration, key)) def cache_get(self, name, rdtype): try: entry = self._cache[(name, rdtype)] except KeyError: return None else: t = time.time() ttl = max(0, int(entry.expiration - t)) if entry.rrset is not None: entry.rrset.update_ttl(ttl) if entry.soa_rrset is not None: entry.soa_rrset.update_ttl(ttl) return entry def cache_dump(self): keys = self._cache.keys() keys.sort() t = time.time() for key in keys: entry = self._cache[key] def query(self, qname, rdtype, rdclass=dns.rdataclass.IN): msg = dns.message.make_response(dns.message.make_query(qname, rdtype), True) try: l = self._query(qname, rdtype, rdclass, 0, self.SRC_NONAUTH_ANS) except ServFail: msg.set_rcode(dns.rcode.SERVFAIL) else: msg.set_rcode(l[-1]) for rrset in l[:-1]: if rrset is not None: new_rrset = msg.find_rrset(msg.answer, rrset.name, rrset.rdclass, rrset.rdtype, create=True) new_rrset.update(rrset) return msg, None def query_for_answer(self, qname, rdtype, rdclass=dns.rdataclass.IN, allow_noanswer=False): response, server = self.query(qname, rdtype, rdclass) if response.rcode() == dns.rcode.SERVFAIL: raise dns.resolver.NoNameservers() if allow_noanswer: answer_cls = DNSAnswerNoAnswerAllowed else: answer_cls = DNSAnswer return answer_cls(qname, rdtype, response, server) def query_multiple_for_answer(self, *query_tuples, **kwargs): allow_noanswer = kwargs.pop('allow_noanswer', False) answers = {} for query_tuple in query_tuples: try: answers[query_tuple] = 
self.query_for_answer(query_tuple[0], query_tuple[1], query_tuple[2], allow_noanswer=allow_noanswer) except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN, dns.resolver.NoNameservers) as e: answers[query_tuple] = e return answers def query_multiple(self, *query_tuples, **kwargs): responses = {} for query_tuple in query_tuples: responses[query_tuple] = self.query(query_tuple[0], query_tuple[1], query_tuple[2]) return responses def _get_answer(self, qname, rdtype, rdclass, max_source): # first check cache for answer entry = self.cache_get(qname, rdtype) if entry is not None and entry.source <= max_source: return [entry.rrset, entry.rcode] # check hints, if allowed if self.SRC_ADDITIONAL <= max_source and (qname, rdtype) in self._hints: return [self._hints[(qname, rdtype)], dns.rcode.NOERROR] return None def _query(self, qname, rdtype, rdclass, level, max_source, starting_domain=None): self.expire_cache() # check for max chain length if level > self.MAX_CHAIN: raise ServFail('SERVFAIL - resolution chain too long') ans = self._get_answer(qname, rdtype, rdclass, max_source) if ans: return ans # next check cache for alias ans = self._get_answer(qname, dns.rdatatype.CNAME, rdclass, max_source) if ans: return [ans[0]] + self._query(ans[0][0].target, rdtype, rdclass, level + 1, max_source) # now check for closest enclosing NS, DNAME, or hint closest_zone = qname # when rdtype is DS, start at the parent if rdtype == dns.rdatatype.DS and qname != dns.name.root: closest_zone = qname.parent() elif starting_domain is not None: assert qname.is_subdomain(starting_domain), 'qname must be a subdomain of starting_domain' closest_zone = starting_domain ns_names = {} # iterative resolution is necessary, so find the closest zone ancestor or DNAME while True: # if we are a proper superdomain, then look for DNAME if closest_zone != qname: entry = self.cache_get(closest_zone, dns.rdatatype.DNAME) if entry is not None and entry.rrset is not None: cname_rrset = 
Response.cname_from_dname(qname, entry.rrset) return [entry.rrset, cname_rrset] + self._query(cname_rrset[0].target, rdtype, rdclass, level + 1, max_source) # look for NS records in cache ans = self._get_answer(closest_zone, dns.rdatatype.NS, rdclass, self.SRC_ADDITIONAL) if ans and ans[0] is not None: ns_rrset = ans[0] for ns_rdata in ans[0]: addrs = set() for a_rdtype in dns.rdatatype.A, dns.rdatatype.AAAA: ans1 = self._get_answer(ns_rdata.target, a_rdtype, rdclass, self.SRC_ADDITIONAL) if ans1 and ans1[0]: for a_rdata in ans1[0]: addrs.add(IPAddr(a_rdata.address)) if addrs: ns_names[ns_rdata.target] = addrs else: ns_names[ns_rdata.target] = None # if there were NS records associated with the names, then # no need to continue if ns_names: break # otherwise, continue upwards until some are found try: closest_zone = closest_zone.parent() except dns.name.NoParent: raise ServFail('SERVFAIL - no NS RRs at root') ret = None soa_rrset = None rcode = None # iterate, following referrals down the namespace tree while True: bailiwick = ns_rrset.name is_referral = False # query names first for which there are addresses ns_names_with_addresses = [n for n in ns_names if ns_names[n] is not None] random.shuffle(ns_names_with_addresses) ns_names_without_addresses = list(set(ns_names).difference(ns_names_with_addresses)) random.shuffle(ns_names_without_addresses) all_ns_names = ns_names_with_addresses + ns_names_without_addresses previous_valid_answer = set() for query_cls in self._query_cls: # query each server until we get a match for ns_name in all_ns_names: is_referral = False if ns_names[ns_name] is None: # first get the addresses associated with each name ns_names[ns_name] = set() for a_rdtype in dns.rdatatype.A, dns.rdatatype.AAAA: if ns_name.is_subdomain(bailiwick): if bailiwick == dns.name.root: sd = bailiwick else: sd = bailiwick.parent() else: sd = None try: a_rrset = self._query(ns_name, a_rdtype, dns.rdataclass.IN, level + 1, self.SRC_ADDITIONAL, 
starting_domain=sd)[-2] except ServFail: a_rrset = None if a_rrset is not None: for rdata in a_rrset: ns_names[ns_name].add(IPAddr(rdata.address)) for server in ns_names[ns_name].difference(previous_valid_answer): # server disallowed by policy if not self._allow_server(server): continue q = query_cls(qname, rdtype, rdclass, (server,), bailiwick, self._client_ipv4, self._client_ipv6, self._odd_ports.get((bailiwick, server), 53), cookie_jar=self._cookie_jar, cookie_standin=self._cookie_standin) q.execute(tm=self._transport_manager, th_factories=self._th_factories) is_referral = False if not q.responses: # No network connectivity continue server1, client_response = list(q.responses.items())[0] client, response = list(client_response.items())[0] server_cookie = response.get_server_cookie() if server_cookie is not None: self._cookie_jar[server1] = server_cookie if not (response.is_valid_response() and response.is_complete_response()): continue previous_valid_answer.add(server) soa_rrset = None rcode = response.message.rcode() # response is acceptable try: # first check for exact match ret = [[x for x in response.message.answer if x.name == qname and x.rdtype == rdtype and x.rdclass == rdclass][0]] except IndexError: try: # now look for DNAME dname_rrset = [x for x in response.message.answer if qname.is_subdomain(x.name) and qname != x.name and x.rdtype == dns.rdatatype.DNAME and x.rdclass == rdclass][0] except IndexError: try: # now look for CNAME cname_rrset = [x for x in response.message.answer if x.name == qname and x.rdtype == dns.rdatatype.CNAME and x.rdclass == rdclass][0] except IndexError: ret = [None] # no answer try: soa_rrset = [x for x in response.message.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.SOA][0] except IndexError: pass # cache the NS RRset else: cname_rrset = [x for x in response.message.answer if x.name == qname and x.rdtype == dns.rdatatype.CNAME and x.rdclass == rdclass][0] ret = [cname_rrset] else: # handle DNAME: 
return the DNAME, CNAME and (recursively) its chain cname_rrset = Response.cname_from_dname(qname, dname_rrset) ret = [dname_rrset, cname_rrset] if response.is_referral(qname, rdtype, rdclass, bailiwick): is_referral = True a_rrsets = {} min_ttl = None ret = None # if response is referral, then we follow it ns_rrset = [x for x in response.message.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.NS][0] ns_names = response.ns_ip_mapping_from_additional(ns_rrset.name, bailiwick) for ns_name in ns_names: if not ns_names[ns_name]: ns_names[ns_name] = None else: # name is in bailiwick for a_rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): try: a_rrsets[a_rdtype] = response.message.find_rrset(response.message.additional, ns_name, a_rdtype, dns.rdataclass.IN) except KeyError: pass else: if min_ttl is None or a_rrsets[a_rdtype].ttl < min_ttl: min_ttl = a_rrsets[a_rdtype].ttl for a_rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): if a_rdtype in a_rrsets: a_rrsets[a_rdtype].update_ttl(min_ttl) self.cache_put(ns_name, a_rdtype, a_rrsets[a_rdtype], self.SRC_ADDITIONAL, dns.rcode.NOERROR, None, None) else: self.cache_put(ns_name, a_rdtype, None, self.SRC_ADDITIONAL, dns.rcode.NOERROR, None, min_ttl) if min_ttl is not None: ns_rrset.update_ttl(min_ttl) # cache the NS RRset self.cache_put(ns_rrset.name, dns.rdatatype.NS, ns_rrset, self.SRC_NONAUTH_AUTH, rcode, None, None) break elif response.is_authoritative(): terminal = True a_rrsets = {} min_ttl = None # if response is authoritative (and not a referral), then we return it try: ns_rrset = [x for x in response.message.answer + response.message.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.NS][0] except IndexError: pass else: ns_names = response.ns_ip_mapping_from_additional(ns_rrset.name, bailiwick) for ns_name in ns_names: if not ns_names[ns_name]: ns_names[ns_name] = None else: # name is in bailiwick for a_rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): try: a_rrsets[a_rdtype] = 
response.message.find_rrset(response.message.additional, ns_name, a_rdtype, dns.rdataclass.IN) except KeyError: pass else: if min_ttl is None or a_rrsets[a_rdtype].ttl < min_ttl: min_ttl = a_rrsets[a_rdtype].ttl for a_rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA): if a_rdtype in a_rrsets: a_rrsets[a_rdtype].update_ttl(min_ttl) self.cache_put(ns_name, a_rdtype, a_rrsets[a_rdtype], self.SRC_ADDITIONAL, dns.rcode.NOERROR, None, None) else: self.cache_put(ns_name, a_rdtype, None, self.SRC_ADDITIONAL, dns.rcode.NOERROR, None, min_ttl) if min_ttl is not None: ns_rrset.update_ttl(min_ttl) self.cache_put(ns_rrset.name, dns.rdatatype.NS, ns_rrset, self.SRC_AUTH_AUTH, rcode, None, None) if ret[-1] == None: self.cache_put(qname, rdtype, None, self.SRC_AUTH_ANS, rcode, soa_rrset, None) else: for rrset in ret: self.cache_put(rrset.name, rrset.rdtype, rrset, self.SRC_AUTH_ANS, rcode, None, None) if ret[-1].rdtype == dns.rdatatype.CNAME: ret += self._query(ret[-1][0].target, rdtype, rdclass, level + 1, self.SRC_NONAUTH_ANS) terminal = False if terminal: ret.append(rcode) return ret # if referral, then break if is_referral: break # if referral, then break if is_referral: break # if not referral, then we're done iterating if not is_referral: break # if we were only to ask the parent, then we're done if starting_domain is not None: break # otherwise continue onward, looking for an authoritative answer # return non-authoritative answer if ret is not None: terminal = True if ret[-1] == None: self.cache_put(qname, rdtype, None, self.SRC_NONAUTH_ANS, rcode, soa_rrset, None) else: for rrset in ret: self.cache_put(rrset.name, rrset.rdtype, rrset, self.SRC_NONAUTH_ANS, rcode, None, None) if ret[-1].rdtype == dns.rdatatype.CNAME: ret += self._query(ret[-1][0].target, rdtype, rdclass, level + 1, self.SRC_NONAUTH_ANS) terminal = False if terminal: ret.append(rcode) return ret raise ServFail('SERVFAIL - no valid responses') class PrivateFullResolver(FullResolver): default_th_factory = 
transport.DNSQueryTransportHandlerDNSPrivateFactory() def main(): import sys import getopt def usage(): sys.stderr.write('Usage: %s [...]\n' % (sys.argv[0])) sys.exit(1) try: opts, args = getopt.getopt(sys.argv[1:], '') opts = dict(opts) except getopt.error: usage() if len(args) < 2: usage() if len(args) < 3: r = get_standard_resolver() else: r = Resolver([IPAddr(x) for x in sys.argv[3:]], query.StandardRecursiveQuery) a = r.query_for_answer(dns.name.from_text(args[0]), dns.rdatatype.from_text(args[1])) print('Response for %s/%s:' % (args[0], args[1])) print(' from %s: %s (%d bytes)' % (a.server, repr(a.response), len(a.response.to_wire()))) print(' answer:\n %s' % (a.rrset)) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/response.py0000644000175000017500000017053515001472663015705 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2025 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . 
# from __future__ import unicode_literals import base64 import binascii import copy import errno import codecs import datetime import hashlib import io import logging import socket import struct import time # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict # python3/python2 dual compatibility try: from html import escape except ImportError: from cgi import escape import dns.edns, dns.flags, dns.message, dns.rcode, dns.rdataclass, dns.rdatatype, dns.rrset from . import base32 from . import crypto from . import format as fmt from .ipaddr import IPAddr from .util import tuple_to_dict lb2s = fmt.latin1_binary_to_string class DNSResponse: '''A DNS response, including meta information''' def __init__(self, message, msg_size, error, errno1, history, response_time, query, server_cookie, server_cookie_status, review_history=True): self.message = message self.msg_size = msg_size self.error = error self.errno = errno1 self.history = history self.response_time = response_time self.query = query self.server_cookie = server_cookie self.server_cookie_status = server_cookie_status self.effective_flags = None self.effective_edns = None self.effective_edns_max_udp_payload = None self.effective_edns_flags = None self.effective_edns_options = None self.effective_tcp = None self.effective_server_cookie_status = None self.udp_attempted = None self.udp_responsive = None self.tcp_attempted = None self.tcp_responsive = None self.responsive_cause_index = None if review_history: self._review_history() def __str__(self): from . 
import query as Q if self.message is not None: return repr(self.message) else: return Q.response_errors.get(self.error) def __repr__(self): return '<%s: "%s">' % (self.__class__.__name__, str(self)) @classmethod def _query_tag_bind(cls, tcp, flags, edns, edns_flags, edns_max_udp_payload, edns_options, qname): s = [] if flags & dns.flags.RD: s.append('+') else: s.append('-') if edns >= 0: s.append('E(%d)' % (edns)) if tcp: s.append('T') if edns >= 0 and edns_flags & dns.flags.DO: s.append('D') if flags & dns.flags.CD: s.append('C') # Flags other than the ones commonly seen in queries if flags & dns.flags.AD: s.append('A') if flags & dns.flags.AA: s.append('a') if flags & dns.flags.TC: s.append('t') if flags & dns.flags.RA: s.append('r') if edns >= 0: # EDNS max UDP payload s.append('P(%d)' % edns_max_udp_payload) # EDNS flags other than DO if edns_flags & ~dns.flags.DO: s.append('F(0x%x)' % edns_flags) # other options for opt in edns_options: if opt.otype == 3: # NSID s.append('N') elif opt.otype == 8: # EDNS Client Subnet s.append('s') elif opt.otype == 10: # DNS cookies s.append('K') if qname.to_text() != qname.to_text().lower(): s.append('X') return s @classmethod def _query_tag_human(cls, tcp, flags, edns, edns_flags, edns_max_udp_payload, edns_options, qname): s = '' if tcp: s += 'TCP_' else: s += 'UDP_' if flags & dns.flags.RD: s += '+' else: s += '-' if flags & dns.flags.CD: s += 'C' # Flags other than the ones commonly seen in queries if flags & dns.flags.AD: s += 'A' if flags & dns.flags.AA: s += 'a' if flags & dns.flags.TC: s += 't' if flags & dns.flags.RA: s += 'r' s += '_' if edns < 0: s += 'NOEDNS_' else: s += 'EDNS%d_' % (edns) # EDNS max UDP payload s += '%d_' % edns_max_udp_payload if edns_flags & dns.flags.DO: s += 'D' # EDNS flags other than DO if edns_flags & ~dns.flags.DO: s += '%d' % edns_flags if edns_options: s += '_' # other options for opt in edns_options: if opt.otype == 3: # NSID s += 'N' elif opt.otype == 8: # EDNS Client Subnet s += 's' 
elif opt.otype == 10: # DNS cookies s += 'K' else: # DNS cookies s += 'O(%d)' % opt.otype if qname.to_text() != qname.to_text().lower(): s += '_0x20' return s def nsid_val(self): if self.message is None: return None if self.message.edns < 0: return None try: nsid_opt = [o for o in self.message.options if o.otype == dns.edns.NSID][0] except IndexError: return None data = nsid_opt.data # dnspython <= 1.12.x uses strings, but dnspython 1.13 uses bytearray (for python3) if isinstance(data, str): data = bytearray(data) if all(c >= 0x20 and c <= 0x7e for c in data): nsid_val = data.decode('ascii') else: nsid_val = '0x' + lb2s(binascii.hexlify(data)) return nsid_val def request_cookie_tag(self): from . import query as Q if self.effective_server_cookie_status == Q.DNS_COOKIE_NO_COOKIE: return 'NO_COOKIE' elif self.effective_server_cookie_status == Q.DNS_COOKIE_IMPROPER_LENGTH: return 'MALFORMED_COOKIE' elif self.effective_server_cookie_status == Q.DNS_COOKIE_CLIENT_COOKIE_ONLY: return 'CLIENT_COOKIE_ONLY' elif self.effective_server_cookie_status == Q.DNS_COOKIE_SERVER_COOKIE_FRESH: return 'VALID_SERVER_COOKIE' elif self.effective_server_cookie_status == Q.DNS_COOKIE_SERVER_COOKIE_BAD: return 'INVALID_SERVER_COOKIE' else: raise Exception('Unknown cookie status!') def response_cookie_tag(self): if self.message is None: return 'ERROR' if self.message.edns < 0: return 'NO_EDNS' try: cookie_opt = [o for o in self.message.options if o.otype == 10][0] except IndexError: return 'NO_COOKIE_OPT' if len(cookie_opt.data) < 8 or len(cookie_opt.data) > 40: return 'MALFORMED_COOKIE' elif len(cookie_opt.data) == 8: return 'CLIENT_COOKIE_ONLY' else: return 'CLIENT_AND_SERVER_COOKIE' def initial_query_tag(self): return ''.join(self._query_tag_human(self.query.tcp, self.query.flags, self.query.edns, self.query.edns_flags, self.query.edns_max_udp_payload, self.query.edns_options, self.query.qname)) def effective_query_tag(self): return ''.join(self._query_tag_human(self.effective_tcp, 
self.effective_flags, self.effective_edns, self.effective_edns_flags, self.query.edns_max_udp_payload, self.effective_edns_options, self.query.qname)) def section_rr_count(self, section): if self.message is None: return None n = 0 for i in section: n += len(i) if section is self.message.additional and self.message.edns >= 0: n += 1 return n def section_digest(self, section): if self.message is None: return None d = '' rrsets = section[:] rrsets.sort() for rrset in rrsets: d += RRsetInfo.rrset_canonicalized_to_wire(rrset, rrset.name, rrset.ttl) return 'md5'+hashlib.md5(d).hexdigest() def retries(self): return len(self.history) def total_response_time(self): t = self.response_time for retry in self.history: t += retry.response_time return t def get_cookie_opt(self): if self.message is None: return None try: return [o for o in self.message.options if o.otype == 10][0] except IndexError: return None def get_server_cookie(self): cookie_opt = self.get_cookie_opt() if cookie_opt is not None and len(cookie_opt.data) > 8: return cookie_opt.data[8:] return None def copy(self): clone = DNSResponse(self.message, self.msg_size, self.error, self.errno, self.history, self.response_time, self.query, self.server_cookie, self.server_cookie_status, review_history=False) clone.set_effective_request_options(self.effective_flags, self.effective_edns, self.effective_edns_max_udp_payload, self.effective_edns_flags, self.effective_edns_options, self.effective_tcp, self.effective_server_cookie_status) clone.set_responsiveness(self.udp_attempted, self.udp_responsive, self.tcp_attempted, self.tcp_responsive, self.responsive_cause_index, self.responsive_cause_index_tcp) return clone def set_effective_request_options(self, flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp, server_cookie_status): self.effective_flags = flags self.effective_edns = edns self.effective_edns_max_udp_payload = edns_max_udp_payload self.effective_edns_flags = edns_flags self.effective_edns_options = 
edns_options self.effective_tcp = tcp self.effective_server_cookie_status = server_cookie_status def set_responsiveness(self, udp_attempted, udp_responsive, tcp_attempted, tcp_responsive, responsive_cause_index, responsive_cause_index_tcp): self.udp_attempted = udp_attempted self.udp_responsive = udp_responsive self.tcp_attempted = tcp_attempted self.tcp_responsive = tcp_responsive self.responsive_cause_index = responsive_cause_index self.responsive_cause_index_tcp = responsive_cause_index_tcp def _review_history(self): from . import query as Q flags = self.query.flags edns = self.query.edns edns_max_udp_payload = self.query.edns_max_udp_payload edns_flags = self.query.edns_flags edns_options = copy.deepcopy(self.query.edns_options) server_cookie_status = self.server_cookie_status # mark whether TCP or UDP was attempted initially tcp_attempted = tcp = self.query.tcp udp_attempted = not tcp tcp_responsive = False udp_responsive = False tcp_valid = False udp_valid = False #TODO - there could be room for both a responsiveness check and a valid # check here, rather than just a valid check responsive_cause_index = None responsive_cause_index_tcp = tcp prev_index = None for i, retry in enumerate(self.history): # mark if TCP or UDP was attempted prior to this retry if tcp: tcp_attempted = True else: udp_attempted = True # Mark responsiveness if this retry wasn't caused by network error # or timeout. if retry.cause not in (Q.RETRY_CAUSE_NETWORK_ERROR, Q.RETRY_CAUSE_TIMEOUT): if tcp: tcp_responsive = True else: udp_responsive = True # If the last cause/action resulted in a valid response where there # wasn't previously on the same protocol, then mark the # cause/action. 
if retry.cause in (Q.RETRY_CAUSE_TC_SET, Q.RETRY_CAUSE_DIAGNOSTIC): if tcp: if responsive_cause_index is None and \ not tcp_valid and prev_index is not None and self.history[prev_index].action != Q.RETRY_ACTION_USE_TCP: responsive_cause_index = prev_index responsive_cause_index_tcp = tcp tcp_valid = True else: if responsive_cause_index is None and \ not udp_valid and prev_index is not None and self.history[prev_index].action != Q.RETRY_ACTION_USE_UDP: responsive_cause_index = prev_index responsive_cause_index_tcp = tcp udp_valid = True if retry.action == Q.RETRY_ACTION_NO_CHANGE: pass elif retry.action == Q.RETRY_ACTION_USE_TCP: tcp = True elif retry.action == Q.RETRY_ACTION_USE_UDP: tcp = False elif retry.action == Q.RETRY_ACTION_SET_FLAG: flags |= retry.action_arg elif retry.action == Q.RETRY_ACTION_CLEAR_FLAG: flags &= ~retry.action_arg elif retry.action == Q.RETRY_ACTION_DISABLE_EDNS: edns = -1 elif retry.action == Q.RETRY_ACTION_CHANGE_UDP_MAX_PAYLOAD: edns_max_udp_payload = retry.action_arg tcp = False elif retry.action == Q.RETRY_ACTION_SET_EDNS_FLAG: edns_flags |= retry.action_arg elif retry.action == Q.RETRY_ACTION_CLEAR_EDNS_FLAG: edns_flags &= ~retry.action_arg elif retry.action == Q.RETRY_ACTION_ADD_EDNS_OPTION: #TODO option data edns_options.append(dns.edns.GenericOption(retry.action_arg, b'')) elif retry.action == Q.RETRY_ACTION_REMOVE_EDNS_OPTION: filtered_options = [x for x in edns_options if retry.action_arg == x.otype] if filtered_options: edns_options.remove(filtered_options[0]) # If COOKIE option was removed, then reset # server_cookie_status if filtered_options[0].otype == 10: server_cookie_status = Q.DNS_COOKIE_NO_COOKIE elif retry.action == Q.RETRY_ACTION_CHANGE_SPORT: pass elif retry.action == Q.RETRY_ACTION_CHANGE_EDNS_VERSION: edns = retry.action_arg elif retry.action == Q.RETRY_ACTION_UPDATE_DNS_COOKIE: server_cookie_status = Q.DNS_COOKIE_SERVER_COOKIE_FRESH prev_index = i # Mark responsiveness if the ultimate query didn't result in 
network # error or timeout. if self.error not in (Q.RESPONSE_ERROR_NETWORK_ERROR, Q.RESPONSE_ERROR_TIMEOUT): if tcp: tcp_responsive = True else: udp_responsive = True # If the last cause/action resulted in a valid response where there # wasn't previously on the same protocol, then mark the cause/action. if self.is_valid_response(): if tcp: if responsive_cause_index is None and \ not tcp_valid and prev_index is not None and self.history[prev_index].action != Q.RETRY_ACTION_USE_TCP: responsive_cause_index = prev_index responsive_cause_index_tcp = tcp else: if responsive_cause_index is None and \ not udp_valid and prev_index is not None and self.history[prev_index].action != Q.RETRY_ACTION_USE_UDP: responsive_cause_index = prev_index responsive_cause_index_tcp = tcp # If EDNS was effectively disabled, reset EDNS options if edns < 0: edns_max_udp_payload = None edns_flags = 0 edns_options = [] server_cookie_status = Q.DNS_COOKIE_NO_COOKIE self.set_effective_request_options(flags, edns, edns_max_udp_payload, edns_flags, edns_options, tcp, server_cookie_status) self.set_responsiveness(udp_attempted, udp_responsive, tcp_attempted, tcp_responsive, responsive_cause_index, responsive_cause_index_tcp) def recursion_desired(self): '''Return True if the recursion desired (RD) bit was set in the request to the server.''' return self.is_valid_response() and self.is_complete_response() and \ bool(self.effective_flags & dns.flags.RD) def recursion_available(self): '''Return True if the server indicated that recursion was available.''' return self.is_valid_response() and self.is_complete_response() and \ bool(self.message.flags & dns.flags.RA) def recursion_desired_and_available(self): '''Return True if the recursion desired (RD) bit was set in the request to the server AND the server indicated that recursion was available.''' return self.is_valid_response() and self.is_complete_response() and \ bool(self.effective_flags & dns.flags.RD) and \ bool(self.message.flags & dns.flags.RA) 
def dnssec_requested(self): '''Return True if the DNSSEC OK (DO) bit was set in the request to the server.''' return self.effective_edns >= 0 and self.effective_edns_flags & dns.flags.DO def is_valid_response(self): '''Return True if the message has a sane error code, namely NOERROR or NXDOMAIN.''' return self.message is not None and self.message.rcode() in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN) def is_complete_response(self): '''Return True if the message does not have the truncation (TC) bit set.''' return self.message is not None and not bool(self.message.flags & dns.flags.TC) def is_authoritative(self): '''Return True if the message has the authoritative answer (AA) bit set.''' return self.message is not None and bool(self.message.flags & dns.flags.AA) def is_referral(self, qname, rdtype, rdclass, bailiwick, proper=False): '''Return True if this response yields a referral for the queried name.''' if not (self.is_valid_response() and self.is_complete_response()): return False # if no bailiwick is specified, then we cannot classify it as a # referral if bailiwick is None: return False # if the qname is not a proper subdomain of the bailiwick, then it # is not a referral if not (qname != bailiwick and qname.is_subdomain(bailiwick)): return False # if the name exists in the answer section with the requested rdtype or # CNAME, then it can't be a referral if [x for x in self.message.answer if x.name == qname and x.rdtype in (rdtype, dns.rdatatype.CNAME) and x.rdclass == rdclass]: return False # if an SOA record with the given qname exists, then the server # is authoritative for the name, so it is a referral try: self.message.find_rrset(self.message.authority, qname, rdclass, dns.rdatatype.SOA) return False except KeyError: pass # if proper referral is requested and qname is equal to of an NS RRset # in the authority, then it is a referral if proper: if [x for x in self.message.authority if qname == x.name and x.rdtype == dns.rdatatype.NS and x.rdclass == rdclass]: 
            return True
        # if proper referral is NOT requested, qname is a subdomain of
        # (including equal to) an NS RRset in the authority, and qname is not
        # equal to bailiwick, then it is a referral
        else:
            if [x for x in self.message.authority if qname.is_subdomain(x.name) and bailiwick != x.name and x.rdtype == dns.rdatatype.NS and x.rdclass == rdclass]:
                return True
        return False

    def is_upward_referral(self, qname):
        '''Return True if this response yields an upward referral (i.e., a
        name that is a superdomain of qname).'''

        if not (self.is_valid_response() and self.is_complete_response()):
            return False
        # non-authoritative, with an authority-section NS RRset whose owner
        # name is a proper superdomain of qname
        return bool(not self.is_authoritative() and \
                [x for x in self.message.authority if x.name != qname and qname.is_subdomain(x.name)])

    def is_answer(self, qname, rdtype, include_cname=True):
        '''Return True if this response yields an answer for the queried name
        and type in the answer section.  If include_cname is False, then only
        non-CNAME records count.'''

        if not (self.is_valid_response() and self.is_complete_response()):
            return False
        # for ANY queries, any answer-section record owned by qname counts
        if rdtype == dns.rdatatype.ANY and [x for x in self.message.answer if x.name == qname]:
            return True
        rdtypes = [rdtype]
        if include_cname:
            rdtypes.append(dns.rdatatype.CNAME)
        if [x for x in self.message.answer if x.name == qname and x.rdtype in rdtypes]:
            return True
        return False

    def is_nxdomain(self, qname, rdtype):
        '''Return True if this response indicates that the queried name does
        not exist (i.e., is NXDOMAIN).'''

        if not (self.is_valid_response() and self.is_complete_response()):
            return False

        # an answer (or CNAME) for qname contradicts NXDOMAIN
        if [x for x in self.message.answer if x.name == qname and x.rdtype in (rdtype, dns.rdatatype.CNAME)]:
            return False

        if self.message.rcode() == dns.rcode.NXDOMAIN:
            return True

        return False

    def is_delegation(self, qname, rdtype):
        '''Return True if this response (from a request to a server
        authoritative for the immediate parent) yields NS records for the name
        or provides a referral or NXDOMAIN or no data response.'''

        # if NS or SOA records were found in the answer or authority section
        return self.message.get_rrset(self.message.answer, qname, dns.rdataclass.IN, dns.rdatatype.NS) is not None or \
                self.message.get_rrset(self.message.authority, qname, dns.rdataclass.IN, dns.rdatatype.NS) is not None or \
                self.message.get_rrset(self.message.authority, qname, dns.rdataclass.IN, dns.rdatatype.SOA) is not None

    def not_delegation(self, qname, rdtype):
        # logical negation of is_delegation(); kept as a method so it can be
        # passed around as a predicate
        return not self.is_delegation(qname, rdtype)

    def ns_ip_mapping_from_additional(self, qname, bailiwick=None):
        # Map each NS target in the NS RRset for qname to the set of IP
        # addresses found for it in the additional section (glue).
        ip_mapping = {}

        if not (self.is_valid_response() and self.is_complete_response()):
            return ip_mapping

        try:
            ns_rrset = self.message.find_rrset(self.message.answer, qname, dns.rdataclass.IN, dns.rdatatype.NS)
        except KeyError:
            try:
                ns_rrset = self.message.find_rrset(self.message.authority, qname, dns.rdataclass.IN, dns.rdatatype.NS)
            except KeyError:
                return ip_mapping

        # iterate over each RR in the RR RRset
        for ns_rr in ns_rrset:
            ip_mapping[ns_rr.target] = set()

            # out-of-bailiwick targets don't get glue; leave the set empty
            if bailiwick is not None and not ns_rr.target.is_subdomain(bailiwick):
                continue

            for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA):
                try:
                    a_rrset = self.message.find_rrset(self.message.additional, ns_rr.target, dns.rdataclass.IN, rdtype)
                except KeyError:
                    continue

                ip_mapping[ns_rr.target].update([IPAddr(a_rr.to_text()) for a_rr in a_rrset])

        return ip_mapping

    def serialize_meta(self):
        # Return an OrderedDict of response metadata (rcode, EDNS version,
        # per-section counts/digests, timing, retries, and effective query
        # options), suitable for JSON serialization.
        from . import query as Q

        d = OrderedDict()

        # populate history, if not already populated
        if self.effective_flags is None:
            self._review_history()

        if self.message is None:
            d['error'] = Q.response_errors[self.error]
            if self.errno is not None:
                errno_name = errno.errorcode.get(self.errno, None)
                if errno_name is not None:
                    d['errno'] = errno_name
        else:
            d['rcode'] = dns.rcode.to_text(self.message.rcode())
            if self.message.edns >= 0:
                d['edns_version'] = self.message.edns
            d['answer'] = OrderedDict((
                ('count', self.section_rr_count(self.message.answer)),
                ('digest', self.section_digest(self.message.answer)),
            ))
            d['authority'] = OrderedDict((
                ('count', self.section_rr_count(self.message.authority)),
                ('digest', self.section_digest(self.message.authority)),
            ))
            d['additional'] = OrderedDict((
                ('count', self.section_rr_count(self.message.additional)),
                ('digest', self.section_digest(self.message.additional)),
            ))
            # empty sections carry no digest
            if not d['answer']['count']:
                del d['answer']['digest']
            if not d['authority']['count']:
                del d['authority']['digest']
            if not d['additional']['count']:
                del d['additional']['digest']

        if self.msg_size is not None:
            d['msg_size'] = self.msg_size
        d['time_elapsed'] = int(self.response_time * 1000)
        d['retries'] = self.retries()
        if self.history:
            d['cumulative_response_time'] = int(self.total_response_time() * 1000)
        d['effective_query_options'] = OrderedDict((
            ('flags', self.effective_flags),
            ('edns_version', self.effective_edns),
            ('edns_max_udp_payload', self.effective_edns_max_udp_payload),
            ('edns_flags', self.effective_edns_flags),
            ('edns_options', []),
        ))
        for o in self.effective_edns_options:
            # EDNS options are serialized as (type, hex-encoded wire value)
            s = io.BytesIO()
            o.to_wire(s)
            d['effective_query_options']['edns_options'].append((o.type, binascii.hexlify(s.getvalue())))
        d['effective_query_options']['tcp'] = self.effective_tcp

        if self.responsive_cause_index is not None:
            d['responsiveness_impediment'] = OrderedDict((
                ('cause', Q.retry_causes[self.history[self.responsive_cause_index].cause]),
                ('action', Q.retry_actions[self.history[self.responsive_cause_index].action])
            ))

        return d

    def serialize(self):
        # Serialize the raw response: base64-encoded wire format (or the
        # error/errno if no message was received), size, timing, and the
        # per-attempt retry history.
        from . import query as Q

        d = OrderedDict()
        if self.message is None:
            d['message'] = None
            d['error'] = Q.response_errors[self.error]
            if self.errno is not None:
                errno_name = errno.errorcode.get(self.errno, None)
                if errno_name is not None:
                    d['errno'] = errno_name
        else:
            d['message'] = lb2s(base64.b64encode(self.message.to_wire()))
        if self.msg_size is not None:
            d['msg_size'] = self.msg_size
        d['time_elapsed'] = int(self.response_time * 1000)
        d['history'] = []
        for retry in self.history:
            d['history'].append(retry.serialize())
        return d

    @classmethod
    def deserialize(cls, d, query, server_cookie, server_cookie_status):
        # Reconstruct a DNSResponse from the output of serialize().
        from . import query as Q

        if 'msg_size' in d:
            msg_size = int(d['msg_size'])
        else:
            msg_size = None
        if 'error' in d:
            error = Q.response_error_codes[d['error']]
        else:
            error = None
        if 'errno' in d:
            # compatibility with version 1.0
            if isinstance(d['errno'], int):
                errno1 = d['errno']
            else:
                if hasattr(errno, d['errno']):
                    errno1 = getattr(errno, d['errno'])
                else:
                    errno1 = None
        else:
            errno1 = None

        if d['message'] is None:
            message = None
        else:
            wire = base64.b64decode(d['message'])
            try:
                message = dns.message.from_wire(wire)
            except Exception as e:
                # a wire blob that cannot be parsed is recorded as an error
                message = None
                if isinstance(e, (struct.error, dns.exception.FormError)):
                    error = Q.RESPONSE_ERROR_FORMERR
                #XXX need to determine how to handle non-parsing
                # validation errors with dnspython (e.g., signature with
                # no keyring)
                else:
                    error = Q.RESPONSE_ERROR_OTHER

        # compatibility with version 1.0
        if 'response_time' in d:
            response_time = d['response_time']
        else:
            response_time = d['time_elapsed']/1000.0
        history = []
        for retry in d['history']:
            history.append(Q.DNSQueryRetryAttempt.deserialize(retry))
        return DNSResponse(message, msg_size, error, errno1, history, response_time, query, server_cookie, server_cookie_status)
class DNSResponseComponent(object):
    '''Base class for items extracted from DNS responses; records which
    (server, client) address pairs produced responses containing the item.'''

    def __init__(self):
        self.servers_clients = {}

    def add_server_client(self, server, client, response):
        # group observed responses by the (server, client) address pair
        self.servers_clients.setdefault((server, client), []).append(response)

    @classmethod
    def insert_into_list(cls, component_info, component_info_list, server, client, response):
        # Reuse an equivalent entry already present in the list, if any;
        # otherwise append the new one.  Either way, record the provenance.
        try:
            component_info = component_info_list[component_info_list.index(component_info)]
        except ValueError:
            component_info_list.append(component_info)
        component_info.add_server_client(server, client, response)
        return component_info


class RDataMeta(DNSResponseComponent):
    '''Metadata for a single rdata: owner name, TTL, type, the rdata itself,
    and the RRsets it appears in.'''

    def __init__(self, name, ttl, rdtype, rdata):
        super(RDataMeta, self).__init__()
        self.name = name
        self.ttl = ttl
        self.rdtype = rdtype
        self.rdata = rdata
        self.rrset_info = set()


class DNSKEYMeta(DNSResponseComponent):
    '''Metadata for a single DNSKEY RR: its key tag (with and without the
    revoke bit), key length, and analysis warnings/errors.'''

    def __init__(self, name, rdata, ttl):
        super(DNSKEYMeta, self).__init__()
        self.name = name
        self.rdata = rdata
        self.ttl = ttl
        self.warnings = []
        self.errors = []
        self.rrset_info = []

        self.key_tag = self.calc_key_tag(rdata)
        self.key_tag_no_revoke = self.calc_key_tag(rdata, True)
        self.key_len = self.calc_key_len(rdata)

    def __str__(self):
        return 'DNSKEY for %s (algorithm %d (%s), key tag %d)' % \
                (fmt.humanize_name(self.name), self.rdata.algorithm,
                        fmt.DNSKEY_ALGORITHMS.get(self.rdata.algorithm, self.rdata.algorithm),
                        self.key_tag)
    @classmethod
    def calc_key_tag(cls, rdata, clear_revoke=False):
        '''Return the key_tag for the key, as specified in RFC 4034.  If
        clear_revoke is True, then clear the revoke flag of the DNSKEY RR
        first.'''

        # python3/python2 dual compatibility
        if isinstance(rdata.key, bytes):
            if isinstance(rdata.key, str):
                # python2: bytes is str, so indexing yields a 1-char string
                map_func = lambda x, y: ord(x[y])
            else:
                # python3: indexing bytes yields an int
                map_func = lambda x, y: x[y]
        else:
            map_func = lambda x, y: struct.unpack(b'B',x[y])[0]

        # algorithm 1 is a special case: the tag is taken from the last
        # bytes of the key itself (RFC 4034, Appendix B.1)
        if rdata.algorithm == 1:
            b1 = map_func(rdata.key, -3)
            b2 = map_func(rdata.key, -2)
            return (b1 << 8) | b2

        if clear_revoke:
            flags = rdata.flags & (~fmt.DNSKEY_FLAGS['revoke'])
        else:
            flags = rdata.flags

        key_str = struct.pack(b'!HBB', flags, rdata.protocol, rdata.algorithm) + rdata.key

        # checksum over the DNSKEY rdata, per RFC 4034 Appendix B:
        # even-offset bytes contribute to the high octet, odd to the low
        ac = 0
        for i in range(len(key_str)):
            b = map_func(key_str, i)
            if i & 1:
                ac += b
            else:
                ac += (b << 8)
        # fold the carry back in and truncate to 16 bits
        ac += (ac >> 16) & 0xffff
        return ac & 0xffff

    @classmethod
    def calc_key_len(cls, rdata):
        '''Return the length of the key modulus, in bits.'''

        key_str = rdata.key

        # python3/python2 dual compatibility
        if isinstance(rdata.key, bytes):
            if isinstance(rdata.key, str):
                map_func = lambda x, y: ord(x[y])
            else:
                map_func = lambda x, y: x[y]
        else:
            map_func = lambda x, y: struct.unpack(b'B',x[y])[0]

        # RSA keys: exponent-length prefix, exponent, then modulus (RFC 3110)
        if rdata.algorithm in (1,5,7,8,10):
            try:
                # get the exponent length
                e_len = map_func(key_str, 0)
            except IndexError:
                return 0

            offset = 1
            if e_len == 0:
                # a zero first octet means the length is in the next two octets
                try:
                    b1 = map_func(key_str, 1)
                    b2 = map_func(key_str, 2)
                except IndexError:
                    return 0
                e_len = (b1 << 8) | b2
                offset = 3

            # get the exponent
            offset += e_len

            # get the modulus
            key_len = len(key_str) - offset

            # if something went wrong here, use key length of rdata key
            if key_len <= 0:
                return len(key_str)<<3

            return key_len << 3

        # DSA keys: the T octet determines the size (RFC 2536)
        elif rdata.algorithm in (3,6):
            try:
                t = map_func(key_str, 0)
            except IndexError:
                return 0
            return (64 + t*8)<<3

        # GOST keys
        elif rdata.algorithm in (12,):
            return len(key_str)<<3

        # EC keys
        elif rdata.algorithm in (13,14):
            return len(key_str)<<3

        # EDDSA keys
        elif rdata.algorithm in (15,16):
            return len(key_str)<<3

        # other keys - just guess, based on the length of the raw key material
        else:
            return len(key_str)<<3
    def message_for_ds(self, clear_revoke=False):
        '''Return the string value suitable for hashing to create a DS
        record.'''

        if clear_revoke:
            flags = self.rdata.flags & (~fmt.DNSKEY_FLAGS['revoke'])
        else:
            flags = self.rdata.flags

        name_wire = self.name.canonicalize().to_wire()

        # write DNSKEY rdata in wire format
        rdata_wire = struct.pack(b'!HBB', flags, self.rdata.protocol, self.rdata.algorithm)

        return name_wire + rdata_wire + self.rdata.key

    def serialize(self, consolidate_clients=True, show_servers=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None):
        # Serialize this DNSKEY (and, at verbose log levels, the servers and
        # clients that returned it) to an OrderedDict.
        from .analysis import status as Status

        # the key id is shown at INFO and below, or whenever there are
        # warnings/errors that it would be attached to
        show_id = loglevel <= logging.INFO or \
                (self.warnings and loglevel <= logging.WARNING) or \
                (self.errors and loglevel <= logging.ERROR)

        d = OrderedDict()

        if html_format:
            formatter = lambda x: escape(x, True)
        else:
            formatter = lambda x: x

        if show_id:
            d['id'] = '%d/%d' % (self.rdata.algorithm, self.key_tag)

        if loglevel <= logging.DEBUG:
            d['description'] = formatter(str(self))
            d['flags'] = self.rdata.flags
            d['protocol'] = self.rdata.protocol
            d['algorithm'] = self.rdata.algorithm
            d['key'] = lb2s(base64.b64encode(self.rdata.key))
            d['ttl'] = self.ttl
            d['key_length'] = self.key_len
            d['key_tag'] = self.key_tag
            if self.rdata.flags & fmt.DNSKEY_FLAGS['revoke']:
                # the tag this key would have without the revoke bit set
                d['key_tag_pre_revoke'] = self.key_tag_no_revoke

            if html_format:
                # annotate numeric fields with human-readable equivalents
                flags = [t for (t,c) in fmt.DNSKEY_FLAGS.items() if c & self.rdata.flags]
                d['flags'] = '%d (%s)' % (self.rdata.flags, ', '.join(flags))
                d['protocol'] = '%d (%s)' % (self.rdata.protocol, fmt.DNSKEY_PROTOCOLS.get(self.rdata.protocol, self.rdata.protocol))
                d['algorithm'] = '%d (%s)' % (self.rdata.algorithm, fmt.DNSKEY_ALGORITHMS.get(self.rdata.algorithm, self.rdata.algorithm))
                d['ttl'] = '%d (%s)' % (self.ttl, fmt.humanize_time(self.ttl))
                if self.key_len is None:
                    d['key_length'] = 'unknown'
                else:
                    d['key_length'] = '%d bits' % (self.key_len)

        #TODO: put DNSKEY roles in meta, if it makes sense

        if loglevel <= logging.INFO:
            servers = tuple_to_dict(self.servers_clients)
            if consolidate_clients:
                servers = list(servers)
                servers.sort()
            d['servers'] = servers

            if map_ip_to_ns_name is not None:
                try:
                    ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers]))
                except IndexError:
                    ns_names = []
                ns_names.sort()
                d['ns_names'] = ns_names

            tags = set()
            nsids = set()
            for server,client in self.servers_clients:
                for response in self.servers_clients[(server,client)]:
                    tags.add(response.effective_query_tag())
                    nsid = response.nsid_val()
                    if nsid is not None:
                        nsids.add(nsid)
            if nsids:
                d['nsid_values'] = list(nsids)
                d['nsid_values'].sort()
            d['query_options'] = list(tags)
            d['query_options'].sort()

        if self.warnings and loglevel <= logging.WARNING:
            d['warnings'] = [w.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for w in self.warnings]

        if self.errors and loglevel <= logging.ERROR:
            d['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=html_format) for e in self.errors]
        return d


#XXX This class is necessary because of a bug in dnspython, in which
# comparisons are not properly made for the purposes of sorting rdata for RRSIG
# validation
class RdataWrapper(object):
    def __init__(self, rdata):
        self._rdata = rdata

    def __eq__(self, other):
        # compare by canonical (digestable) wire form
        return self._rdata.to_digestable() == other._rdata.to_digestable()

    def __lt__(self, other):
        return self._rdata.to_digestable() < other._rdata.to_digestable()


class RRsetInfo(DNSResponseComponent):
    # An RRset observed in responses, along with its RRSIGs, wildcard
    # information, and (for CNAMEs synthesized from DNAMEs) the DNAME info.
    def __init__(self, rrset, ttl_cmp, dname_info=None):
        super(RRsetInfo, self).__init__()
        self.rrset = rrset
        self.ttl_cmp = ttl_cmp
        self.rrsig_info = {}
        self.wildcard_info = {}

        self.dname_info = dname_info
        if self.dname_info is not None:
            # a CNAME synthesized from a DNAME shares the DNAME's provenance
            self.servers_clients = dname_info.servers_clients

        self.cname_info_from_dname = []

    def __str__(self):
        if self.rrset.rdtype == dns.rdatatype.NSEC3:
            return 'RRset for %s/%s' % (fmt.format_nsec3_name(self.rrset.name).rstrip('.'), dns.rdatatype.to_text(self.rrset.rdtype))
        else:
            return 'RRset for %s/%s' % (fmt.humanize_name(self.rrset.name), dns.rdatatype.to_text(self.rrset.rdtype))
return 'RRset for %s/%s' % (fmt.humanize_name(self.rrset.name), dns.rdatatype.to_text(self.rrset.rdtype)) def __repr__(self): return '<%s: "%s">' % (self.__class__.__name__, str(self)) def __eq__(self, other): if not (self.rrset == other.rrset and self.dname_info == other.dname_info): return False if self.ttl_cmp and self.rrset.ttl != other.rrset.ttl: return False return True def __hash__(self): return hash(id(self)) @classmethod def sorted_rdata_list(cls, rrset): rdata_list = [RdataWrapper(x) for x in rrset] rdata_list.sort() return rdata_list @classmethod def rrset_canonicalized_to_wire(cls, rrset, name, ttl): s = b'' name_wire = name.to_wire() rdata_list = cls.sorted_rdata_list(rrset) for rdataw in rdata_list: rdata = rdataw._rdata rdata_wire = rdata.to_digestable() rdata_len = len(rdata_wire) stuff = struct.pack(b'!HHIH', rrset.rdtype, rrset.rdclass, ttl, rdata_len) s += name_wire + stuff + rdata_wire return s def get_rrsig_info(self, rrsig): return self.rrsig_info[rrsig] def update_rrsig_info(self, server, client, response, section, rdclass, is_referral): try: rrsig_rrset = response.message.find_rrset(section, self.rrset.name, rdclass, dns.rdatatype.RRSIG, self.rrset.rdtype) for rrsig in rrsig_rrset: self.create_or_update_rrsig_info(rrsig, rrsig_rrset.ttl, server, client, response, rdclass, is_referral) except KeyError: pass if self.dname_info is not None: self.dname_info.update_rrsig_info(server, client, response, section, rdclass, is_referral) def create_or_update_rrsig_info(self, rrsig, ttl, server, client, response, rdclass, is_referral): try: rrsig_info = self.get_rrsig_info(rrsig) except KeyError: rrsig_info = self.rrsig_info[rrsig] = RDataMeta(self.rrset.name, ttl, dns.rdatatype.RRSIG, rrsig) rrsig_info.add_server_client(server, client, response) self.set_wildcard_info(rrsig, server, client, response, rdclass, is_referral) def create_or_update_cname_from_dname_info(self, synthesized_cname_info, server, client, response, rdclass): return 
self.insert_into_list(synthesized_cname_info, self.cname_info_from_dname, server, client, response) def is_wildcard(self, rrsig): if self.rrset.name[0] == b'*': return False return len(self.rrset.name) - 1 > rrsig.labels def reduce_wildcard(self, rrsig): if self.is_wildcard(rrsig): return dns.name.Name(('*',)+self.rrset.name.labels[-(rrsig.labels+1):]) return self.rrset.name def set_wildcard_info(self, rrsig, server, client, response, rdclass, is_referral): if self.is_wildcard(rrsig): wildcard_name = self.reduce_wildcard(rrsig) if wildcard_name not in self.wildcard_info: self.wildcard_info[wildcard_name] = NegativeResponseInfo(self.rrset.name, self.rrset.rdtype, self.ttl_cmp) self.wildcard_info[wildcard_name].add_server_client(server, client, response) self.wildcard_info[wildcard_name].create_or_update_nsec_info(server, client, response, rdclass, is_referral) def message_for_rrsig(self, rrsig): # write RRSIG in wire format rdata_wire = struct.pack(b'!HBBIIIH', rrsig.type_covered, rrsig.algorithm, rrsig.labels, rrsig.original_ttl, rrsig.expiration, rrsig.inception, rrsig.key_tag) signer_wire = rrsig.signer.canonicalize().to_wire() rrsig_canonicalized_wire = rdata_wire + signer_wire rrset_name = self.reduce_wildcard(rrsig).canonicalize() rrset_canonicalized_wire = self.rrset_canonicalized_to_wire(self.rrset, rrset_name, rrsig.original_ttl) return rrsig_canonicalized_wire + rrset_canonicalized_wire def serialize(self, consolidate_clients=True, show_servers=True, loglevel=logging.DEBUG, html_format=False, map_ip_to_ns_name=None): d = OrderedDict() if html_format: formatter = lambda x: escape(x, True) else: formatter = lambda x: x if self.rrset.rdtype == dns.rdatatype.NSEC3: d['name'] = formatter(fmt.format_nsec3_name(self.rrset.name)) else: d['name'] = formatter(lb2s(self.rrset.name.canonicalize().to_text())) d['ttl'] = self.rrset.ttl d['type'] = dns.rdatatype.to_text(self.rrset.rdtype) d['rdata'] = [] rdata_list = [RdataWrapper(x) for x in self.rrset] 
def cname_from_dname(name, dname_rrset):
    # Synthesize the CNAME RR that a DNAME implies for the given name
    # (RFC 6672): replace the DNAME owner-name suffix with its target.
    synthesized_cname = dns.name.Name(name.labels[:-len(dname_rrset.name)] + dname_rrset[0].target.labels)
    rrset = dns.rrset.RRset(name, dns.rdataclass.IN, dns.rdatatype.CNAME)
    rrset.update_ttl(dname_rrset.ttl)
    rrset.add(dns.rdtypes.ANY.CNAME.CNAME(dns.rdataclass.IN, dns.rdatatype.CNAME, synthesized_cname))
    return rrset


class NegativeResponseInfo(DNSResponseComponent):
    # A negative response (NXDOMAIN or no data) for qname/rdtype, together
    # with the supporting SOA RRsets and NSEC(3) proofs.
    def __init__(self, qname, rdtype, ttl_cmp):
        super(NegativeResponseInfo, self).__init__()
        self.qname = qname
        self.rdtype = rdtype
        self.ttl_cmp = ttl_cmp
        self.soa_rrset_info = []
        self.nsec_set_info = []

    def __repr__(self):
        return '<%s %s/%s>' % (self.__class__.__name__, self.qname, dns.rdatatype.to_text(self.rdtype))

    def __eq__(self, other):
        return self.qname == other.qname and self.rdtype == other.rdtype

    def __hash__(self):
        return hash(id(self))

    def create_or_update_soa_info(self, server, client, response, rdclass, is_referral):
        # prefer SOA RRsets whose owner name is an ancestor of qname; fall
        # back to any SOA in the authority section
        soa_rrsets = [x for x in response.message.authority if x.rdtype == dns.rdatatype.SOA and x.rdclass == rdclass and self.qname.is_subdomain(x.name)]
        if not soa_rrsets:
            soa_rrsets = [x for x in response.message.authority if x.rdtype == dns.rdatatype.SOA and x.rdclass == rdclass]
        # reverse sort puts the deepest (most specific) owner name first
        soa_rrsets.sort(reverse=True)
        try:
            soa_rrset = soa_rrsets[0]
        except IndexError:
            soa_rrset = None

        if soa_rrset is None:
            return None

        soa_rrset_info = RRsetInfo(soa_rrset, self.ttl_cmp)
        soa_rrset_info = self.insert_into_list(soa_rrset_info, self.soa_rrset_info, server, client, response)
        soa_rrset_info.update_rrsig_info(server, client, response, response.message.authority, rdclass, is_referral)

        return soa_rrset_info

    def create_or_update_nsec_info(self, server, client, response, rdclass, is_referral):
        # collect NSEC and NSEC3 proofs from the authority section, along
        # with their RRSIGs
        for rdtype in dns.rdatatype.NSEC, dns.rdatatype.NSEC3:
            nsec_rrsets = [x for x in response.message.authority if x.rdtype == rdtype and x.rdclass == rdclass]
            if not nsec_rrsets:
                continue

            nsec_set_info = NSECSet(nsec_rrsets, is_referral, self.ttl_cmp)
            nsec_set_info = self.insert_into_list(nsec_set_info, self.nsec_set_info, server, client, response)
            for name in nsec_set_info.rrsets:
                nsec_set_info.rrsets[name].update_rrsig_info(server, client, response, response.message.authority, rdclass, is_referral)


class NSECSet(DNSResponseComponent):
    # The set of NSEC/NSEC3 RRsets from a single response, indexed by owner
    # name, with NSEC3 parameters tracked per (salt, algorithm, iterations).
    def __init__(self, rrsets, referral, ttl_cmp):
        super(NSECSet, self).__init__()
        self.rrsets = {}
        self.referral = referral
        self.ttl_cmp = ttl_cmp
        self.nsec3_params = {}
        self.invalid_nsec3_owner = set()
        self.invalid_nsec3_hash = set()
        self.use_nsec3 = False
        for rrset in rrsets:
            #XXX There shouldn't be multiple NSEC(3) RRsets of the same owner
            # name in the same response, but check for it and address it (if
            # necessary)
            assert rrset.name not in self.rrsets
            self.rrsets[rrset.name] = RRsetInfo(rrset, self.ttl_cmp)

            if rrset.rdtype == dns.rdatatype.NSEC3:
                self.use_nsec3 = True
                key = (rrset[0].salt, rrset[0].algorithm, rrset[0].iterations)
                if key not in self.nsec3_params:
                    self.nsec3_params[key] = set()
                self.nsec3_params[key].add(rrset.name)
                # flag owner names and next-hash values that are invalid for
                # the hash algorithm in use
                if not self.is_valid_nsec3_name(rrset.name, rrset[0].algorithm):
                    self.invalid_nsec3_owner.add(rrset.name)
                if not self.is_valid_nsec3_hash(rrset[0].next, rrset[0].algorithm):
                    self.invalid_nsec3_hash.add(rrset.name)

        self.servers_clients = {}
    def __repr__(self):
        return '<%s>' % (self.__class__.__name__)

    def __eq__(self, other):
        return self.rrsets == other.rrsets

    def __hash__(self):
        return hash(id(self))

    def project(self, *names):
        '''Return a new NSECSet containing only the NSEC(3) RRsets with the
        given owner names.'''

        if set(names).difference(self.rrsets):
            raise ValueError('NSEC name(s) don\'t exist in NSECSet')

        obj = self.__class__((), self.referral, self.ttl_cmp)
        for name in names:
            obj.rrsets[name] = self.rrsets[name]
            rrset = obj.rrsets[name].rrset

            # rebuild the NSEC3 bookkeeping for the projected subset
            if rrset.rdtype == dns.rdatatype.NSEC3:
                obj.use_nsec3 = True
                key = (rrset[0].salt, rrset[0].algorithm, rrset[0].iterations)
                if key not in obj.nsec3_params:
                    obj.nsec3_params[key] = set()
                obj.nsec3_params[key].add(rrset.name)
                if not obj.is_valid_nsec3_name(rrset.name, rrset[0].algorithm):
                    obj.invalid_nsec3_owner.add(rrset.name)
                if not obj.is_valid_nsec3_hash(rrset[0].next, rrset[0].algorithm):
                    obj.invalid_nsec3_hash.add(rrset.name)

        obj.servers_clients = self.servers_clients.copy()
        return obj

    def add_server_client(self, server, client, response):
        # propagate provenance to each constituent RRset as well
        super(NSECSet, self).add_server_client(server, client, response)
        for name, rrset_info in self.rrsets.items():
            rrset_info.add_server_client(server, client, response)

    def create_or_update_rrsig_info(self, name, rrsig, ttl, server, client, response, rdclass, is_referral):
        self.rrsets[name].create_or_update_rrsig_info(rrsig, ttl, server, client, response, rdclass, is_referral)

    def is_valid_nsec3_name(self, nsec_name, algorithm):
        '''Return True if the first label of nsec_name is a plausible NSEC3
        owner name (correct length, valid base32hex) for the given hash
        algorithm.'''

        # python3/python2 dual compatibility
        if isinstance(nsec_name[0], str):
            map_func = lambda x: codecs.encode(x.upper(), 'latin1')
        else:
            map_func = lambda x: codecs.encode(chr(x).upper(), 'latin1')

        # check that NSEC3 name is valid
        if algorithm == 1:
            # base32hex encoding of SHA1 should be 32 bytes
            if len(nsec_name[0]) != 32:
                return False
            if [x for x in nsec_name[0] if map_func(x) not in base32.b32alphabet]:
                return False
        return True

    def is_valid_nsec3_hash(self, nsec3_hash, algorithm):
        '''Return True if nsec3_hash has the proper length for the given
        NSEC3 hash algorithm.'''

        # check that NSEC3 hash is valid
        if algorithm == 1:
            # length of SHA1 hash should be 20 bytes
            if len(nsec3_hash) != 20:
                return False
        return True

    def get_algorithm_support(self):
        # Partition the NSEC3 hash algorithms observed in this set into
        # supported and unsupported.
        valid_algorithms = set()
        invalid_algorithms = set()
        for (salt, alg, iterations) in self.nsec3_params:
            if crypto.nsec3_alg_is_supported(alg):
                valid_algorithms.add(alg)
            else:
                invalid_algorithms.add(alg)
        return valid_algorithms, invalid_algorithms

    def rdtype_exists_in_bitmap(self, nsec_name, rdtype):
        '''Return True if the rdtype exists in the bitmap of the NSEC(3)
        record corresponding to the name; False otherwise.'''

        # bitmap windows cover 256 types each (RFC 4034 4.1.2)
        rdtype_window = (rdtype >> 8)
        rdtype_bitmap = rdtype & 0x00ff
        bitmap_index, bitmap_offset = divmod(rdtype_bitmap, 8)
        for (window, bitmap) in self.rrsets[nsec_name].rrset[0].windows:
            try:
                # dnspython <= 1.12.x uses strings, but dnspython 1.13 uses bytearray (for python3)
                byte = bitmap[bitmap_index]
                if isinstance(bitmap, str):
                    byte = ord(byte)
                if window == rdtype_window and byte & (0x80 >> bitmap_offset):
                    return True
            except IndexError:
                pass
        return False

    def name_for_nsec3_next(self, nsec_name):
        '''Convert the next field of an NSEC3 RR to a DNS name.'''

        next_name = self.rrsets[nsec_name].rrset[0].next
        next_name_txt = base32.b32encode(next_name)
        # the zone is the owner name with its first (hash) label removed
        origin = dns.name.Name(nsec_name.labels[1:])
        return dns.name.from_text(next_name_txt, origin)

    def _nsec_covers_name(self, name, nsec_name):
        '''Return True if the NSEC record corresponding to NSEC name provided
        covers a name (i.e., proves its non-existence); False otherwise.'''

        prev_name = nsec_name
        if self.use_nsec3:
            next_name = self.name_for_nsec3_next(nsec_name)
            # test that NSEC3 names have the same parent
            try:
                if not (name.parent() == nsec_name.parent() == next_name.parent()):
                    return False
            except dns.name.NoParent:
                return False
        else:
            next_name = self.rrsets[nsec_name].rrset[0].next

        if prev_name == next_name:
            # a single NSEC(3) record spanning the whole zone covers
            # everything except its own owner name
            return prev_name != name
        elif prev_name > next_name:
            # wrap-around at the end of the zone
            return not (next_name <= name <= prev_name)
        else:
            return (prev_name < name < next_name) and not next_name.is_subdomain(name)
    def nsec_covering_name(self, name):
        '''Return the set of owner names corresponding to NSEC records in the
        response that cover the given name.'''

        excluding_names = set()
        # skip NSEC3 records whose owner name or next-hash is invalid
        for nsec_name in set(self.rrsets).difference(self.invalid_nsec3_owner.union(self.invalid_nsec3_hash)):
            if self._nsec_covers_name(name, nsec_name):
                excluding_names.add(nsec_name)
        return excluding_names

    def get_digest_name_for_nsec3(self, name, origin, salt, alg, iterations):
        '''Return the DNS name corresponding to the name, origin, and NSEC3
        hash parameters provided.'''

        val = name.canonicalize().to_wire()
        digest = crypto.get_digest_for_nsec3(val, salt, alg, iterations)
        if digest is None:
            # unsupported hash algorithm
            return None
        else:
            return dns.name.from_text(base32.b32encode(digest), origin)

    def nsec3_covering_name(self, name, salt, alg, iterations):
        '''Return the set of owner names corresponding to NSEC3 records in
        the response that cover the given (digest) name.'''

        excluding_names = set()
        for nsec_name in set(self.nsec3_params[(salt, alg, iterations)]).difference(self.invalid_nsec3_owner.union(self.invalid_nsec3_hash)):
            if self._nsec_covers_name(name, nsec_name):
                excluding_names.add(nsec_name)
        return excluding_names

    def _find_potential_closest_enclosers(self, qname, origin, salt, alg, iterations):
        '''Return a mapping of potential closest enclosers for a given name
        and origin, with digests computed with the given salt, algorithm, and
        iterations parameters.  The mapping maps a name to a set of
        corresponding digest names.  The algorithm follows that specified in
        RFC 5155 8.3.'''

        closest_enclosers = {}
        nsec3_names = self.nsec3_params[(salt, alg, iterations)]

        # walk from qname up toward origin, one label at a time
        sname = qname
        flag = False
        while len(sname) >= len(origin):
            digest_name = self.get_digest_name_for_nsec3(sname, origin, salt, alg, iterations)

            # unsupported algorithm
            if digest_name is None:
                return closest_enclosers

            if digest_name not in nsec3_names:
                # track whether this (non-matching) hash was covered; the
                # closest encloser requires the next-closer name to be covered
                flag = False
                if self.nsec_covering_name(digest_name):
                    flag = True

            if digest_name in nsec3_names:
                if flag:
                    if sname not in closest_enclosers:
                        closest_enclosers[sname] = set()
                    closest_enclosers[sname].add(digest_name)
                break

            sname = dns.name.Name(sname.labels[1:])
        return closest_enclosers

    def check_closest_encloser(self, name, nsec_name, origin):
        '''Return True if the candidate closest encloser meets the
        requirements for a closest encloser in RFC 5155.'''

        if not name.is_subdomain(origin):
            return False
        # a DNAME at the closest encloser would have altered the answer
        if self.rdtype_exists_in_bitmap(nsec_name, dns.rdatatype.DNAME):
            return False
        # NS without SOA indicates a delegation point, so the server was not
        # authoritative for names below it
        if self.rdtype_exists_in_bitmap(nsec_name, dns.rdatatype.NS) and \
                not self.rdtype_exists_in_bitmap(nsec_name, dns.rdatatype.SOA):
            return False
        return True

    def get_closest_encloser(self, qname, origin):
        '''Return a mapping of closest enclosers for a given name and
        origin.'''

        potential_closest_enclosers = {}
        # merge candidates across all NSEC3 parameter sets seen
        for salt, alg, iterations in self.nsec3_params:
            ret = self._find_potential_closest_enclosers(qname, origin, salt, alg, iterations)
            for name in ret:
                if name in potential_closest_enclosers:
                    potential_closest_enclosers[name].update(ret[name])
                else:
                    potential_closest_enclosers[name] = ret[name]

        # weed out candidates that fail the RFC 5155 sanity checks
        for name in list(potential_closest_enclosers):
            for nsec_name in list(potential_closest_enclosers[name]):
                if not self.check_closest_encloser(name, nsec_name, origin):
                    potential_closest_enclosers[name].remove(nsec_name)
            if not potential_closest_enclosers[name]:
                del potential_closest_enclosers[name]

        return potential_closest_enclosers
class DNSResponseError(DNSResponseComponent):
    '''An error (e.g., timeout or network error) associated with responses
    from particular servers.'''

    def __init__(self, code, arg):
        super(DNSResponseError, self).__init__()
        self.code = code
        self.arg = arg

    def __eq__(self, other):
        return self.code == other.code and self.arg == other.arg

    def __hash__(self):
        return hash(id(self))


class ReferralResponse(DNSResponseComponent):
    '''A referral response for a given name.'''

    def __init__(self, name):
        super(ReferralResponse, self).__init__()
        self.name = name

    def __eq__(self, other):
        return self.name == other.name

    def __hash__(self):
        return hash(id(self))


class TruncatedResponse(DNSResponseComponent):
    '''A truncated (TC bit set) response, retained in wire format.'''

    def __init__(self, wire):
        super(TruncatedResponse, self).__init__()
        self.wire = wire

    def __eq__(self, other):
        return self.wire == other.wire

    def __hash__(self):
        return hash(id(self))


def dnskey_rrset_to_ds_rrset(dnskey_rrset_info, digest_alg_map, default_algs):
    '''Synthesize a DS RRset from the given DNSKEY RRset, using the digest
    algorithm(s) mapped to each (algorithm, key_tag) pair in digest_alg_map
    (falling back to default_algs), and return it as an RRsetInfo carrying
    the same server/client provenance as the DNSKEY RRset.'''

    rrset = dns.rrset.RRset(dnskey_rrset_info.rrset.name, dnskey_rrset_info.rrset.rdclass, dns.rdatatype.DS)
    for dnskey_rdata in dnskey_rrset_info.rrset:
        dnskey_meta = DNSKEYMeta(dnskey_rrset_info.rrset.name, dnskey_rdata, dnskey_rrset_info.rrset.ttl)
        dnskey_meta.servers_clients = copy.deepcopy(dnskey_rrset_info.servers_clients)

        # BUGFIX: compute the DS input once per key and reuse it for each
        # digest algorithm; previously this value was computed, left unused,
        # and message_for_ds() was recomputed inside the loop
        dnskey_msg = dnskey_meta.message_for_ds()
        for digest_alg in digest_alg_map.get((dnskey_meta.rdata.algorithm, dnskey_meta.key_tag), default_algs):
            digest = crypto.get_ds_digest(digest_alg, dnskey_msg)
            rdata = dns.rdtypes.ANY.DS.DS(dns.rdataclass.IN, dns.rdatatype.DS, dnskey_meta.key_tag, dnskey_meta.rdata.algorithm, digest_alg, digest)
            rrset.add(rdata)

    ds_rrset_info = RRsetInfo(rrset, dnskey_rrset_info.ttl_cmp)
    ds_rrset_info.servers_clients = copy.deepcopy(dnskey_rrset_info.servers_clients)
    return ds_rrset_info
# # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import base64 import bisect import codecs import errno import fcntl import io import json import os import random import re import select import socket import ssl import struct import subprocess import threading import time # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict # python3/python2 dual compatibility try: import queue except ImportError: import Queue as queue try: import urllib.parse except ImportError: import urlparse import urllib urlquote = urllib else: urlparse = urllib.parse urlquote = urllib.parse import dns.exception from . 
import dist from .ipaddr import IPAddr, ANY_IPV6, ANY_IPV4 from .format import latin1_binary_to_string as lb2s DNS_TRANSPORT_VERSION = 1.0 MAX_PORT_BIND_ATTEMPTS=10 MAX_WAIT_FOR_REQUEST=30 HTTP_HEADER_END_RE = re.compile(r'(\r\n\r\n|\n\n|\r\r)') HTTP_STATUS_RE = re.compile(r'^HTTP/\S+ (?P\d+) ') CONTENT_LENGTH_RE = re.compile(r'^Content-[Ll]ength: (?P\d+)', re.MULTILINE) CHUNKED_ENCODING_RE = re.compile(r'^Transfer-[Ee]ncoding: chunked(\r\n|\r|\n)', re.MULTILINE) CHUNK_SIZE_RE = re.compile(r'^(?P[0-9a-fA-F]+)(;[^\r\n]+)?(\r\n|\r|\n)') CRLF_START_RE = re.compile(r'^(\r\n|\n|\r)') class SocketWrapper(object): def __init__(self): raise NotImplemented class Socket(SocketWrapper): def __init__(self, sock): self.sock = sock self.reader = sock self.writer = sock self.reader_fd = sock.fileno() self.writer_fd = sock.fileno() self.family = sock.family self.type = sock.type self.lock = None def recv(self, n): return self.sock.recv(n) def send(self, s): return self.sock.send(s) def setblocking(self, b): self.sock.setblocking(b) def bind(self, a): self.sock.bind(a) def connect(self, a): self.sock.connect(a) def getsockname(self): return self.sock.getsockname() def close(self): self.sock.close() class ReaderWriter(SocketWrapper): def __init__(self, reader, writer, proc=None): self.reader = reader self.writer = writer self.reader_fd = self.reader.fileno() self.writer_fd = self.writer.fileno() self.family = socket.AF_INET self.type = socket.SOCK_STREAM self.lock = None self.proc = proc def recv(self, n): return os.read(self.reader_fd, n) def send(self, s): return os.write(self.writer_fd, s) def setblocking(self, b): if not b: fcntl.fcntl(self.reader_fd, fcntl.F_SETFL, os.O_NONBLOCK) fcntl.fcntl(self.writer_fd, fcntl.F_SETFL, os.O_NONBLOCK) def bind(self, a): pass def connect(self, a): pass def getsockname(self): return ('localhost', 0) def close(self): pass class RemoteQueryTransportError(Exception): pass class TransportMetaDeserializationError(Exception): pass class 
SocketInUse(Exception): pass class DNSQueryTransportMeta(object): def __init__(self, req, dst, tcp, timeout, dport, src=None, sport=None): self.req = req self.dst = dst self.tcp = tcp self.timeout = timeout self.dport = dport self.src = src self.sport = sport self.res = None self.err = None self.start_time = None self.end_time = None def serialize_request(self): d = OrderedDict() d['req'] = lb2s(base64.b64encode(self.req)) d['dst'] = self.dst d['dport'] = self.dport if self.src is not None: d['src'] = self.src if self.sport is not None: d['sport'] = self.sport d['tcp'] = self.tcp d['timeout'] = int(self.timeout*1000) return d @classmethod def deserialize_request(cls, d): if 'req' not in d or d['req'] is None: raise TransportMetaDeserializationError('Missing "req" field in input.') try: req = base64.b64decode(d['req']) except TypeError: raise TransportMetaDeserializationError('Base64 decoding DNS request failed: %s' % d['req']) if 'dst' not in d or d['dst'] is None: raise TransportMetaDeserializationError('Missing "dst" field in input.') try: dst = IPAddr(d['dst']) except ValueError: raise TransportMetaDeserializationError('Invalid destination IP address: %s' % d['dst']) if 'dport' not in d or d['dport'] is None: raise TransportMetaDeserializationError('Missing "dport" field in input.') try: dport = int(d['dport']) if dport < 0 or dport > 65535: raise ValueError() except ValueError: raise TransportMetaDeserializationError('Invalid destination port: %s' % d['dport']) if 'src' not in d or d['src'] is None: src = None else: try: src = IPAddr(d['src']) except ValueError: raise TransportMetaDeserializationError('Invalid source IP address: %s' % d['src']) if 'sport' not in d or d['sport'] is None: sport = None else: try: sport = int(d['sport']) if sport < 0 or sport > 65535: raise ValueError() except ValueError: raise TransportMetaDeserializationError('Invalid source port: %s' % d['sport']) if 'tcp' not in d or d['tcp'] is None: raise 
TransportMetaDeserializationError('Missing "tcp" field in input.') else: tcp = bool(d['tcp']) if 'timeout' not in d or d['timeout'] is None: raise TransportMetaDeserializationError('Missing "timeout" field in input.') else: try: timeout = int(d['timeout'])/1000.0 except ValueError: raise TransportMetaDeserializationError('Invalid timeout value: %s' % d['timeout']) return cls(req, dst, tcp, timeout, dport, src, sport) def serialize_response(self): d = OrderedDict() if self.res is not None: d['res'] = lb2s(base64.b64encode(self.res)) else: d['res'] = None if self.err is not None: if isinstance(self.err, (socket.error, EOFError)): d['err'] = 'NETWORK_ERROR' elif isinstance(self.err, dns.exception.Timeout): d['err'] = 'TIMEOUT' else: d['err'] = 'ERROR' if hasattr(self.err, 'errno'): errno_name = errno.errorcode.get(self.err.errno, None) if errno_name is not None: d['errno'] = errno_name d['src'] = self.src d['sport'] = self.sport d['time_elapsed'] = int((self.end_time - self.start_time)*1000) return d def deserialize_response(self, d): if 'err' in d and d['err'] is not None: if d['err'] == 'NETWORK_ERROR': self.err = socket.error() if 'errno' in d and d['errno'] is not None: if hasattr(errno, d['errno']): self.err.errno = getattr(errno, d['errno']) else: raise TransportMetaDeserializationError('Unknown errno name: %s' % d['errno']) elif d['err'] == 'TIMEOUT': self.err = dns.exception.Timeout() else: raise TransportMetaDeserializationError('Unknown DNS response error: %s' % d['err']) elif not ('res' in d and d['res'] is not None): raise TransportMetaDeserializationError('Missing DNS response or response error in input.') else: try: self.res = base64.b64decode(d['res']) except TypeError: raise TransportMetaDeserializationError('Base64 decoding of DNS response failed: %s' % d['res']) if 'src' in d and d['src'] is not None: try: self.src = IPAddr(d['src']) except ValueError: raise TransportMetaDeserializationError('Invalid source IP address: %s' % d['src']) elif not 
isinstance(self.err, socket.error): raise TransportMetaDeserializationError('Missing "src" field in input') if 'sport' in d and d['sport'] is not None: try: self.sport = int(d['sport']) if self.sport < 0 or self.sport > 65535: raise ValueError() except ValueError: raise TransportMetaDeserializationError('Invalid source port: %s' % d['sport']) elif not isinstance(self.err, socket.error): raise TransportMetaDeserializationError('Missing "sport" field in input.') if 'time_elapsed' in d and d['time_elapsed'] is not None: try: elapsed = int(d['time_elapsed']) if elapsed < 0: raise ValueError() except ValueError: raise TransportMetaDeserializationError('Invalid time elapsed value: %s' % d['time_elapsed']) else: raise TransportMetaDeserializationError('Missing "time_elapsed" field in input.') self.end_time = time.time() self.start_time = self.end_time - (elapsed/1000.0) QTH_MODE_WRITE_READ = 0 QTH_MODE_WRITE = 1 QTH_MODE_READ = 2 QTH_SETUP_DONE = 0 QTH_SETUP_NEED_WRITE = 1 QTH_SETUP_NEED_READ = 2 class DNSQueryTransportHandler(object): singleton = False allow_loopback_query = False allow_private_query = False timeout_baseline = 0.0 mode = QTH_MODE_WRITE_READ def __init__(self, sock=None, recycle_sock=False, processed_queue=None, factory=None): self.msg_send = None self.msg_send_len = None self.msg_send_index = None self.msg_recv = None self.msg_recv_len = None self.msg_recv_buf = None self.msg_recv_index = None self.err = None self.dst = None self.dport = None self.src = None self.sport = None self.transport_type = None self.timeout = None self._processed_queue = processed_queue self.factory = factory self._sock = sock self.sock = None self.recycle_sock = recycle_sock self.expiration = None self.start_time = None self.end_time = None self.setup_state = QTH_SETUP_DONE self.qtms = [] def _set_timeout(self, qtm): if self.timeout is None or qtm.timeout > self.timeout: self.timeout = qtm.timeout def add_qtm(self, qtm): if self.singleton and self.qtms: raise TypeError('Only one 
DNSQueryTransportMeta instance allowed for DNSQueryTransportHandlers of singleton type!') self.qtms.append(qtm) self._set_timeout(qtm) def _check_source(self): if self.src in (ANY_IPV6, ANY_IPV4): self.src = None def finalize(self): assert self.mode in (QTH_MODE_WRITE_READ, QTH_MODE_READ), 'finalize() can only be called for modes QTH_MODE_READ and QTH_MODE_WRITE_READ' assert self.msg_recv is not None or self.err is not None, 'Query must have been executed before finalize() can be called' self._check_source() # clear out any partial responses if there was an error if self.err is not None: self.msg_recv = None if self.factory is not None: if self.recycle_sock: # if recycle_sock is requested, add the sock to the factory. # Then add the lock to the sock to prevent concurrent use of # the socket. if self.sock is not None and self._sock is None: self.factory.lock.acquire() try: if self.factory.sock is None: self.factory.sock = self.sock self.factory.sock.lock = self.factory.lock finally: self.factory.lock.release() elif self.sock is not None and self.sock is self.factory.sock: # if recycle_sock is not requested, and this sock is in the # factory, then remove it. 
self.factory.lock.acquire() try: if self.sock is self.factory.sock: self.factory.sock = None finally: self.factory.lock.release() #TODO change this and the overriding child methods to init_msg_send def init_req(self): raise NotImplemented def _init_msg_recv(self): self.msg_recv = b'' self.msg_recv_buf = b'' self.msg_recv_index = 0 self.msg_recv_len = None def prepare(self): if self.mode in (QTH_MODE_WRITE_READ, QTH_MODE_WRITE): assert self.msg_send is not None, 'Request must be initialized with init_req() before be added before prepare() can be called' if self.mode in (QTH_MODE_WRITE_READ, QTH_MODE_READ): self._init_msg_recv() if self.timeout is None: self.timeout = self.timeout_baseline if self._sock is not None: # if a preexisting socket is available for re-use, then use that # instead try: self._reuse_socket() self._set_start_time() except SocketInUse as e: self.err = e else: try: self._create_socket() self._configure_socket() self._bind_socket() self._set_start_time() self._connect_socket() except socket.error as e: self.err = e def _reuse_socket(self): # wait for the lock on the socket if not self._sock.lock.acquire(False): raise SocketInUse() self.sock = self._sock def _get_af(self): if self.dst.version == 6: return socket.AF_INET6 else: return socket.AF_INET def _create_socket(self): af = self._get_af() self.sock = Socket(socket.socket(af, self.transport_type)) def _configure_socket(self): self.sock.setblocking(0) def _bind_socket(self): if self.src is not None: src = self.src else: if self.sock.family == socket.AF_INET6: src = ANY_IPV6 else: src = ANY_IPV4 if self.sport is not None: self.sock.bind((src, self.sport)) else: i = 0 while True: sport = random.randint(1024, 65535) try: self.sock.bind((src, sport)) break except socket.error as e: i += 1 if i > MAX_PORT_BIND_ATTEMPTS or e.errno != socket.errno.EADDRINUSE: raise def _set_socket_info(self): src, sport = self.sock.getsockname()[:2] self.src = IPAddr(src) self.sport = sport def _get_connect_arg(self): 
return (self.dst, self.dport) def _connect_socket(self): try: self.sock.connect(self._get_connect_arg()) except socket.error as e: if e.errno != socket.errno.EINPROGRESS: raise def _set_start_time(self): self.expiration = self.timeout + time.time() self.start_time = time.time() def _set_end_time(self): self.end_time = time.time() if self.start_time is None: self.start_time = self.end_time def cleanup(self): # set end (and start, if necessary) times, as appropriate self._set_end_time() # close socket if self.sock is not None: self._set_socket_info() if not self.recycle_sock: self.sock.close() if self.sock.lock is not None: self.sock.lock.release() # place in processed queue, if specified if self._processed_queue is not None: self._processed_queue.put(self) def do_setup(self): pass def do_write(self): try: self.msg_send_index += self.sock.send(self.msg_send[self.msg_send_index:]) if self.msg_send_index >= self.msg_send_len: return True except socket.error as e: self.err = e return True def do_read(self): raise NotImplemented def do_timeout(self): raise NotImplemented def serialize_requests(self): d = { 'version': DNS_TRANSPORT_VERSION, 'requests': [q.serialize_request() for q in self.qtms] } return d def serialize_responses(self): d = { 'version': DNS_TRANSPORT_VERSION, 'responses': [q.serialize_response() for q in self.qtms] } return d class DNSQueryTransportHandlerDNS(DNSQueryTransportHandler): singleton = True require_queryid_match = True def finalize(self): super(DNSQueryTransportHandlerDNS, self).finalize() qtm = self.qtms[0] qtm.src = self.src qtm.sport = self.sport qtm.res = self.msg_recv qtm.err = self.err qtm.start_time = self.start_time qtm.end_time = self.end_time def init_req(self): assert self.qtms, 'At least one DNSQueryTransportMeta must be added before init_req() can be called' qtm = self.qtms[0] self.dst = qtm.dst self.dport = qtm.dport self.src = qtm.src self.sport = qtm.sport self.msg_send = qtm.req self.msg_send_len = len(qtm.req) 
self.msg_send_index = 0 # python3/python2 dual compatibility if isinstance(self.msg_send, str): map_func = lambda x: ord(x) else: map_func = lambda x: x self._queryid_wire = self.msg_send[:2] index = 12 while map_func(self.msg_send[index]) != 0: index += map_func(self.msg_send[index]) + 1 index += 4 self._question_wire = self.msg_send[12:index] if qtm.tcp: self.transport_type = socket.SOCK_STREAM self.msg_send = struct.pack(b'!H', self.msg_send_len) + self.msg_send self.msg_send_len += struct.calcsize(b'H') else: self.transport_type = socket.SOCK_DGRAM def _check_msg_recv_consistency(self): if self.require_queryid_match and self.msg_recv[:2] != self._queryid_wire: return False return True def do_read(self): # UDP if self.sock.type == socket.SOCK_DGRAM: try: self.msg_recv = self.sock.recv(65536) if self._check_msg_recv_consistency(): return True else: self.msg_recv = b'' except socket.error as e: self.err = e return True # TCP else: try: if self.msg_recv_len is None: if self.msg_recv_buf: buf = self.sock.recv(1) else: buf = self.sock.recv(2) if buf == b'': raise EOFError() self.msg_recv_buf += buf if len(self.msg_recv_buf) == 2: self.msg_recv_len = struct.unpack(b'!H', self.msg_recv_buf)[0] if self.msg_recv_len is not None: buf = self.sock.recv(self.msg_recv_len - self.msg_recv_index) if buf == b'': raise EOFError() self.msg_recv += buf self.msg_recv_index = len(self.msg_recv) if self.msg_recv_index >= self.msg_recv_len: return True except (socket.error, EOFError) as e: if isinstance(e, socket.error) and e.errno == socket.errno.EAGAIN: pass else: self.err = e return True def do_timeout(self): self.err = dns.exception.Timeout() class DNSQueryTransportHandlerDNSPrivate(DNSQueryTransportHandlerDNS): allow_loopback_query = True allow_private_query = True class DNSQueryTransportHandlerDNSLoose(DNSQueryTransportHandlerDNS): require_queryid_match = False class DNSQueryTransportHandlerMulti(DNSQueryTransportHandler): singleton = False def _set_timeout(self, qtm): if 
self.timeout is None: # allow 5 seconds for looking glass overhead, as a baseline self.timeout = self.timeout_baseline # account for worst case, in which case queries are performed serially # on the remote end self.timeout += qtm.timeout def finalize(self): super(DNSQueryTransportHandlerMulti, self).finalize() # if there was an error, then re-raise it here if self.err is not None: raise self.err # if there is no content, raise an exception if self.msg_recv is None: raise RemoteQueryTransportError('No content in response') # load the json content try: content = json.loads(codecs.decode(self.msg_recv, 'utf-8')) except ValueError: raise RemoteQueryTransportError('JSON decoding of response failed: %s' % self.msg_recv) if 'version' not in content: raise RemoteQueryTransportError('No version information in response.') try: major_vers, minor_vers = [int(x) for x in str(content['version']).split('.', 1)] except ValueError: raise RemoteQueryTransportError('Version of JSON input in response is invalid: %s' % content['version']) # ensure major version is a match and minor version is no greater # than the current minor version curr_major_vers, curr_minor_vers = [int(x) for x in str(DNS_TRANSPORT_VERSION).split('.', 1)] if major_vers != curr_major_vers or minor_vers > curr_minor_vers: raise RemoteQueryTransportError('Version %d.%d of JSON input in response is incompatible with this software.' 
% (major_vers, minor_vers)) if 'error' in content: raise RemoteQueryTransportError('Remote query error: %s' % content['error']) if self.mode == QTH_MODE_WRITE_READ: if 'responses' not in content: raise RemoteQueryTransportError('No DNS response information in response.') else: # self.mode == QTH_MODE_READ: if 'requests' not in content: raise RemoteQueryTransportError('No DNS requests information in response.') for i in range(len(self.qtms)): try: if self.mode == QTH_MODE_WRITE_READ: self.qtms[i].deserialize_response(content['responses'][i]) else: # self.mode == QTH_MODE_READ: self.qtms[i].deserialize_request(content['requests'][i]) except IndexError: raise RemoteQueryTransportError('DNS response or request information missing from message') except TransportMetaDeserializationError as e: raise RemoteQueryTransportError(str(e)) class DNSQueryTransportHandlerHTTP(DNSQueryTransportHandlerMulti): timeout_baseline = 5.0 def __init__(self, url, insecure=False, sock=None, recycle_sock=True, processed_queue=None, factory=None): super(DNSQueryTransportHandlerHTTP, self).__init__(sock=sock, recycle_sock=recycle_sock, processed_queue=processed_queue, factory=factory) self.transport_type = socket.SOCK_STREAM parse_result = urlparse.urlparse(url) scheme = parse_result.scheme if not scheme: scheme = 'http' elif scheme not in ('http', 'https'): raise RemoteQueryTransportError('Invalid scheme: %s' % scheme) self.use_ssl = scheme == 'https' self.host = parse_result.hostname self.dport = parse_result.port if self.dport is None: if scheme == 'http': self.dport = 80 else: # scheme == 'https' self.dport = 443 self.path = parse_result.path self.username = parse_result.username self.password = parse_result.password self.insecure = insecure if self.use_ssl: self.setup_state = QTH_SETUP_NEED_WRITE else: self.setup_state = QTH_SETUP_DONE af = 0 try: addrinfo = socket.getaddrinfo(self.host, self.dport, af, self.transport_type) except socket.gaierror: raise RemoteQueryTransportError('Unable to 
resolve name of HTTP host: %s' % self.host) self.dst = IPAddr(addrinfo[0][4][0]) self.chunked_encoding = None def _upgrade_socket_to_ssl(self): if isinstance(self.sock.sock, ssl.SSLSocket): return #XXX this is python >= 2.7.9 only ctx = ssl.create_default_context() if self.insecure: ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE new_sock = Socket(ctx.wrap_socket(self.sock.sock, server_hostname=self.host, do_handshake_on_connect=False)) new_sock.lock = self.sock.lock self.sock = new_sock def _post_data(self): return json.dumps(self.serialize_requests()) def _authentication_header(self): if not self.username: return '' # set username/password username = self.username if self.password: username += ':' + self.password return 'Authorization: Basic %s\r\n' % (lb2s(base64.b64encode(codecs.encode(username, 'utf-8')))) def init_req(self): data = self._post_data() self.msg_send = codecs.encode('POST %s HTTP/1.1\r\nHost: %s\r\nUser-Agent: DNSViz/%s\r\nAccept: application/json\r\n%sContent-Length: %d\r\nContent-Type: application/json\r\n\r\n%s' % (self.path, self.host, dist.version, self._authentication_header(), len(data), data), 'latin1') self.msg_send_len = len(self.msg_send) self.msg_send_index = 0 def prepare(self): super(DNSQueryTransportHandlerHTTP, self).prepare() if self.err is not None and not isinstance(self.err, SocketInUse): self.err = RemoteQueryTransportError('Error making HTTP connection: %s' % self.err) def do_setup(self): if self.use_ssl: self._upgrade_socket_to_ssl() try: self.sock.sock.do_handshake() except ssl.SSLWantReadError: self.setup_state = QTH_SETUP_NEED_READ except ssl.SSLWantWriteError: self.setup_state = QTH_SETUP_NEED_WRITE except ssl.SSLError as e: self.err = RemoteQueryTransportError('SSL Error: %s' % e) else: self.setup_state = QTH_SETUP_DONE def do_write(self): val = super(DNSQueryTransportHandlerHTTP, self).do_write() if self.err is not None: self.err = RemoteQueryTransportError('Error making HTTP request: %s' % self.err) return 
val def do_read(self): try: try: buf = self.sock.recv(65536) except ssl.SSLWantReadError: return False if buf == b'': raise EOFError self.msg_recv_buf += buf # still reading status and headers if self.chunked_encoding is None and self.msg_recv_len is None: headers_end_match = HTTP_HEADER_END_RE.search(lb2s(self.msg_recv_buf)) if headers_end_match is not None: headers = self.msg_recv_buf[:headers_end_match.start()] self.msg_recv_buf = self.msg_recv_buf[headers_end_match.end():] # check HTTP status status_match = HTTP_STATUS_RE.search(lb2s(headers)) if status_match is None: self.err = RemoteQueryTransportError('Malformed HTTP status line') return True status = int(status_match.group('status')) if status != 200: self.err = RemoteQueryTransportError('%d HTTP status' % status) return True # get content length or determine whether "chunked" # transfer encoding is used content_length_match = CONTENT_LENGTH_RE.search(lb2s(headers)) if content_length_match is not None: self.chunked_encoding = False self.msg_recv_len = int(content_length_match.group('length')) else: self.chunked_encoding = CHUNKED_ENCODING_RE.search(lb2s(headers)) is not None # handle chunked encoding first if self.chunked_encoding: # look through as many chunks as are readily available # (without having to read from socket again) while self.msg_recv_buf: if self.msg_recv_len is None: # looking for chunk length # strip off beginning CRLF, if any # (this is for chunks after the first one) crlf_start_match = CRLF_START_RE.search(lb2s(self.msg_recv_buf)) if crlf_start_match is not None: self.msg_recv_buf = self.msg_recv_buf[crlf_start_match.end():] # find the chunk length chunk_len_match = CHUNK_SIZE_RE.search(lb2s(self.msg_recv_buf)) if chunk_len_match is not None: self.msg_recv_len = int(chunk_len_match.group('length'), 16) self.msg_recv_buf = self.msg_recv_buf[chunk_len_match.end():] self.msg_recv_index = 0 else: # if we don't currently know the length of the next # chunk, and we don't have enough data to 
find the # length, then break out of the loop because we # don't have any more data to go off of. break if self.msg_recv_len is not None: # we know a length of the current chunk if self.msg_recv_len == 0: # no chunks left, so clean up and return return True # read remaining bytes bytes_remaining = self.msg_recv_len - self.msg_recv_index if len(self.msg_recv_buf) > bytes_remaining: self.msg_recv += self.msg_recv_buf[:bytes_remaining] self.msg_recv_index = 0 self.msg_recv_buf = self.msg_recv_buf[bytes_remaining:] self.msg_recv_len = None else: self.msg_recv += self.msg_recv_buf self.msg_recv_index += len(self.msg_recv_buf) self.msg_recv_buf = b'' elif self.chunked_encoding == False: # output is not chunked, so we're either reading until we've # read all the bytes specified by the content-length header (if # specified) or until the server closes the connection (or we # time out) if self.msg_recv_len is not None: bytes_remaining = self.msg_recv_len - self.msg_recv_index self.msg_recv += self.msg_recv_buf[:bytes_remaining] self.msg_recv_buf = self.msg_recv_buf[bytes_remaining:] self.msg_recv_index = len(self.msg_recv) if self.msg_recv_index >= self.msg_recv_len: return True else: self.msg_recv += self.msg_recv_buf self.msg_recv_buf = b'' except (socket.error, EOFError) as e: if isinstance(e, socket.error) and e.errno == socket.errno.EAGAIN: pass else: # if we weren't passed any content length header, and we're not # using chunked encoding, then don't throw an error. 
If the # content was bad, then it will be reflected in the decoding of # the content if self.chunked_encoding == False and self.msg_recv_len is None: pass else: self.err = RemoteQueryTransportError('Error communicating with HTTP server: %s' % e) return True def do_timeout(self): self.err = RemoteQueryTransportError('HTTP request timed out') class DNSQueryTransportHandlerHTTPPrivate(DNSQueryTransportHandlerHTTP): allow_loopback_query = True allow_private_query = True class DNSQueryTransportHandlerWebSocketServer(DNSQueryTransportHandlerMulti): timeout_baseline = 5.0 unmask_on_recv = True def __init__(self, path, sock=None, recycle_sock=True, processed_queue=None, factory=None): super(DNSQueryTransportHandlerWebSocketServer, self).__init__(sock=sock, recycle_sock=recycle_sock, processed_queue=processed_queue, factory=factory) self.dst = path self.transport_type = socket.SOCK_STREAM self.mask_mapping = [] self.has_more = None def _get_af(self): return socket.AF_UNIX def _bind_socket(self): pass def _set_socket_info(self): pass def _get_connect_arg(self): return self.dst def prepare(self): super(DNSQueryTransportHandlerWebSocketServer, self).prepare() if self.err is not None and not isinstance(self.err, SocketInUse): self.err = RemoteQueryTransportError('Error connecting to UNIX domain socket: %s' % self.err) def do_write(self): val = super(DNSQueryTransportHandlerWebSocketServer, self).do_write() if self.err is not None: self.err = RemoteQueryTransportError('Error writing to UNIX domain socket: %s' % self.err) return val def finalize(self): if self.unmask_on_recv: # python3/python2 dual compatibility if isinstance(self.msg_recv, str): decode_func = lambda x: struct.unpack(b'!B', x)[0] else: decode_func = lambda x: x new_msg_recv = b'' for i, mask_index in enumerate(self.mask_mapping): mask_octets = struct.unpack(b'!BBBB', self.msg_recv[mask_index:mask_index + 4]) if i >= len(self.mask_mapping) - 1: buf = self.msg_recv[mask_index + 4:] else: buf = 
self.msg_recv[mask_index + 4:self.mask_mapping[i + 1]] for j in range(len(buf)): b = decode_func(buf[j]) new_msg_recv += struct.pack(b'!B', b ^ mask_octets[j % 4]) self.msg_recv = new_msg_recv super(DNSQueryTransportHandlerWebSocketServer, self).finalize() def init_req(self): data = codecs.encode(json.dumps(self.serialize_requests()), 'utf-8') header = b'\x81' l = len(data) if l <= 125: header += struct.pack(b'!B', l) elif l <= 0xffff: header += struct.pack(b'!BH', 126, l) else: # 0xffff < len <= 2^63 header += struct.pack(b'!BLL', 127, 0, l) self.msg_send = header + data self.msg_send_len = len(self.msg_send) self.msg_send_index = 0 def init_empty_msg_send(self): self.msg_send = b'\x81\x00' self.msg_send_len = len(self.msg_send) self.msg_send_index = 0 def do_read(self): try: buf = self.sock.recv(65536) if buf == b'': raise EOFError self.msg_recv_buf += buf # look through as many frames as are readily available # (without having to read from socket again) while self.msg_recv_buf: if self.msg_recv_len is None: # looking for frame length if len(self.msg_recv_buf) >= 2: byte0, byte1 = struct.unpack(b'!BB', self.msg_recv_buf[0:2]) byte1b = byte1 & 0x7f # mask must be set if not byte1 & 0x80: if self.err is not None: self.err = RemoteQueryTransportError('Mask bit not set in message from server') return True # check for FIN flag self.has_more = not bool(byte0 & 0x80) # determine the header length if byte1b <= 125: header_len = 2 elif byte1b == 126: header_len = 4 else: # byte1b == 127: header_len = 10 if len(self.msg_recv_buf) >= header_len: if byte1b <= 125: self.msg_recv_len = byte1b elif byte1b == 126: self.msg_recv_len = struct.unpack(b'!H', self.msg_recv_buf[2:4])[0] else: # byte1b == 127: self.msg_recv_len = struct.unpack(b'!Q', self.msg_recv_buf[2:10])[0] if self.unmask_on_recv: # handle mask self.mask_mapping.append(len(self.msg_recv)) self.msg_recv_len += 4 self.msg_recv_buf = self.msg_recv_buf[header_len:] else: # if we don't currently know the length of the 
next # frame, and we don't have enough data to find the # length, then break out of the loop because we # don't have any more data to go off of. break if self.msg_recv_len is not None: # we know a length of the current chunk # read remaining bytes bytes_remaining = self.msg_recv_len - self.msg_recv_index if len(self.msg_recv_buf) > bytes_remaining: self.msg_recv += self.msg_recv_buf[:bytes_remaining] self.msg_recv_index = 0 self.msg_recv_buf = self.msg_recv_buf[bytes_remaining:] self.msg_recv_len = None else: self.msg_recv += self.msg_recv_buf self.msg_recv_index += len(self.msg_recv_buf) self.msg_recv_buf = b'' if self.msg_recv_index >= self.msg_recv_len and not self.has_more: return True except (socket.error, EOFError) as e: if isinstance(e, socket.error) and e.errno == socket.errno.EAGAIN: pass else: self.err = e return True def do_timeout(self): self.err = RemoteQueryTransportError('Read of UNIX domain socket timed out') class DNSQueryTransportHandlerWebSocketServerPrivate(DNSQueryTransportHandlerWebSocketServer): allow_loopback_query = True allow_private_query = True class DNSQueryTransportHandlerWebSocketClient(DNSQueryTransportHandlerWebSocketServer): unmask_on_recv = False def __init__(self, sock, recycle_sock=True, processed_queue=None, factory=None): super(DNSQueryTransportHandlerWebSocketClient, self).__init__(None, sock=sock, recycle_sock=recycle_sock, processed_queue=processed_queue, factory=factory) def _init_req(self, data): header = b'\x81' l = len(data) if l <= 125: header += struct.pack(b'!B', l | 0x80) elif l <= 0xffff: header += struct.pack(b'!BH', 126 | 0x80, l) else: # 0xffff < len <= 2^63 header += struct.pack(b'!BLL', 127 | 0x80, 0, l) mask_int = random.randint(0, 0xffffffff) mask = [(mask_int >> 24) & 0xff, (mask_int >> 16) & 0xff, (mask_int >> 8) & 0xff, mask_int & 0xff] header += struct.pack(b'!BBBB', *mask) # python3/python2 dual compatibility if isinstance(data, str): map_func = lambda x: ord(x) else: map_func = lambda x: x 
self.msg_send = header for i, b in enumerate(data): self.msg_send += struct.pack(b'!B', mask[i % 4] ^ map_func(b)) self.msg_send_len = len(self.msg_send) self.msg_send_index = 0 def init_req(self): self._init_req(codecs.encode(json.dumps(self.serialize_responses()), 'utf-8')) def init_err_send(self, err): self._init_req(codecs.encode(err, 'utf-8')) class DNSQueryTransportHandlerWebSocketClientReader(DNSQueryTransportHandlerWebSocketClient): mode = QTH_MODE_READ class DNSQueryTransportHandlerWebSocketClientWriter(DNSQueryTransportHandlerWebSocketClient): mode = QTH_MODE_WRITE class DNSQueryTransportHandlerCmd(DNSQueryTransportHandlerWebSocketServer): allow_loopback_query = True allow_private_query = True def __init__(self, args, sock=None, recycle_sock=True, processed_queue=None, factory=None): super(DNSQueryTransportHandlerCmd, self).__init__(None, sock=sock, recycle_sock=recycle_sock, processed_queue=processed_queue, factory=factory) self.args = args def _get_af(self): return None def _bind_socket(self): pass def _set_socket_info(self): pass def _get_connect_arg(self): return None def _create_socket(self): try: p = subprocess.Popen(self.args, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except OSError as e: raise socket.error(str(e)) else: self.sock = ReaderWriter(io.open(p.stdout.fileno(), 'rb'), io.open(p.stdin.fileno(), 'wb'), p) def _connect_socket(self): pass def do_write(self): if self.sock.proc.poll() is not None: self.err = RemoteQueryTransportError('Subprocess has ended with status %d.' % (self.sock.proc.returncode)) return True return super(DNSQueryTransportHandlerCmd, self).do_write() def do_read(self): if self.sock.proc.poll() is not None: self.err = RemoteQueryTransportError('Subprocess has ended with status %d.' 
% (self.sock.proc.returncode)) return True return super(DNSQueryTransportHandlerCmd, self).do_read() def cleanup(self): super(DNSQueryTransportHandlerCmd, self).cleanup() if self.sock is not None and not self.recycle_sock and self.sock.proc is not None and self.sock.proc.poll() is None: self.sock.proc.terminate() self.sock.proc.wait() return True class DNSQueryTransportHandlerRemoteCmd(DNSQueryTransportHandlerCmd): timeout_baseline = 10.0 def __init__(self, url, sock=None, recycle_sock=True, processed_queue=None, factory=None): parse_result = urlparse.urlparse(url) scheme = parse_result.scheme if not scheme: scheme = 'ssh' elif scheme != 'ssh': raise RemoteQueryTransportError('Invalid scheme: %s' % scheme) args = ['ssh', '-T'] if parse_result.port is not None: args.extend(['-p', str(parse_result.port)]) if parse_result.username is not None: args.append('%s@%s' % (parse_result.username, parse_result.hostname)) else: args.append('%s' % (parse_result.hostname)) if parse_result.path and parse_result.path != '/': args.append(parse_result.path) else: args.append('dnsviz lookingglass') super(DNSQueryTransportHandlerRemoteCmd, self).__init__(args, sock=sock, recycle_sock=recycle_sock, processed_queue=processed_queue, factory=factory) def _get_af(self): return None def _bind_socket(self): pass def _set_socket_info(self): pass def _get_connect_arg(self): return None def _create_socket(self): try: p = subprocess.Popen(self.args, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except OSError as e: raise socket.error(str(e)) else: self.sock = ReaderWriter(io.open(p.stdout.fileno(), 'rb'), io.open(p.stdin.fileno(), 'wb'), p) def _connect_socket(self): pass def do_write(self): if self.sock.proc.poll() is not None: self.err = RemoteQueryTransportError('Subprocess has ended with status %d.' 
% (self.sock.proc.returncode)) return True return super(DNSQueryTransportHandlerCmd, self).do_write() def do_read(self): if self.sock.proc.poll() is not None: self.err = RemoteQueryTransportError('Subprocess has ended with status %d.' % (self.sock.proc.returncode)) return True return super(DNSQueryTransportHandlerCmd, self).do_read() def cleanup(self): super(DNSQueryTransportHandlerCmd, self).cleanup() if self.sock is not None and not self.recycle_sock and self.sock.proc is not None and self.sock.proc.poll() is None: self.sock.proc.terminate() self.sock.proc.wait() return True class DNSQueryTransportHandlerFactory(object): cls = DNSQueryTransportHandler def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.kwargs['factory'] = self self.lock = threading.Lock() self.sock = None def __del__(self): if self.sock is not None: self.sock.close() def build(self, **kwargs): if 'sock' not in kwargs and self.sock is not None: kwargs['sock'] = self.sock for name in self.kwargs: if name not in kwargs: kwargs[name] = self.kwargs[name] return self.cls(*self.args, **kwargs) class DNSQueryTransportHandlerDNSFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerDNS class DNSQueryTransportHandlerDNSPrivateFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerDNSPrivate class DNSQueryTransportHandlerHTTPFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerHTTP class DNSQueryTransportHandlerHTTPPrivateFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerHTTPPrivate class _DNSQueryTransportHandlerWebSocketServerFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerWebSocketServer class DNSQueryTransportHandlerWebSocketServerFactory: def __init__(self, *args, **kwargs): self._f = _DNSQueryTransportHandlerWebSocketServerFactory(*args, **kwargs) def __del__(self): try: qth = self._f.build() qth.init_empty_msg_send() qth.prepare() qth.do_write() except: pass @property def 
cls(self): return self._f.__class__.cls def build(self, **kwargs): return self._f.build(**kwargs) class _DNSQueryTransportHandlerWebSocketServerPrivateFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerWebSocketServerPrivate class DNSQueryTransportHandlerWebSocketServerPrivateFactory: def __init__(self, *args, **kwargs): self._f = _DNSQueryTransportHandlerWebSocketServerPrivateFactory(*args, **kwargs) def __del__(self): try: qth = self._f.build() qth.init_empty_msg_send() qth.prepare() qth.do_write() except: pass @property def cls(self): return self._f.__class__.cls def build(self, **kwargs): return self._f.build(**kwargs) class DNSQueryTransportHandlerCmdFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerCmd class DNSQueryTransportHandlerRemoteCmdFactory(DNSQueryTransportHandlerFactory): cls = DNSQueryTransportHandlerRemoteCmd class DNSQueryTransportHandlerWrapper(object): def __init__(self, qh): self.qh = qh def __eq__(self, other): return False def __lt__(self, other): return False class _DNSQueryTransportManager: '''A class that handles''' #TODO might need FD_SETSIZE to support lots of fds def __init__(self): self._notify_read_fd, self._notify_write_fd = os.pipe() fcntl.fcntl(self._notify_read_fd, fcntl.F_SETFL, os.O_NONBLOCK) self._msg_queue = queue.Queue() self._event_map = {} self._close = threading.Event() # python3/python2 dual compatibility try: # python 3 t = threading.Thread(target=self._loop, daemon=True) except TypeError: # python 2 t = threading.Thread(target=self._loop) t.daemon = True t.start() def close(self): self._close.set() os.write(self._notify_write_fd, struct.pack(b'!B', 0)) def handle_msg(self, qh): self._event_map[qh] = threading.Event() self._handle_msg(qh, True) self._event_map[qh].wait() del self._event_map[qh] def handle_msg_nowait(self, qh): self._handle_msg(qh, True) def _handle_msg(self, qh, notify): self._msg_queue.put(qh) if notify: os.write(self._notify_write_fd, struct.pack(b'!B', 0)) 
def _loop(self): '''Return the data resulting from a UDP transaction.''' query_meta = {} expirations = [] # initialize "in" fds for select rlist_in = [self._notify_read_fd] wlist_in = [] xlist_in = [] while True: # determine the new expiration if expirations: timeout = max(expirations[0][0] - time.time(), 0) else: timeout = MAX_WAIT_FOR_REQUEST finished_fds = [] rlist_out, wlist_out, xlist_out = select.select(rlist_in, wlist_in, xlist_in, timeout) # if we have been signalled to exit, then do that if self._close.is_set(): break # handle the requests for fd in wlist_out: qh = query_meta[fd] if qh.setup_state == QTH_SETUP_DONE: if qh.do_write(): if qh.err is not None or qh.mode == QTH_MODE_WRITE: qh.cleanup() finished_fds.append(fd) else: # qh.mode == QTH_MODE_WRITE_READ wlist_in.remove(fd) rlist_in.append(qh.sock.reader_fd) else: qh.do_setup() if qh.err is not None: qh.cleanup() finished_fds.append(fd) elif qh.setup_state == QTH_SETUP_NEED_READ: wlist_in.remove(fd) rlist_in.append(qh.sock.reader_fd) # handle the responses for fd in rlist_out: if fd == self._notify_read_fd: continue qh = query_meta[fd] if qh.setup_state == QTH_SETUP_DONE: if qh.do_read(): # qh.mode in (QTH_MODE_WRITE_READ, QTH_MODE_READ) qh.cleanup() finished_fds.append(qh.sock.reader_fd) else: qh.do_setup() if qh.err is not None: qh.cleanup() finished_fds.append(fd) elif qh.setup_state in (QTH_SETUP_NEED_WRITE, QTH_SETUP_DONE): rlist_in.remove(fd) wlist_in.append(qh.sock.writer_fd) # handle the expired queries future_index = bisect.bisect_right(expirations, ((time.time(), DNSQueryTransportHandlerWrapper(None)))) for i in range(future_index): qh = expirations[i][1].qh # this query actually finished earlier in this iteration of the # loop, so don't indicate that it timed out if qh.end_time is not None: continue qh.do_timeout() qh.cleanup() finished_fds.append(qh.sock.reader_fd) expirations = expirations[future_index:] # for any fds that need to be finished, do it now for fd in finished_fds: qh = 
query_meta[fd] try: rlist_in.remove(qh.sock.reader_fd) except ValueError: wlist_in.remove(qh.sock.writer_fd) if qh in self._event_map: self._event_map[qh].set() del query_meta[fd] if finished_fds: # if any sockets were finished, then notify, in case any # queued messages are waiting to be handled. os.write(self._notify_write_fd, struct.pack(b'!B', 0)) # handle the new queries if self._notify_read_fd in rlist_out: # empty the pipe os.read(self._notify_read_fd, 65536) requeue = [] while True: try: qh = self._msg_queue.get_nowait() qh.prepare() if qh.err is not None: if isinstance(qh.err, SocketInUse): # if this was a SocketInUse, just requeue, and try again qh.err = None requeue.append(qh) else: qh.cleanup() if qh in self._event_map: self._event_map[qh].set() else: # if we successfully bound and connected the # socket, then put this socket in the write fd list query_meta[qh.sock.reader_fd] = qh query_meta[qh.sock.writer_fd] = qh bisect.insort(expirations, (qh.expiration, DNSQueryTransportHandlerWrapper(qh))) if qh.setup_state == QTH_SETUP_DONE: if qh.mode in (QTH_MODE_WRITE_READ, QTH_MODE_WRITE): wlist_in.append(qh.sock.writer_fd) elif qh.mode == QTH_MODE_READ: rlist_in.append(qh.sock.reader_fd) else: raise Exception('Unexpected mode: %d' % qh.mode) elif qh.setup_state == QTH_SETUP_NEED_WRITE: wlist_in.append(qh.sock.writer_fd) elif qh.setup_state == QTH_SETUP_NEED_READ: rlist_in.append(qh.sock.reader_fd) except queue.Empty: break for qh in requeue: self._handle_msg(qh, False) class DNSQueryTransportHandlerHTTPPrivate(DNSQueryTransportHandlerHTTP): allow_loopback_query = True allow_private_query = True class DNSQueryTransportManager: def __init__(self): self._th = _DNSQueryTransportManager() def __del__(self): self.close() def handle_msg(self, qh): return self._th.handle_msg(qh) def handle_msg_nowait(self, qh): return self._th.handle_msg_nowait(qh) def close(self): return self._th.close() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 
mtime=1745253811.0 dnsviz-0.11.1/dnsviz/util.py0000644000175000017500000002021115001472663015005 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import datetime import io import os import re import socket import dns.message, dns.name, dns.rdataclass, dns.rdatatype, dns.rrset from .config import DNSVIZ_SHARE_PATH from . import format as fmt from .ipaddr import IPAddr ROOT_HINTS = os.path.join(DNSVIZ_SHARE_PATH, 'hints', 'named.root') CR_RE = re.compile(r'\r\n', re.MULTILINE) ZONE_COMMENTS_RE = re.compile(r'\s*;.*', re.MULTILINE) BLANK_LINES_RE = re.compile(r'\n\s*\n') ROOT_HINTS_STR_DEFAULT = ''' . 3600000 NS A.ROOT-SERVERS.NET. A.ROOT-SERVERS.NET. 3600000 A 198.41.0.4 A.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:ba3e::2:30 . 3600000 NS B.ROOT-SERVERS.NET. B.ROOT-SERVERS.NET. 3600000 A 170.247.170.2 B.ROOT-SERVERS.NET. 3600000 AAAA 2801:1b8:10::b . 3600000 NS C.ROOT-SERVERS.NET. C.ROOT-SERVERS.NET. 3600000 A 192.33.4.12 C.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2::c . 
3600000 NS D.ROOT-SERVERS.NET. D.ROOT-SERVERS.NET. 3600000 A 199.7.91.13 D.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2d::d . 3600000 NS E.ROOT-SERVERS.NET. E.ROOT-SERVERS.NET. 3600000 A 192.203.230.10 E.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:a8::e . 3600000 NS F.ROOT-SERVERS.NET. F.ROOT-SERVERS.NET. 3600000 A 192.5.5.241 F.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2f::f . 3600000 NS G.ROOT-SERVERS.NET. G.ROOT-SERVERS.NET. 3600000 A 192.112.36.4 G.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:12::d0d . 3600000 NS H.ROOT-SERVERS.NET. H.ROOT-SERVERS.NET. 3600000 A 198.97.190.53 H.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:1::53 . 3600000 NS I.ROOT-SERVERS.NET. I.ROOT-SERVERS.NET. 3600000 A 192.36.148.17 I.ROOT-SERVERS.NET. 3600000 AAAA 2001:7fe::53 . 3600000 NS J.ROOT-SERVERS.NET. J.ROOT-SERVERS.NET. 3600000 A 192.58.128.30 J.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:c27::2:30 . 3600000 NS K.ROOT-SERVERS.NET. K.ROOT-SERVERS.NET. 3600000 A 193.0.14.129 K.ROOT-SERVERS.NET. 3600000 AAAA 2001:7fd::1 . 3600000 NS L.ROOT-SERVERS.NET. L.ROOT-SERVERS.NET. 3600000 A 199.7.83.42 L.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:9f::42 . 3600000 NS M.ROOT-SERVERS.NET. M.ROOT-SERVERS.NET. 3600000 A 202.12.27.33 M.ROOT-SERVERS.NET. 3600000 AAAA 2001:dc3::35 ''' HISTORICAL_ROOT_IPS = ( (dns.name.from_text('h.root-servers.net.'), IPAddr('2001:500:1::803f:235')), # 2015-12-01 (dns.name.from_text('l.root-servers.net.'), IPAddr('2001:500:3::42')), # 2016-03-24 (dns.name.from_text('b.root-servers.net.'), IPAddr('2001:500:84::b')), # 2017-06-01 (dns.name.from_text('b.root-servers.net.'), IPAddr('2001:500:200::b')), (dns.name.from_text('b.root-servers.net.'), IPAddr('192.228.79.201')), ) # The following list should include all current and historical trust anchors # for the root zone. This makes it possible to perform analysis of current DNS # responses, but also archived DNS responses. # # For each root zone trust anchor, the start value is the day it is first # published in the root zone. 
Even though the key is not yet signing, this # draws attention to the fact that it will be signing and likely replacing its # predecessor. The end value is (at least) the minimum of the TTL and the # expiration of its last published RRSIG. This allows us to query caches with # contents referring to the old key, even after its replacement has taken over. TRUSTED_KEYS_ROOT = ( ('. IN DNSKEY 257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0=', datetime.datetime(2010, 7, 16, 0, 0, 0, 0, fmt.utc), datetime.datetime(2018, 10, 15, 16, 0, 0, 0, fmt.utc)), # 2018-10-11 16:00:00 UTC + 4 days (2*TTL) ('. IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3 +/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kv ArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF 0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+e oZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfd RUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwN R1AkUTV74bU=', datetime.datetime(2017, 8, 11, 0, 0, 0, 0, fmt.utc), # 2017-07-12 00:00:00 UTC + 30 days (RFC 5011 add hold-down time) None), ) def tuple_to_dict(t): d = {} for n, v in t: if n not in d: d[n] = [] d[n].append(v) return d def get_trusted_keys(s): trusted_keys = [] s = CR_RE.sub('\n', s) s = ZONE_COMMENTS_RE.sub('', s) s = BLANK_LINES_RE.sub(r'\n', s) s = s.strip() m = dns.message.from_text(str(';ANSWER\n'+s)) for rrset in m.answer: if rrset.rdtype != dns.rdatatype.DNSKEY: continue for dnskey in rrset: if dnskey.flags & fmt.DNSKEY_FLAGS['revoke']: continue trusted_keys.append((rrset.name,dnskey)) return trusted_keys def get_default_trusted_keys(date): tk_str = '' for tk, start, end in TRUSTED_KEYS_ROOT: 
if start is not None and date < start: continue if end is not None and date > end: continue tk_str += tk + '\n' return get_trusted_keys(tk_str) def get_hints(s): hints = {} s = CR_RE.sub('\n', s) s = ZONE_COMMENTS_RE.sub('', s) s = BLANK_LINES_RE.sub(r'\n', s) s = s.strip() m = dns.message.from_text(str(';ANSWER\n'+s)) for rrset in m.answer: if rrset.rdtype not in (dns.rdatatype.NS, dns.rdatatype.A, dns.rdatatype.AAAA): continue hints[(rrset.name, rrset.rdtype)] = rrset return hints def get_root_hints(): try: with io.open(ROOT_HINTS, 'r', encoding='utf-8') as fh: return get_hints(fh.read()) except IOError: return get_hints(ROOT_HINTS_STR_DEFAULT) def get_client_address(server): if server.version == 6: af = socket.AF_INET6 else: af = socket.AF_INET s = socket.socket(af, socket.SOCK_DGRAM) try: s.connect((server, 53)) except socket.error: ip = None else: ip = IPAddr(s.getsockname()[0]) s.close() return ip def io_try_buffered(f, mode, closefd=True): """Try opening buffered reader, but allow unbuffered on failure. Required to pass tests under pytest.""" try: return io.open(f.fileno(), mode, closefd=closefd) except io.UnsupportedOperation: # raised by f.fileno() return f ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3905313 dnsviz-0.11.1/dnsviz/viz/0000755000175000017500000000000015001473074014267 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/viz/__init__.py0000644000175000017500000000000015001472663016371 0ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/dnsviz/viz/dnssec.py0000644000175000017500000031213315001472663016126 0ustar00caseycasey# # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. 
Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2024 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # from __future__ import unicode_literals import codecs import errno import io import json import os import re import sys import xml.dom.minidom # minimal support for python2.6 try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict # python3/python2 dual compatibility try: from html import escape except ImportError: from cgi import escape import dns.name, dns.rdtypes, dns.rdatatype, dns.dnssec from pygraphviz import AGraph from dnsviz.analysis import status as Status from dnsviz.analysis import errors as Errors from dnsviz.analysis.online import ANALYSIS_TYPE_RECURSIVE from dnsviz.config import DNSVIZ_SHARE_PATH from dnsviz import crypto from dnsviz import format as fmt from dnsviz import query as Q from dnsviz import response as Response from dnsviz.util import tuple_to_dict lb2s = fmt.latin1_binary_to_string COLORS = { 'secure': '#0a879a', 'secure_non_existent': '#9dcfd6', 'bogus': '#be1515', 'bogus_non_existent': '#e5a1a1', 'insecure': '#000000', 'insecure_non_existent': '#d0d0d0', 'misconfigured': '#f4b800', 'indeterminate': '#f4b800', 'expired': '#6131a3', 'invalid': '#be1515', 'alias': 'black', 'CDNSKEY': 'lightgray', 'CDS': 
'lightgray', 'alias': 'black' } NODE_DNSKEY_CDNSKEY_CDS_RE = re.compile(r'^(DNSKEY-|RRset-\d+\|[^\|]+\|(CDNSKEY|CDS)$)') INVIS_STYLE_RE = re.compile(r'(^|,)invis(,|$)') DASHED_STYLE_RE = re.compile(r'(^|,)dashed(,|$)') OPTOUT_STYLE_RE = re.compile(r'BGCOLOR="lightgray"') ICON_PATH=os.path.join(DNSVIZ_SHARE_PATH, 'icons') WARNING_ICON=os.path.join(ICON_PATH, 'warning.png') ERROR_ICON=os.path.join(ICON_PATH, 'error.png') # python3/python2.6 dual compatibility vers0, vers1, vers2 = sys.version_info[:3] if (vers0, vers1) == (2, 6): execv_encode = lambda x: codecs.encode(x, sys.getfilesystemencoding()) else: execv_encode = lambda x: x class DNSKEYNonExistent(object): def __init__(self, name, algorithm, key_tag): self.name = name self.algorithm = algorithm self.key_tag = key_tag def serialize(self): d = OrderedDict() d['flags'] = None d['protocol'] = None d['algorithm'] = self.algorithm d['key'] = None d['ttl'] = None d['key_length'] = None d['key_tag'] = self.key_tag return d class RRsetNonExistent(object): def __init__(self, name, rdtype, nxdomain, servers_clients): self.name = name self.rdtype = rdtype self.nxdomain = nxdomain self.servers_clients = servers_clients def serialize(self, consolidate_clients, html_format=False, map_ip_to_ns_name=None): d = OrderedDict() if html_format: formatter = lambda x: escape(x, True) else: formatter = lambda x: x if self.rdtype == dns.rdatatype.NSEC3: d['name'] = fmt.format_nsec3_name(self.name) else: d['name'] = formatter(lb2s(self.name.canonicalize().to_text())) d['ttl'] = None d['type'] = dns.rdatatype.to_text(self.rdtype) if self.nxdomain: d['rdata'] = ['NXDOMAIN'] else: d['rdata'] = ['NODATA'] servers = tuple_to_dict(self.servers_clients) if consolidate_clients: servers = list(servers) servers.sort() d['servers'] = servers if map_ip_to_ns_name is not None: try: ns_names = list(set([lb2s(map_ip_to_ns_name(s)[0][0].canonicalize().to_text()) for s in servers])) except IndexError: ns_names = [] ns_names.sort() d['ns_names'] = 
ns_names tags = set() nsids = [] for server,client in self.servers_clients: for response in self.servers_clients[(server,client)]: tags.add(response.effective_query_tag()) nsid = response.nsid_val() if nsid is not None: nsids.append(nsid) if nsids: d['nsid_values'] = nsids d['nsid_values'].sort() d['query_options'] = list(tags) d['query_options'].sort() return d class DNSAuthGraph: def __init__(self, dlv_domain=None): self.dlv_domain = dlv_domain self.G = AGraph(directed=True, strict=False, compound='true', rankdir='BT', ranksep='0.3') self.G.node_attr['penwidth'] = '1.5' self.G.edge_attr['penwidth'] = '1.5' self.node_info = {} self.node_mapping = {} self.node_reverse_mapping = {} self.nsec_rr_status = {} self.secure_dnskey_rrsets = set() self.subgraph_not_stub = set() self.node_subgraph_name = {} self.processed_rrsets = {} self.dnskey_ids = {} self.ds_ids = {} self.nsec_ids = {} self.rrset_ids = {} self.next_dnskey_id = 0 self.next_ds_id = 0 self.next_nsec_id = 0 self.next_rrset_id = 10 self._edge_keys = set() def _raphael_unit_mapping_expression(self, val, unit): #XXX doesn't work properly #if unit: # return '%s*to_pixel_mapping[\'%s\']' % (val, unit) return val def _raphael_transform_str(self, trans_value): transform_re = re.compile(r'(scale|rotate|translate)\((-?[0-9\.]+(px|pt|cm|in)?((,\s*|\s+)-?[0-9\.]+(px|pt|cm|in)?)?)\)') number_units_re = re.compile(r'(-?[0-9\.]+)(px|pt|cm|in)?') t = '' for m in transform_re.findall(trans_value): if m[0] == 'scale': coords = number_units_re.findall(m[1]) if (len(coords) > 1): t += 's%s,%s,0,0' % (self._raphael_unit_mapping_expression(coords[0][0], coords[0][1]), self._raphael_unit_mapping_expression(coords[1][0], coords[1][1])) else: t += 's%s,0,0,0' % (coords[0]) if m[0] == 'translate': coords = number_units_re.findall(m[1]) if (len(coords) > 1): t += 't%s,%s' % (self._raphael_unit_mapping_expression(coords[0][0], coords[0][1]), self._raphael_unit_mapping_expression(coords[1][0], coords[1][1])) else: t += 't%s,0,' % 
(self._raphael_unit_mapping_expression(coords[0][0], coords[0][1])) return t def _write_raphael_node(self, node, node_id, transform): required_attrs = { 'path': set(['d']), 'ellipse': set(['cx','cy','rx','ry']), 'polygon': set(['points']), 'polyline': set(['points']), 'text': set(['x','y']), 'image': set(['src','x','y','width','height']) } number_units_re = re.compile(r'(-?[0-9\.]+)(px|pt|cm|in)?') s = '' if node.nodeType != xml.dom.Node.ELEMENT_NODE: return s if node.hasAttribute('id'): node_id = node.getAttribute('id') if node.nodeName == 'svg': width, width_unit = number_units_re.match(node.getAttribute('width')).group(1, 2) height, height_unit = number_units_re.match(node.getAttribute('height')).group(1, 2) s += ''' var imageWidth = %s*this.imageScale; var imageHeight = %s*this.imageScale; if (this.maxPaperWidth > 0 && imageWidth > this.maxPaperWidth) { paperScale = this.maxPaperWidth/imageWidth; } else { paperScale = 1.0; } ''' % (width, height) s += '\tpaper = Raphael(this.anchorElement, parseInt(paperScale*imageWidth), parseInt(paperScale*imageHeight));\n' else: if node.nodeName == 'path': s += '\tel = paper.path(\'%s\')' % node.getAttribute('d') elif node.nodeName == 'ellipse': s += '\tel = paper.ellipse(%s, %s, %s, %s)' % (node.getAttribute('cx'), node.getAttribute('cy'), node.getAttribute('rx'), node.getAttribute('ry')) elif node.nodeName == 'text': if node.childNodes: text = node.childNodes[0].nodeValue else: text = '' s += '\tel = paper.text(%s, %s, \'%s\')' % (node.getAttribute('x'), node.getAttribute('y'), text) elif node.nodeName == 'image': width, width_unit = number_units_re.match(node.getAttribute('width')).group(1, 2) height, height_unit = number_units_re.match(node.getAttribute('height')).group(1, 2) s += '\tel = paper.image(\'%s\', %s, %s, %s, %s)' % (node.getAttribute('xlink:href'), node.getAttribute('x'), node.getAttribute('y'), self._raphael_unit_mapping_expression(width, width_unit),self._raphael_unit_mapping_expression(height, 
height_unit)) elif node.nodeName == 'polygon' or node.nodeName == 'polyline': pathstring = 'M'; coords = number_units_re.findall(node.getAttribute('points')) for i in range(len(coords)): if i > 0: if i % 2 == 0: pathstring += 'L' else: pathstring += ',' pathstring += coords[i][0] if node.nodeName == 'polygon': pathstring += 'Z' s += '\tel = paper.path(\'%s\')' % pathstring attrs = [] for i in range(node.attributes.length): attr = node.attributes.item(i) if attr.name not in required_attrs.get(node.nodeName, set()): if attr.name == 'stroke-dasharray': #XXX hack val = '\'\\-\'' elif attr.name == 'stroke-width': val = attr.value+'*this.imageScale' elif attr.name == 'transform': transform += self._raphael_transform_str(attr.value) continue else: val = '\'%s\'' % attr.value attrs.append('\'%s\': %s' % (attr.name, val)) if transform: attrs.append('\'%s\': \'%s\'' % ('transform', transform)) if s: if attrs: s += '.attr({%s})' % (','.join(attrs)) s += ';\n' if node_id is not None and node_id in self.node_info: s += '\tthis.addNodeEvent(el, node_info[\'%s\']);\n' % node_id.replace('\\', '\\\\').replace('--', '\\-\\-') for i in range(node.childNodes.length): s += self._write_raphael_node(node.childNodes[i], node_id, transform) return s def to_raphael(self): svg = self.G.draw(format=execv_encode('svg'), prog=execv_encode('dot')) dom = xml.dom.minidom.parseString(svg) s = 'AuthGraph.prototype.draw = function () {\n' s += '\tvar el, paperScale;\n' s += '\tvar node_info = %s;\n' % json.dumps(self.node_info) s += self._write_raphael_node(dom.documentElement, None, 's\'+this.imageScale+\',\'+this.imageScale+\',0,0') s += '\tpaper.setViewBox(0, 0, imageWidth, imageHeight);\n' s += '}\n' return codecs.encode(s, 'utf-8') def draw(self, format, path=None): if format == 'js': img = self.to_raphael() if path is None: return img else: io.open(path, 'w', encoding='utf-8').write(img) else: if path is None: return self.G.draw(format=execv_encode(format), prog=execv_encode('dot')) else: 
return self.G.draw(path=execv_encode(path), format=execv_encode(format), prog=execv_encode('dot')) def id_for_dnskey(self, name, dnskey): # dnspython 1/2 compatibility f = io.BytesIO() dnskey.to_wire(f) wire = f.getvalue() try: return self.dnskey_ids[(name,wire)] except KeyError: self.dnskey_ids[(name,wire)] = self.next_dnskey_id self.next_dnskey_id += 1 return self.dnskey_ids[(name,wire)] def id_for_ds(self, name, ds): try: return self.ds_ids[(name,ds)] except KeyError: self.ds_ids[(name,ds)] = self.next_ds_id self.next_ds_id += 1 return self.ds_ids[(name,ds)] def id_for_multiple_ds(self, name, ds): id_list = [] for d in ds: id_list.append(self.id_for_ds(name, d)) id_list.sort() return '_'.join(map(str, id_list)) def id_for_nsec(self, name, rdtype, cls, nsec_set_info): try: nsec_set_info_list = self.nsec_ids[(name,rdtype,cls)] except KeyError: self.nsec_ids[(name,rdtype,cls)] = [] nsec_set_info_list = self.nsec_ids[(name,rdtype,cls)] for nsec_set_info1, id in nsec_set_info_list: if nsec_set_info == nsec_set_info1: return id id = self.next_nsec_id self.nsec_ids[(name,rdtype,cls)].append((nsec_set_info, id)) self.next_nsec_id += 1 return id def dnskey_node_str(self, id, name, algorithm, key_tag): return 'DNSKEY-%s|%s|%d|%d' % (id, fmt.humanize_name(name), algorithm, key_tag) def has_dnskey(self, id, name, algorithm, key_tag): return self.G.has_node(self.dnskey_node_str(id, name, algorithm, key_tag)) def get_dnskey(self, id, name, algorithm, key_tag): return self.G.get_node(self.dnskey_node_str(id, name, algorithm, key_tag)) def add_dnskey(self, name_obj, dnskey): zone_obj = name_obj.zone node_str = self.dnskey_node_str(self.id_for_dnskey(name_obj.name, dnskey.rdata), name_obj.name, dnskey.rdata.algorithm, dnskey.key_tag) if not self.G.has_node(node_str): rrset_info_with_errors = [x for x in dnskey.rrset_info if name_obj.rrset_errors[x]] rrset_info_with_warnings = [x for x in dnskey.rrset_info if name_obj.rrset_warnings[x]] img_str = '' if dnskey.errors or 
rrset_info_with_errors: img_str = '' % ERROR_ICON elif dnskey.warnings or rrset_info_with_warnings: img_str = '' % WARNING_ICON if img_str: label_str = '<
DNSKEY%s
alg=%d, id=%d
%d bits
>' % \ (12, 'Helvetica', img_str, 10, dnskey.rdata.algorithm, dnskey.key_tag, dnskey.key_len) else: label_str = '<DNSKEY
alg=%d, id=%d
%d bits
>' % \ (12, 'Helvetica', 10, dnskey.rdata.algorithm, dnskey.key_tag, dnskey.key_len) attr = {'style': 'filled', 'fillcolor': '#ffffff' } if dnskey.rdata.flags & fmt.DNSKEY_FLAGS['SEP']: attr['fillcolor'] = 'lightgray' if dnskey.rdata.flags & fmt.DNSKEY_FLAGS['revoke']: attr['penwidth'] = '4.0' S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) S.add_node(node_str, id=node_str, shape='ellipse', label=label_str, **attr) self.node_subgraph_name[node_str] = zone_top_name consolidate_clients = name_obj.single_client() dnskey_serialized = dnskey.serialize(consolidate_clients=consolidate_clients, html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip) all_warnings = [] if rrset_info_with_warnings: for rrset_info in rrset_info_with_warnings: for warning in name_obj.rrset_warnings[rrset_info]: servers_clients = warning.servers_clients warning = Errors.DomainNameAnalysisError.insert_into_list(warning.copy(), all_warnings, None, None, None) warning.servers_clients.update(servers_clients) if 'warnings' not in dnskey_serialized: dnskey_serialized['warnings'] = [] dnskey_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in all_warnings] all_errors = [] if rrset_info_with_errors: for rrset_info in rrset_info_with_errors: for error in name_obj.rrset_errors[rrset_info]: servers_clients = error.servers_clients error = Errors.DomainNameAnalysisError.insert_into_list(error.copy(), all_errors, None, None, None) error.servers_clients.update(servers_clients) if 'errors' not in dnskey_serialized: dnskey_serialized['errors'] = [] dnskey_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in all_errors] self.node_info[node_str] = [dnskey_serialized] if node_str not in self.node_mapping: self.node_mapping[node_str] = set() self.node_mapping[node_str].add(dnskey) self.node_reverse_mapping[dnskey] = node_str return self.G.get_node(node_str) def 
add_dnskey_non_existent(self, name, zone, algorithm, key_tag): # We just use 99999 as a "high number", so it doesn't collide with the # lower numbers assigned to real DNSKEYs node_str = self.dnskey_node_str(99999, name, algorithm, key_tag) if not self.G.has_node(node_str): label_str = '<DNSKEY
alg=%d, id=%d>' % \ (12, 'Helvetica', 10, algorithm, key_tag) attr = {'style': 'filled,dashed', 'color': COLORS['insecure_non_existent'], 'fillcolor': '#ffffff' } S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone) S.add_node(node_str, id=node_str, shape='ellipse', label=label_str, **attr) self.node_subgraph_name[node_str] = zone_top_name dnskey_meta = DNSKEYNonExistent(name, algorithm, key_tag) self.node_info[node_str] = [dnskey_meta.serialize()] self.node_mapping[node_str] = set() return self.G.get_node(node_str) def ds_node_str(self, id, name, ds, rdtype): digest_types = [d.digest_type for d in ds] digest_types.sort() digest_str = '_'.join(map(str, digest_types)) return '%s-%s|%s|%d|%d|%s' % (dns.rdatatype.to_text(rdtype), id, fmt.humanize_name(name), ds[0].algorithm, ds[0].key_tag, digest_str) def has_ds(self, id, name, ds, rdtype): return self.G.has_node(self.ds_node_str(id, name, ds, rdtype)) def get_ds(self, id, name, ds, rdtype): return self.G.get_node(self.ds_node_str(id, name, ds, rdtype)) def add_ds(self, name, ds_statuses, zone_obj, parent_obj): ds_info = ds_statuses[0].ds_meta ds = [d.ds for d in ds_statuses] rdtype = ds_info.rrset.rdtype node_str = self.ds_node_str(self.id_for_multiple_ds(name, ds), name, ds, rdtype) if not self.G.has_node(node_str): digest_types = [d.digest_type for d in ds] digest_types.sort() digest_str = ','.join(map(str, digest_types)) if len(digest_types) != 1: plural = 's' else: plural = '' img_str = '' if [x for x in ds_statuses if [y for y in x.errors if isinstance(y, Errors.DSError)]] or zone_obj.rrset_errors[ds_info]: img_str = '' % ERROR_ICON elif [x for x in ds_statuses if [y for y in x.warnings if isinstance(y, Errors.DSError)]] or zone_obj.rrset_warnings[ds_info]: img_str = '' % WARNING_ICON attr = {'style': 'filled', 'fillcolor': '#ffffff' } if img_str: label_str = '<
%s%s
digest alg%s=%s
>' % \ (12, 'Helvetica', dns.rdatatype.to_text(rdtype), img_str, 10, plural, digest_str) else: label_str = '<%s
digest alg%s=%s>' % \ (12, 'Helvetica', dns.rdatatype.to_text(rdtype), 10, plural, digest_str) S, parent_node_str, parent_bottom_name, parent_top_name = self.get_zone(parent_obj.name) S.add_node(node_str, id=node_str, shape='ellipse', label=label_str, **attr) self.node_subgraph_name[node_str] = parent_top_name consolidate_clients = zone_obj.single_client() ds_serialized = [d.serialize(consolidate_clients=consolidate_clients, html_format=True, map_ip_to_ns_name=zone_obj.get_ns_name_for_ip) for d in ds_statuses] digest_algs = [] digests = [] for d in ds_serialized: digest_algs.append(d['digest_type']) digests.append(d['digest']) digest_algs.sort() digests.sort() consolidated_ds_serialized = ds_serialized[0] consolidated_ds_serialized['digest_type'] = digest_algs consolidated_ds_serialized['digest'] = digests if zone_obj.rrset_warnings[ds_info]: if 'warnings' not in consolidated_ds_serialized: consolidated_ds_serialized['warnings'] = [] consolidated_ds_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in zone_obj.rrset_warnings[ds_info]] if zone_obj.rrset_errors[ds_info]: if 'errors' not in consolidated_ds_serialized: consolidated_ds_serialized['errors'] = [] consolidated_ds_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in zone_obj.rrset_errors[ds_info]] self.node_info[node_str] = [consolidated_ds_serialized] T, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) self.add_ds_map(name, node_str, ds_statuses, zone_obj, parent_obj) if node_str not in self.node_mapping: self.node_mapping[node_str] = set() self.node_mapping[node_str].add(ds_info) self.node_reverse_mapping[ds_info] = node_str return self.G.get_node(node_str) def add_ds_map(self, name, ds_node, ds_statuses, zone_obj, parent_obj): rdtype = ds_statuses[0].ds_meta.rrset.rdtype ds_status = ds_statuses[0] if ds_status.validation_status == Status.DS_STATUS_VALID: line_color = 
COLORS['secure'] line_style = 'solid' elif ds_status.validation_status in (Status.DS_STATUS_INDETERMINATE_NO_DNSKEY, Status.DS_STATUS_INDETERMINATE_MATCH_PRE_REVOKE, Status.DS_STATUS_ALGORITHM_IGNORED): line_color = COLORS['insecure_non_existent'] line_style = 'dashed' elif ds_status.validation_status == Status.DS_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM: line_color = COLORS['indeterminate'] line_style = 'solid' elif ds_status.validation_status == Status.DS_STATUS_INVALID_DIGEST: line_color = COLORS['invalid'] line_style = 'solid' elif ds_status.validation_status == Status.DS_STATUS_INVALID: line_color = COLORS['invalid'] line_style = 'dashed' if ds_status.dnskey is None: dnskey_node = self.add_dnskey_non_existent(zone_obj.name, zone_obj.name, ds_status.ds.algorithm, ds_status.ds.key_tag) else: dnskey_node = self.get_dnskey(self.id_for_dnskey(zone_obj.name, ds_status.dnskey.rdata), zone_obj.name, ds_status.dnskey.rdata.algorithm, ds_status.dnskey.key_tag) edge_id = 'digest-%s|%s|%s|%s' % (dnskey_node, ds_node, line_color.lstrip('#'), line_style) self.G.add_edge(dnskey_node, ds_node, id=edge_id, color=line_color, style=line_style, dir='back') self.node_info[edge_id] = [self.node_info[ds_node][0].copy()] self.node_info[edge_id][0]['description'] = 'Digest for %s' % (self.node_info[edge_id][0]['description']) self.node_mapping[edge_id] = set(ds_statuses) for d in ds_statuses: self.node_reverse_mapping[d] = edge_id def zone_node_str(self, name): return 'cluster_%s' % fmt.humanize_name(name) def has_zone(self, name): return self.G.get_subgraph(self.zone_node_str(name)) is not None def get_zone(self, name): node_str = self.zone_node_str(name) top_name = node_str + '_top' bottom_name = node_str + '_bottom' S = self.G.get_subgraph(node_str) return S, node_str, bottom_name, top_name def add_zone(self, zone_obj): node_str = self.zone_node_str(zone_obj.name) top_name = node_str + '_top' bottom_name = node_str + '_bottom' S = self.G.get_subgraph(node_str) if S is None: img_str = 
'' if zone_obj.zone_errors: img_str = '' % ERROR_ICON elif zone_obj.zone_warnings: img_str = '' % WARNING_ICON if zone_obj.analysis_end is not None: label_str = '<
%s%s
(%s)
>' % \ (12, zone_obj, img_str, 10, fmt.datetime_to_str(zone_obj.analysis_end)) else: label_str = '<
%s%s
>' % \ (12, zone_obj, img_str) S = self.G.add_subgraph(name=node_str, label=label_str, labeljust='l', penwidth='0.5', id=top_name) S.add_node(top_name, shape='point', style='invis') S.add_node(bottom_name, shape='point', style='invis') self.node_subgraph_name[top_name] = top_name self.node_subgraph_name[bottom_name] = top_name self.node_reverse_mapping[zone_obj] = top_name consolidate_clients = zone_obj.single_client() zone_serialized = OrderedDict() zone_serialized['description'] = '%s zone' % (zone_obj) if zone_obj.zone_errors: zone_serialized['errors'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in zone_obj.zone_errors] if zone_obj.zone_warnings: zone_serialized['warnings'] = [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in zone_obj.zone_warnings] self.node_info[top_name] = [zone_serialized] return S, node_str, bottom_name, top_name def add_rrsig(self, rrsig_status, name_obj, signer_obj, signed_node, port=None): if signer_obj is not None: zone_name = signer_obj.zone.name else: zone_name = name_obj.zone.name if rrsig_status.dnskey is None: dnskey_node = self.add_dnskey_non_existent(rrsig_status.rrsig.signer, zone_name, rrsig_status.rrsig.algorithm, rrsig_status.rrsig.key_tag) else: dnskey_node = self.get_dnskey(self.id_for_dnskey(rrsig_status.rrsig.signer, rrsig_status.dnskey.rdata), rrsig_status.rrsig.signer, rrsig_status.dnskey.rdata.algorithm, rrsig_status.dnskey.key_tag) #XXX consider not adding icons if errors are apparent from color of line edge_label = '' if rrsig_status.errors: edge_label = '<
>' % ERROR_ICON elif rrsig_status.warnings: edge_label = '<
>' % WARNING_ICON if rrsig_status.validation_status == Status.RRSIG_STATUS_VALID: line_color = COLORS['secure'] line_style = 'solid' elif rrsig_status.validation_status in (Status.RRSIG_STATUS_INDETERMINATE_NO_DNSKEY, Status.RRSIG_STATUS_INDETERMINATE_MATCH_PRE_REVOKE, Status.RRSIG_STATUS_ALGORITHM_IGNORED): line_color = COLORS['insecure_non_existent'] line_style = 'dashed' elif rrsig_status.validation_status == Status.RRSIG_STATUS_INDETERMINATE_UNKNOWN_ALGORITHM: line_color = COLORS['indeterminate'] line_style = 'solid' elif rrsig_status.validation_status == Status.RRSIG_STATUS_EXPIRED: line_color = COLORS['expired'] line_style = 'solid' elif rrsig_status.validation_status == Status.RRSIG_STATUS_PREMATURE: line_color = COLORS['expired'] line_style = 'solid' elif rrsig_status.validation_status == Status.RRSIG_STATUS_INVALID_SIG: line_color = COLORS['invalid'] line_style = 'solid' elif rrsig_status.validation_status == Status.RRSIG_STATUS_INVALID: line_color = COLORS['invalid'] line_style = 'dashed' attrs = {} edge_id = 'RRSIG-%s|%s|%s|%s' % (signed_node.replace('*', '_'), dnskey_node, line_color.lstrip('#'), line_style) edge_key = '%s-%s' % (line_color, line_style) if port is not None: attrs['tailport'] = port edge_id += '|%s' % port.replace('*', '_') edge_key += '|%s' % port # if this DNSKEY is signing data in a zone above itself (e.g., DS # records), then remove constraint from the edge signed_node_zone = self.node_subgraph_name[signed_node][8:-4] dnskey_node_zone = self.node_subgraph_name[dnskey_node][8:-4] if not signed_node_zone.endswith(dnskey_node_zone): attrs['constraint'] = 'false' if (signed_node, dnskey_node, edge_key) not in self._edge_keys: self._edge_keys.add((signed_node, dnskey_node, edge_key)) self.G.add_edge(signed_node, dnskey_node, label=edge_label, id=edge_id, color=line_color, style=line_style, dir='back', **attrs) consolidate_clients = name_obj.single_client() rrsig_serialized = rrsig_status.serialize(consolidate_clients=consolidate_clients, 
html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip) if edge_id not in self.node_info: self.node_info[edge_id] = [] self.node_mapping[edge_id] = set() self.node_info[edge_id].append(rrsig_serialized) self.node_mapping[edge_id].add(rrsig_status) self.node_reverse_mapping[rrsig_status] = edge_id def id_for_rrset(self, rrset_info): name, rdtype = rrset_info.rrset.name, rrset_info.rrset.rdtype try: rrset_info_list = self.rrset_ids[(name,rdtype)] except KeyError: self.rrset_ids[(name,rdtype)] = [] rrset_info_list = self.rrset_ids[(name,rdtype)] for rrset_info1, id in rrset_info_list: if rrset_info == rrset_info1: return id id = self.next_rrset_id self.rrset_ids[(name,rdtype)].append((rrset_info, id)) self.next_rrset_id += 1 return id def rrset_node_str(self, name, rdtype, id): return 'RRset-%d|%s|%s' % (id, fmt.humanize_name(name), dns.rdatatype.to_text(rdtype)) def has_rrset(self, name, rdtype, id): return self.G.has_node(self.rrset_node_str(name, rdtype, id)) def get_rrset(self, name, rdtype, id): return self.G.get_node(self.rrset_node_str(name, rdtype, id)) def add_rrset(self, rrset_info, wildcard_name, name_obj, zone_obj): name = wildcard_name or rrset_info.rrset.name node_str = self.rrset_node_str(name, rrset_info.rrset.rdtype, self.id_for_rrset(rrset_info)) node_id = node_str.replace('*', '_') if not self.G.has_node(node_str): img_str = '' if name_obj.rrset_errors[rrset_info]: img_str = '' % ERROR_ICON elif name_obj.rrset_warnings[rrset_info]: img_str = '' % WARNING_ICON if img_str: node_label = '<
%s/%s
%s
>' % \ (12, 'Helvetica', fmt.humanize_name(name, True), dns.rdatatype.to_text(rrset_info.rrset.rdtype), img_str) else: node_label = '<%s/%s>' % \ (12, 'Helvetica', fmt.humanize_name(name, True), dns.rdatatype.to_text(rrset_info.rrset.rdtype)) attr = {} attr['shape'] = 'rectangle' attr['style'] = 'rounded,filled' attr['fillcolor'] = '#ffffff' S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) S.add_node(node_str, id=node_id, label=node_label, fontsize='10', **attr) self.node_subgraph_name[node_str] = zone_top_name consolidate_clients = name_obj.single_client() rrset_serialized = rrset_info.serialize(consolidate_clients=consolidate_clients, html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip) if name_obj.rrset_warnings[rrset_info]: if 'warnings' not in rrset_serialized: rrset_serialized['warnings'] = [] rrset_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in name_obj.rrset_warnings[rrset_info]] if name_obj.rrset_errors[rrset_info]: if 'errors' not in rrset_serialized: rrset_serialized['errors'] = [] rrset_serialized['errors'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in name_obj.rrset_errors[rrset_info]] self.node_info[node_id] = [rrset_serialized] self.G.add_edge(zone_bottom_name, node_str, style='invis') if node_str not in self.node_mapping: self.node_mapping[node_str] = set() self.node_mapping[node_str].add(rrset_info) self.node_reverse_mapping[rrset_info] = node_str return self.G.get_node(node_str) def add_rrset_non_existent(self, name_obj, zone_obj, neg_response_info, nxdomain, wildcard): if nxdomain: node_str = self.rrset_node_str(neg_response_info.qname, neg_response_info.rdtype, 0) else: node_str = self.rrset_node_str(neg_response_info.qname, neg_response_info.rdtype, 1) node_id = node_str.replace('*', '_') if not self.G.has_node(node_str): if wildcard: warnings_list = errors_list = [] else: if nxdomain: warnings_list 
= name_obj.nxdomain_warnings[neg_response_info] errors_list = name_obj.nxdomain_errors[neg_response_info] else: warnings_list = name_obj.nodata_warnings[neg_response_info] errors_list = name_obj.nodata_errors[neg_response_info] if nxdomain: rdtype_str = '' else: rdtype_str = '/%s' % dns.rdatatype.to_text(neg_response_info.rdtype) img_str = '' if errors_list: img_str = '' % ERROR_ICON elif warnings_list: img_str = '' % WARNING_ICON if img_str: node_label = '<
%s%s
%s
>' % \ (12, 'Helvetica', fmt.humanize_name(neg_response_info.qname, True), rdtype_str, img_str) else: node_label = '<%s%s>' % \ (12, 'Helvetica', fmt.humanize_name(neg_response_info.qname, True), rdtype_str) attr = {} attr['shape'] = 'rectangle' attr['style'] = 'rounded,filled,dashed' if nxdomain: attr['style'] += ',diagonals' attr['fillcolor'] = '#ffffff' S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) S.add_node(node_str, id=node_id, label=node_label, fontsize='10', **attr) self.node_subgraph_name[node_str] = zone_top_name rrset_info = RRsetNonExistent(neg_response_info.qname, neg_response_info.rdtype, nxdomain, neg_response_info.servers_clients) consolidate_clients = name_obj.single_client() rrset_serialized = rrset_info.serialize(consolidate_clients=consolidate_clients, html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip) if warnings_list: if 'warnings' not in rrset_serialized: rrset_serialized['warnings'] = [] rrset_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in warnings_list] if errors_list: if 'errors' not in rrset_serialized: rrset_serialized['errors'] = [] rrset_serialized['errors'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in errors_list] self.node_info[node_id] = [rrset_serialized] self.G.add_edge(zone_bottom_name, node_str, style='invis') if node_str not in self.node_mapping: self.node_mapping[node_str] = set() self.node_mapping[node_str].add(neg_response_info) self.node_reverse_mapping[neg_response_info] = node_str return self.G.get_node(node_str) def _add_errors(self, name_obj, zone_obj, name, rdtype, errors_list, code, icon, category, status, description): if not errors_list: return None node_str = self.rrset_node_str(name, rdtype, code) img_str = '' % icon node_label = '<
%s
%s/%s
>' % \ (img_str, 10, 'Helvetica', '#b0b0b0', fmt.humanize_name(name, True), dns.rdatatype.to_text(rdtype), ) attr = {} attr['shape'] = 'none' attr['margin'] = '0' node_id = node_str.replace('*', '_') S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) S.add_node(node_str, id=node_id, label=node_label, fontsize='10', **attr) self.node_subgraph_name[node_str] = zone_top_name consolidate_clients = name_obj.single_client() errors_serialized = OrderedDict() errors_serialized['description'] = '%s %s/%s' % (description, fmt.humanize_name(name), dns.rdatatype.to_text(rdtype)) errors_serialized[category] = [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in errors_list] errors_serialized['status'] = status self.node_info[node_id] = [errors_serialized] self.G.add_edge(zone_bottom_name, node_str, style='invis') # no need to map errors self.node_mapping[node_str] = set() return self.G.get_node(node_str) def add_errors(self, name_obj, zone_obj, name, rdtype, errors_list): return self._add_errors(name_obj, zone_obj, name, rdtype, errors_list, 2, ERROR_ICON, 'errors', 'ERROR', 'Response errors for') def add_warnings(self, name_obj, zone_obj, name, rdtype, warnings_list): return self._add_errors(name_obj, zone_obj, name, rdtype, warnings_list, 3, WARNING_ICON, 'warnings', 'WARNING', 'Response warnings for') def add_dname(self, dname_status, name_obj, zone_obj): dname_rrset_info = dname_status.synthesized_cname.dname_info dname_node = self.add_rrset(dname_rrset_info, None, name_obj, zone_obj) if dname_status.validation_status == Status.DNAME_STATUS_VALID: line_color = COLORS['secure'] line_style = 'solid' elif dname_status.validation_status == Status.DNAME_STATUS_INDETERMINATE: line_color = COLORS['indeterminate'] line_style = 'solid' elif dname_status.validation_status == Status.DNAME_STATUS_INVALID: line_color = COLORS['invalid'] line_style = 'solid' if dname_status.included_cname is None: cname_node = 
self.add_rrset_non_existent(name_obj, zone_obj, Response.NegativeResponseInfo(dname_status.synthesized_cname.rrset.name, dns.rdatatype.CNAME, False), False, False) else: cname_node = self.add_rrset(dname_status.included_cname, None, name_obj, zone_obj) edge_id = 'dname-%s|%s|%s|%s' % (cname_node, dname_node, line_color.lstrip('#'), line_style) edge_key = '%s-%s' % (line_color, line_style) if (cname_node, dname_node, edge_key) not in self._edge_keys: self._edge_keys.add((cname_node, dname_node, edge_key)) edge_label = '' if dname_status.errors: edge_label = '<
>' % ERROR_ICON elif dname_status.warnings: edge_label = '<
>' % WARNING_ICON self.G.add_edge(cname_node, dname_node, label=edge_label, id=edge_id, color=line_color, style=line_style, dir='back') self.node_info[edge_id] = [dname_status.serialize(html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip)] if edge_id not in self.node_mapping: self.node_mapping[edge_id] = set() self.node_mapping[edge_id].add(dname_status) self.node_reverse_mapping[dname_status] = edge_id self.add_rrsigs(name_obj, zone_obj, dname_rrset_info, dname_node) return cname_node def nsec_node_str(self, nsec_rdtype, id, name, rdtype): return '%s-%d|%s|%s' % (dns.rdatatype.to_text(nsec_rdtype), id, fmt.humanize_name(name), dns.rdatatype.to_text(rdtype)) def has_nsec(self, nsec_rdtype, id, name, rdtype): return self.G.has_node(self.nsec_node_str(nsec_rdtype, id, name, rdtype)) def get_nsec(self, nsec_rdtype, id, name, rdtype): return self.G.get_node(self.nsec_node_str(nsec_rdtype, id, name, rdtype)) def add_nsec(self, nsec_status, name, rdtype, name_obj, zone_obj, covered_node): if nsec_status.nsec_set_info.use_nsec3: nsec_rdtype = dns.rdatatype.NSEC3 else: nsec_rdtype = dns.rdatatype.NSEC node_str = self.nsec_node_str(nsec_rdtype, self.id_for_nsec(name, rdtype, nsec_status.__class__, nsec_status.nsec_set_info), name, rdtype) node_id = node_str.replace('*', '_') edge_id = '%sC-%s|%s' % (dns.rdatatype.to_text(nsec_rdtype), covered_node.replace('*', '_'), node_str) if not self.G.has_node(node_str): rrset_info_with_errors = [x for x in nsec_status.nsec_set_info.rrsets.values() if name_obj.rrset_errors[x]] rrset_info_with_warnings = [x for x in nsec_status.nsec_set_info.rrsets.values() if name_obj.rrset_warnings[x]] img_str = '' if nsec_status.errors or rrset_info_with_errors: img_str = '' % ERROR_ICON elif nsec_status.warnings or rrset_info_with_warnings: img_str = '' % WARNING_ICON # if it is NXDOMAIN, not type DS if isinstance(nsec_status, (Status.NSEC3StatusNXDOMAIN, Status.NSEC3StatusNODATA)) and nsec_status.opt_out: bgcolor = 'lightgray' 
else: bgcolor = '#ffffff' #XXX it looks better when cellspacing is 0, but we can't do that # when there is an icon in use because of the way the graphviz # library draws it. if img_str: cellspacing = 0 else: cellspacing = -2 self.nsec_rr_status[node_str] = {} label_str = '<' % (cellspacing, bgcolor) for nsec_name in nsec_status.nsec_set_info.rrsets: nsec_name = lb2s(nsec_name.canonicalize().to_text()).replace(r'"', r'\"') self.nsec_rr_status[node_str][nsec_name] = '' label_str += '' % (nsec_name, 6) label_str += '
' % len(nsec_status.nsec_set_info.rrsets) if img_str: label_str += '
%s%s
' % \ (12, 'Helvetica', dns.rdatatype.to_text(nsec_rdtype), img_str) else: label_str += '%s' % \ (12, 'Helvetica', dns.rdatatype.to_text(nsec_rdtype)) label_str += '
>' S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone_obj.name) S.add_node(node_str, id=node_id, label=label_str, shape='none') self.node_subgraph_name[node_str] = zone_top_name consolidate_clients = name_obj.single_client() nsec_serialized = nsec_status.serialize(consolidate_clients=consolidate_clients, html_format=True, map_ip_to_ns_name=name_obj.zone.get_ns_name_for_ip) nsec_serialized_edge = nsec_serialized.copy() nsec_serialized_edge['description'] = 'Non-existence proof provided by %s' % (nsec_serialized['description']) all_warnings = [] if rrset_info_with_warnings: for rrset_info in rrset_info_with_warnings: for warning in name_obj.rrset_warnings[rrset_info]: servers_clients = warning.servers_clients warning = Errors.DomainNameAnalysisError.insert_into_list(warning.copy(), all_warnings, None, None, None) warning.servers_clients.update(servers_clients) if 'warnings' not in nsec_serialized: nsec_serialized['warnings'] = [] nsec_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in all_warnings] all_errors = [] if rrset_info_with_errors: for rrset_info in rrset_info_with_errors: for error in name_obj.rrset_errors[rrset_info]: servers_clients = error.servers_clients error = Errors.DomainNameAnalysisError.insert_into_list(error.copy(), all_errors, None, None, None) error.servers_clients.update(servers_clients) if 'errors' not in nsec_serialized: nsec_serialized['errors'] = [] nsec_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in all_errors] self.node_info[node_id] = [nsec_serialized] nsec_node = self.G.get_node(node_str) if nsec_status.validation_status == Status.NSEC_STATUS_VALID: line_color = COLORS['secure'] line_style = 'solid' elif nsec_status.validation_status == Status.NSEC_STATUS_INDETERMINATE: line_color = COLORS['indeterminate'] line_style = 'solid' elif nsec_status.validation_status == Status.NSEC_STATUS_INVALID: line_color = 
COLORS['bogus'] line_style = 'solid' edge_label = '' self.G.add_edge(covered_node, nsec_node, label=edge_label, id=edge_id, color=line_color, style=line_style, dir='back') self.node_info[edge_id] = [nsec_serialized_edge] else: nsec_node = self.G.get_node(node_str) if node_str not in self.node_mapping: self.node_mapping[node_str] = set() self.node_mapping[node_str].add(nsec_status.nsec_set_info) self.node_reverse_mapping[nsec_status.nsec_set_info] = node_str if edge_id not in self.node_mapping: self.node_mapping[edge_id] = set() self.node_mapping[edge_id].add(nsec_status) self.node_reverse_mapping[nsec_status] = edge_id return nsec_node def add_wildcard(self, name_obj, zone_obj, rrset_info, nsec_status, wildcard_name): wildcard_node = self.add_rrset(rrset_info, wildcard_name, name_obj, zone_obj) self.add_rrsigs(name_obj, zone_obj, rrset_info, wildcard_node) nxdomain_node = self.add_rrset_non_existent(name_obj, zone_obj, rrset_info.wildcard_info[wildcard_name], True, True) if nsec_status is not None: nsec_node = self.add_nsec(nsec_status, rrset_info.rrset.name, rrset_info.rrset.rdtype, name_obj, zone_obj, nxdomain_node) for nsec_name, rrset_info in nsec_status.nsec_set_info.rrsets.items(): nsec_cell = lb2s(nsec_name.canonicalize().to_text()) self.add_rrsigs(name_obj, zone_obj, rrset_info, nsec_node, port=nsec_cell) return wildcard_node #XXX consider adding this node (using, e.g., clustering) #rrset_node = self.add_rrset(rrset_info, None, zone_obj, zone_obj) #self.G.add_edge(rrset_node, nxdomain_node, color=COLORS['secure'], style='invis', dir='back') #self.G.add_edge(rrset_node, wildcard_node, color=COLORS['secure'], style='invis', dir='back') #return rrset_node def add_cds(self, zone_obj, rrset_info, my_nodes): for rdata in rrset_info.rrset: nodes_to_link_to = set() nodes_to_link_to.add(self._dnskey_node_for_cds(zone_obj, rdata)) for my_node in my_nodes: for target_node in nodes_to_link_to: if not self.G.has_edge(target_node, my_node): self.G.add_edge(target_node, 
my_node, color=COLORS['CDS'], dir='back', constraint='false') def add_cdnskey(self, zone_obj, rrset_info, my_nodes): for rdata in rrset_info.rrset: # create a "DNSKEY" from CDNSKEY cdnskey = Response.DNSKEYMeta(rrset_info.rrset.name, rdata, rrset_info.rrset.ttl) dnskey_node = self._dnskey_node_for_cdnskey(zone_obj, cdnskey) for my_node in my_nodes: if not self.G.has_edge(dnskey_node, my_node): self.G.add_edge(dnskey_node, my_node, color=COLORS['CDNSKEY'], dir='back', constraint='false') def _dnskey_node_for_cdnskey(self, zone_obj, cdnskey): try: return self.get_dnskey(self.id_for_dnskey(zone_obj.name, cdnskey.rdata), zone_obj.name, cdnskey.rdata.algorithm, cdnskey.key_tag) except KeyError: return self.add_dnskey_non_existent(zone_obj.name, zone_obj.name, cdnskey.rdata.algorithm, cdnskey.key_tag) def _dnskey_node_for_cds(self, zone_obj, cds): dnskeys = zone_obj.get_dnskeys() dnskey_node = None for dnskey in dnskeys: if cds.algorithm == dnskey.rdata.algorithm and cds.key_tag == dnskey.key_tag: dnskey_msg = dnskey.message_for_ds() status = crypto.validate_ds_digest(cds.digest_type, cds.digest, dnskey_msg) dnskey_node = self.get_dnskey(self.id_for_dnskey(zone_obj.name, dnskey.rdata), zone_obj.name, dnskey.rdata.algorithm, dnskey.key_tag) if status: return dnskey_node # if we could not get an exact match, we use what was closest (i.e., # algorithm and key tag matched). 
# NOTE(review): this span is a whitespace-mangled extraction of the interior of
# dnsviz's DNSAuthGraph class (dnsviz/viz/dnssec.py); newlines were collapsed and
# HTML-like tags inside some string literals were stripped by the extractor.
# The code below restores conventional formatting and adds documentation only;
# the few literals that were garbled in extraction are reconstructed and flagged
# with review notes — verify them against upstream dnsviz 0.11.1.

        # (tail of a method whose definition starts above this chunk: reuse an
        # existing DNSKEY node for a CDS record, or create a non-existent one)
        if dnskey_node is not None:
            return dnskey_node
        return self.add_dnskey_non_existent(zone_obj.name, zone_obj.name, cds.algorithm, cds.key_tag)

    def add_alias(self, alias, target):
        """Draw an alias (CNAME/DNAME) edge from target back to alias.

        The edge is only added if no black edge between the two already
        exists; constraint is relaxed when the alias is in a subdomain of
        the target's zone, to avoid distorting the zone hierarchy layout.
        """
        if not [x for x in self.G.out_edges(target) if x[1] == alias and x.attr['color'] == 'black']:
            # node_subgraph_name values look like 'cluster_<zone>_top'; strip
            # the 'cluster_' prefix and '_top' suffix to get the zone label
            alias_zone = self.node_subgraph_name[alias][8:-4]
            target_zone = self.node_subgraph_name[target][8:-4]
            if alias_zone.endswith(target_zone) and alias_zone != target_zone:
                self.G.add_edge(target, alias, color=COLORS['alias'], dir='back', constraint='false')
            else:
                self.G.add_edge(target, alias, color=COLORS['alias'], dir='back')

    def add_rrsigs(self, name_obj, zone_obj, rrset_info, signed_node, port=None):
        """Add RRSIG edges for every signature covering rrset_info to signed_node.

        If a signer differs from the zone's own name, its zone is graphed
        first so the DNSKEY node the RRSIG points to exists.
        """
        for rrsig in name_obj.rrsig_status[rrset_info]:
            signer_obj = name_obj.get_name(rrsig.signer)
            if rrsig.signer != zone_obj.name and signer_obj is not None:
                self.graph_zone_auth(signer_obj, False)
            for dnskey in name_obj.rrsig_status[rrset_info][rrsig]:
                rrsig_status = name_obj.rrsig_status[rrset_info][rrsig][dnskey]
                self.add_rrsig(rrsig_status, name_obj, signer_obj, signed_node, port=port)

    def graph_rrset_auth(self, name_obj, name, rdtype, trace=None):
        """Graph the authentication chain for the (name, rdtype) query.

        Handles positive answers (including wildcards and DNAMEs), NXDOMAIN
        and NODATA responses with their NSEC(3) proofs, response errors and
        warnings, and CNAME chains (trace guards against CNAME loops).
        Returns the list of graph nodes produced for (name, rdtype).
        """
        if (name, rdtype) not in self.processed_rrsets:
            self.processed_rrsets[(name, rdtype)] = []

        #XXX there are reasons for this (e.g., NXDOMAIN, after which no further
        # queries are made), but it would be good to have a sanity check, so
        # we don't simply produce an incomplete graph.  (In the case above, perhaps
        # point to the NXDOMAIN produced by another query.)
        if (name, rdtype) not in name_obj.queries:
            return []

        zone_obj = name_obj.zone
        if zone_obj is not None:
            self.graph_zone_auth(zone_obj, False)
        else:
            # in recursive analysis, if we don't contact any servers that are
            # valid and responsive, then we get a zone_obj that is None
            # (because we couldn't detect any NS records in the ancestry)
            zone_obj = name_obj
            self.add_zone(zone_obj)

        if name_obj.nxdomain_ancestor is not None:
            self.graph_rrset_auth(name_obj.nxdomain_ancestor,
                    name_obj.nxdomain_ancestor.name,
                    name_obj.nxdomain_ancestor.referral_rdtype)

        # if this is for DNSKEY or DS of a zone, then return, as we have
        # already take care of these types in graph_zone_auth()
        if name_obj.is_zone() and rdtype in (dns.rdatatype.DNSKEY, dns.rdatatype.DS):
            return []

        # trace is used just for CNAME chains
        if trace is None:
            trace = [name]

        cname_nodes = []
        # if this name is an alias, then graph its target, i.e., the canonical
        # name, unless this is a recursive analysis.
        if name_obj.analysis_type != ANALYSIS_TYPE_RECURSIVE:
            if name in name_obj.cname_targets:
                for target, cname_obj in name_obj.cname_targets[name].items():
                    if cname_obj is not None:
                        if target not in trace:
                            cname_nodes.extend(self.graph_rrset_auth(cname_obj, target, rdtype, trace + [target]))

        query = name_obj.queries[(name, rdtype)]
        node_to_cname_mapping = set()
        for rrset_info in query.answer_info:

            # only do qname, unless analysis type is recursive
            if not (rrset_info.rrset.name == name or name_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE):
                continue

            my_name = rrset_info.rrset.name
            my_nodes = []

            if (my_name, rdtype) not in self.processed_rrsets:
                self.processed_rrsets[(my_name, rdtype)] = []

            my_name_obj = name_obj.get_name(my_name)
            my_zone_obj = my_name_obj.zone
            if my_zone_obj is not None:
                self.graph_zone_auth(my_zone_obj, False)
            else:
                my_zone_obj = my_name_obj
                self.add_zone(my_zone_obj)

            #XXX can we combine multiple DNAMEs into one?
            #XXX can we combine multiple NSEC(3) into a cluster?
            #XXX can we combine wildcard components into a cluster?
            if rrset_info in name_obj.dname_status:
                for dname_status in name_obj.dname_status[rrset_info]:
                    my_nodes.append(self.add_dname(dname_status, name_obj, my_zone_obj))
            elif rrset_info.wildcard_info:
                for wildcard_name in rrset_info.wildcard_info:
                    if name_obj.wildcard_status[rrset_info.wildcard_info[wildcard_name]]:
                        for nsec_status in name_obj.wildcard_status[rrset_info.wildcard_info[wildcard_name]]:
                            my_nodes.append(self.add_wildcard(name_obj, my_zone_obj, rrset_info, nsec_status, wildcard_name))
                    else:
                        my_nodes.append(self.add_wildcard(name_obj, my_zone_obj, rrset_info, None, wildcard_name))
            else:
                rrset_node = self.add_rrset(rrset_info, None, name_obj, my_zone_obj)
                self.add_rrsigs(name_obj, my_zone_obj, rrset_info, rrset_node)
                my_nodes.append(rrset_node)

            # if this is a CNAME record, create a node-to-target mapping
            if rrset_info.rrset.rdtype == dns.rdatatype.CNAME:
                for my_node in my_nodes:
                    node_to_cname_mapping.add((my_node, rrset_info.rrset[0].target))
            elif rdtype == dns.rdatatype.CDNSKEY:
                if name == zone_obj.name:
                    self.add_cdnskey(zone_obj, rrset_info, my_nodes)
            elif rdtype == dns.rdatatype.CDS:
                if name == zone_obj.name:
                    self.add_cds(zone_obj, rrset_info, my_nodes)

            self.processed_rrsets[(my_name, rdtype)] += my_nodes

        for neg_response_info in query.nxdomain_info:
            # make sure this query was made to a server designated as
            # authoritative
            if not set([s for (s,c) in neg_response_info.servers_clients]).intersection(name_obj.zone.get_auth_or_designated_servers()):
                continue

            # only do qname, unless analysis type is recursive
            if not (neg_response_info.qname == name or name_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE):
                continue

            if (neg_response_info.qname, neg_response_info.rdtype) not in self.processed_rrsets:
                self.processed_rrsets[(neg_response_info.qname, neg_response_info.rdtype)] = []

            my_name_obj = name_obj.get_name(neg_response_info.qname)
            my_zone_obj = my_name_obj.zone
            if my_zone_obj is not None:
                self.graph_zone_auth(my_zone_obj, False)
            else:
                my_zone_obj = my_name_obj
                self.add_zone(my_zone_obj)

            nxdomain_node = self.add_rrset_non_existent(name_obj, my_zone_obj, neg_response_info, True, False)
            self.processed_rrsets[(neg_response_info.qname, neg_response_info.rdtype)].append(nxdomain_node)
            for nsec_status in name_obj.nxdomain_status[neg_response_info]:
                nsec_node = self.add_nsec(nsec_status, name, rdtype, name_obj, my_zone_obj, nxdomain_node)
                for nsec_name, rrset_info in nsec_status.nsec_set_info.rrsets.items():
                    nsec_cell = lb2s(nsec_name.canonicalize().to_text())
                    self.add_rrsigs(name_obj, my_zone_obj, rrset_info, nsec_node, port=nsec_cell)

            for soa_rrset_info in neg_response_info.soa_rrset_info:
                # If no servers match the authoritative servers, then put this in the parent zone
                if not set([s for (s,c) in soa_rrset_info.servers_clients]).intersection(my_zone_obj.get_auth_or_designated_servers()) and my_zone_obj.parent is not None:
                    z_obj = my_zone_obj.parent
                else:
                    z_obj = my_zone_obj
                soa_rrset_node = self.add_rrset(soa_rrset_info, None, name_obj, z_obj)
                self.add_rrsigs(name_obj, my_zone_obj, soa_rrset_info, soa_rrset_node)

        for neg_response_info in query.nodata_info:
            # only do qname, unless analysis type is recursive
            if not (neg_response_info.qname == name or name_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE):
                continue

            if (neg_response_info.qname, neg_response_info.rdtype) not in self.processed_rrsets:
                self.processed_rrsets[(neg_response_info.qname, neg_response_info.rdtype)] = []

            my_name_obj = name_obj.get_name(neg_response_info.qname)
            my_zone_obj = my_name_obj.zone
            if my_zone_obj is not None:
                self.graph_zone_auth(my_zone_obj, False)
            else:
                my_zone_obj = my_name_obj
                self.add_zone(my_zone_obj)

            nodata_node = self.add_rrset_non_existent(name_obj, my_zone_obj, neg_response_info, False, False)
            self.processed_rrsets[(neg_response_info.qname, neg_response_info.rdtype)].append(nodata_node)
            for nsec_status in name_obj.nodata_status[neg_response_info]:
                nsec_node = self.add_nsec(nsec_status, name, rdtype, name_obj, my_zone_obj, nodata_node)
                for nsec_name, rrset_info in nsec_status.nsec_set_info.rrsets.items():
                    nsec_cell = lb2s(nsec_name.canonicalize().to_text())
                    self.add_rrsigs(name_obj, my_zone_obj, rrset_info, nsec_node, port=nsec_cell)

            for soa_rrset_info in neg_response_info.soa_rrset_info:
                soa_rrset_node = self.add_rrset(soa_rrset_info, None, name_obj, my_zone_obj)
                self.add_rrsigs(name_obj, my_zone_obj, soa_rrset_info, soa_rrset_node)

        error_node = self.add_errors(name_obj, zone_obj, name, rdtype, name_obj.response_errors[query])
        if error_node is not None:
            if (name, rdtype) not in self.processed_rrsets:
                self.processed_rrsets[(name, rdtype)] = []
            self.processed_rrsets[(name, rdtype)].append(error_node)

        warning_node = self.add_warnings(name_obj, zone_obj, name, rdtype, name_obj.response_warnings[query])
        if warning_node is not None:
            if (name, rdtype) not in self.processed_rrsets:
                self.processed_rrsets[(name, rdtype)] = []
            self.processed_rrsets[(name, rdtype)].append(warning_node)

        for alias_node, target in node_to_cname_mapping:
            # if this is a recursive analysis, then we've already graphed the
            # node, above, so we graph its hierarchy and then retrieve it from
            # self.processed_rrsets
            if name_obj.analysis_type == ANALYSIS_TYPE_RECURSIVE:
                # if we didn't get the cname RRset in same response, then
                # processed_rrsets won't be populated
                try:
                    cname_nodes = self.processed_rrsets[(target, rdtype)]
                except KeyError:
                    cname_nodes = []
            for cname_node in cname_nodes:
                self.add_alias(alias_node, cname_node)

        return self.processed_rrsets[(name, rdtype)]

    def graph_zone_auth(self, name_obj, is_dlv):
        """Graph the DNSSEC authentication structure of a zone.

        Adds the zone cluster, its DNSKEY nodes, RRSIGs over the DNSKEY
        RRset, error/warning nodes for DNSKEY/DS queries, and the
        delegation (DS / DLV) linkage to the parent zone, including NSEC
        proofs of non-existent DS.  The (name, -1) key in processed_rrsets
        marks the zone as already graphed.
        """
        if (name_obj.name, -1) in self.processed_rrsets:
            return
        self.processed_rrsets[(name_obj.name, -1)] = True

        zone_obj = name_obj.zone
        S, zone_graph_name, zone_bottom, zone_top = self.add_zone(zone_obj)

        if zone_obj.stub:
            return

        # indicate that this zone is not a stub
        self.subgraph_not_stub.add(zone_top)

        #######################################
        # DNSKEY roles, based on what they sign
        #######################################
        all_dnskeys = name_obj.get_dnskeys()

        # Add DNSKEY nodes to graph
        for dnskey in name_obj.get_dnskeys():
            self.add_dnskey(name_obj, dnskey)

        for signed_keys, rrset_info in name_obj.get_dnskey_sets():
            for rrsig in name_obj.rrsig_status[rrset_info]:
                signer_obj = name_obj.get_name(rrsig.signer)
                if signer_obj is not None:
                    # if we have the analysis corresponding to the signer, then
                    # graph it too, if it was different from what we were
                    # expecting
                    if rrsig.signer != name_obj.name and not is_dlv:
                        self.graph_zone_auth(signer_obj, False)
                for dnskey in name_obj.rrsig_status[rrset_info][rrsig]:
                    rrsig_status = name_obj.rrsig_status[rrset_info][rrsig][dnskey]
                    if dnskey is None:
                        dnskey_node = None
                    else:
                        dnskey_node = self.get_dnskey(self.id_for_dnskey(signer_obj.name, dnskey.rdata), signer_obj.name, dnskey.rdata.algorithm, dnskey.key_tag)
                    for signed_key in signed_keys:
                        signed_key_node = self.get_dnskey(self.id_for_dnskey(name_obj.name, signed_key.rdata), name_obj.name, signed_key.rdata.algorithm, signed_key.key_tag)
                        self.add_rrsig(rrsig_status, name_obj, signer_obj, signed_key_node)

        # map negative responses for DNSKEY queries to top name of the zone
        try:
            dnskey_nodata_info = [x for x in name_obj.nodata_status if x.qname == name_obj.name and x.rdtype == dns.rdatatype.DNSKEY][0]
        except IndexError:
            pass
        else:
            self.node_reverse_mapping[dnskey_nodata_info] = zone_top
        try:
            dnskey_nxdomain_info = [x for x in name_obj.nxdomain_status if x.qname == name_obj.name and x.rdtype == dns.rdatatype.DNSKEY][0]
        except IndexError:
            pass
        else:
            self.node_reverse_mapping[dnskey_nxdomain_info] = zone_top

        # handle other responses to DNSKEY/DS queries
        for rdtype in (dns.rdatatype.DS, dns.rdatatype.DNSKEY):
            if (name_obj.name, rdtype) in name_obj.queries:

                # Handle errors and warnings for DNSKEY/DS queries
                if rdtype == dns.rdatatype.DS and zone_obj.parent is not None and not is_dlv:
                    z_obj = zone_obj.parent
                    self.graph_zone_auth(z_obj, False)
                else:
                    z_obj = zone_obj
                self.add_errors(name_obj, z_obj, name_obj.name, rdtype, name_obj.response_errors[name_obj.queries[(name_obj.name, rdtype)]])
                self.add_warnings(name_obj, z_obj, name_obj.name, rdtype, name_obj.response_warnings[name_obj.queries[(name_obj.name, rdtype)]])

                # Map CNAME responses to DNSKEY/DS queries to appropriate node
                for rrset_info in name_obj.queries[(name_obj.name, rdtype)].answer_info:
                    if rrset_info.rrset.rdtype == dns.rdatatype.CNAME:
                        rrset_node = self.add_rrset(rrset_info, None, name_obj, name_obj.zone)
                        if rrset_node not in self.node_mapping:
                            self.node_mapping[rrset_node] = []
                        self.node_mapping[rrset_node].add(rrset_info)
                        self.node_reverse_mapping[rrset_info] = rrset_node

        if not name_obj.is_zone():
            return

        if name_obj.parent is None or is_dlv:
            return

        for dlv in False, True:
            if dlv:
                parent_obj = name_obj.dlv_parent
                ds_name = name_obj.dlv_name
                rdtype = dns.rdatatype.DLV
            else:
                parent_obj = name_obj.parent
                ds_name = name_obj.name
                rdtype = dns.rdatatype.DS

            if parent_obj is None or ds_name is None:
                continue

            # if this is a DLV parent, and either we're not showing
            # DLV, or there is no DLV information for this zone, move along
            if dlv and (ds_name, rdtype) not in name_obj.queries:
                continue

            self.graph_zone_auth(parent_obj, dlv)

            P, parent_graph_name, parent_bottom, parent_top = self.add_zone(parent_obj)

            for dnskey in name_obj.ds_status_by_dnskey[rdtype]:
                ds_statuses = list(name_obj.ds_status_by_dnskey[rdtype][dnskey].values())

                # identify all validation_status/RRset/algorithm/key_tag
                # combinations, so we can cluster like DSs
                validation_statuses = set([(d.validation_status, d.ds_meta, d.ds.algorithm, d.ds.key_tag) for d in ds_statuses])

                for validation_status, rrset_info, algorithm, key_tag in validation_statuses:
                    ds_status_subset = [x for x in ds_statuses if x.validation_status == validation_status and x.ds_meta is rrset_info and x.ds.algorithm == algorithm and x.ds.key_tag == key_tag]

                    # create the DS node and edge
                    ds_node = self.add_ds(ds_name, ds_status_subset, name_obj, parent_obj)

                    self.add_rrsigs(name_obj, parent_obj, rrset_info, ds_node)

            edge_id = 0

            nsec_statuses = []
            soa_rrsets = []
            try:
                ds_nodata_info = [x for x in name_obj.nodata_status if x.qname == ds_name and x.rdtype == rdtype][0]
                nsec_statuses.extend(name_obj.nodata_status[ds_nodata_info])
                soa_rrsets.extend(ds_nodata_info.soa_rrset_info)
            except IndexError:
                ds_nodata_info = None
            try:
                ds_nxdomain_info = [x for x in name_obj.nxdomain_status if x.qname == ds_name and x.rdtype == rdtype][0]
                nsec_statuses.extend(name_obj.nxdomain_status[ds_nxdomain_info])
                soa_rrsets.extend(ds_nxdomain_info.soa_rrset_info)
            except IndexError:
                ds_nxdomain_info = None

            for nsec_status in nsec_statuses:
                nsec_node = self.add_nsec(nsec_status, ds_name, rdtype, name_obj, parent_obj, zone_top)
                # add a tail to the cluster
                self.G.get_edge(zone_top, nsec_node).attr['ltail'] = zone_graph_name
                # anchor NSEC node to bottom
                self.G.add_edge(parent_bottom, nsec_node, style='invis')
                for nsec_name, rrset_info in nsec_status.nsec_set_info.rrsets.items():
                    nsec_cell = lb2s(nsec_name.canonicalize().to_text())
                    self.add_rrsigs(name_obj, parent_obj, rrset_info, nsec_node, port=nsec_cell)
                edge_id += 1

            # add SOA
            for soa_rrset_info in soa_rrsets:
                soa_rrset_node = self.add_rrset(soa_rrset_info, None, name_obj, parent_obj)
                self.add_rrsigs(name_obj, parent_obj, soa_rrset_info, soa_rrset_node)

            # add mappings for negative responses
            self.node_mapping[zone_top] = set()
            if ds_nodata_info is not None:
                self.node_mapping[zone_top].add(ds_nodata_info)
                self.node_reverse_mapping[ds_nodata_info] = zone_top
            if ds_nxdomain_info is not None:
                self.node_mapping[zone_top].add(ds_nxdomain_info)
                self.node_reverse_mapping[ds_nxdomain_info] = zone_top

            has_warnings = name_obj.delegation_warnings[rdtype] or (ds_nxdomain_info is not None and name_obj.nxdomain_warnings[ds_nxdomain_info]) or (ds_nodata_info is not None and name_obj.nodata_warnings[ds_nodata_info])
            has_errors = name_obj.delegation_errors[rdtype] or (ds_nxdomain_info is not None and name_obj.nxdomain_errors[ds_nxdomain_info]) or (ds_nodata_info is not None and name_obj.nodata_errors[ds_nodata_info])

            # NOTE(review): the two HTML-like Graphviz label literals below were
            # garbled in extraction (tags stripped); reconstructed from the
            # surviving '<'/'>' delimiters and the %-placeholders — verify
            # against upstream dnsviz before relying on them.
            edge_label = ''
            if has_errors:
                edge_label = '<<TABLE BORDER="0"><TR><TD><IMG SCALE="TRUE" SRC="%s"/></TD></TR></TABLE>>' % ERROR_ICON
            elif has_warnings:
                edge_label = '<<TABLE BORDER="0"><TR><TD><IMG SCALE="TRUE" SRC="%s"/></TD></TR></TABLE>>' % WARNING_ICON

            if name_obj.delegation_status[rdtype] == Status.DELEGATION_STATUS_SECURE:
                line_color = COLORS['secure']
                line_style = 'solid'
            elif name_obj.delegation_status[rdtype] == Status.DELEGATION_STATUS_INSECURE:
                line_color = COLORS['insecure']
                line_style = 'solid'
            elif name_obj.delegation_status[rdtype] in (Status.DELEGATION_STATUS_INCOMPLETE, Status.DELEGATION_STATUS_LAME):
                line_color = COLORS['misconfigured']
                line_style = 'dashed'
            elif name_obj.delegation_status[rdtype] == Status.DELEGATION_STATUS_BOGUS:
                line_color = COLORS['bogus']
                line_style = 'dashed'

            consolidate_clients = name_obj.single_client()
            del_serialized = OrderedDict()
            del_serialized['description'] = 'Delegation from %s to %s' % (lb2s(name_obj.parent.name.to_text()), lb2s(name_obj.name.to_text()))
            del_serialized['status'] = Status.delegation_status_mapping[name_obj.delegation_status[rdtype]]

            if has_warnings:
                del_serialized['warnings'] = []
                del_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in name_obj.delegation_warnings[rdtype]]
                del_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in name_obj.nxdomain_warnings.get(ds_nxdomain_info, [])]
                del_serialized['warnings'] += [w.serialize(consolidate_clients=consolidate_clients, html_format=True) for w in name_obj.nodata_warnings.get(ds_nodata_info, [])]

            if has_errors:
                del_serialized['errors'] = []
                del_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in name_obj.delegation_errors[rdtype]]
                del_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in name_obj.nxdomain_errors.get(ds_nxdomain_info, [])]
                del_serialized['errors'] += [e.serialize(consolidate_clients=consolidate_clients, html_format=True) for e in name_obj.nodata_errors.get(ds_nodata_info, [])]

            edge_id = 'del-%s|%s' % (fmt.humanize_name(zone_obj.name), fmt.humanize_name(parent_obj.name))
            self.node_info[edge_id] = [del_serialized]
            self.G.add_edge(zone_top, parent_bottom, label=edge_label, id=edge_id, color=line_color, penwidth='5.0', ltail=zone_graph_name, lhead=parent_graph_name, style=line_style, minlen='2', dir='back')

    def _set_non_existent_color(self, n):
        """Re-color a dashed (non-existent) node based on its security color."""
        if DASHED_STYLE_RE.search(n.attr['style']) is None:
            return

        if n.attr['color'] == COLORS['secure']:
            n.attr['color'] = COLORS['secure_non_existent']

            # if this is an authenticated negative response, and the NSEC3
            # RR used opt out, then the node is actually insecure, rather
            # than secure.
            for n1 in self.G.out_neighbors(n):
                if n1.startswith('NSEC3') and OPTOUT_STYLE_RE.search(n1.attr['label']):
                    n.attr['color'] = COLORS['insecure_non_existent']

        elif n.attr['color'] == COLORS['bogus']:
            n.attr['color'] = COLORS['bogus_non_existent']

        else:
            n.attr['color'] = COLORS['insecure_non_existent']

    def _set_nsec_color(self, n):
        """Color the per-RR cells of an NSEC(3) node's HTML-like label."""
        if not n.startswith('NSEC'):
            return

        #XXX we have to assign l to n.attr['label'], perform any update
        # operations on l, then assign n.attr['label'] to l's new value,
        # wrapping it in "<...>".  This is because the "<" and ">" at the start
        # and end somehow get lost when the assignment is made directly.
        l = n.attr['label']
        # NOTE(review): the two regex literals below lost their HTML-tag-like
        # portions in extraction; '<<TABLE[^>' and '<TD[^>' are reconstructed
        # from the surviving fragments — verify against upstream dnsviz.
        l = re.sub(r'^(<<TABLE[^>]+CELLSPACING=")-\d+"', r'\g<1>0"', l, 1)
        for nsec_name in self.nsec_rr_status[n]:
            if not self.nsec_rr_status[n][nsec_name]:
                self.nsec_rr_status[n][nsec_name] = COLORS['bogus']
            l = re.sub(r'(<TD[^>]+PORT="%s")' % nsec_name, r'\1 COLOR="%s"' % self.nsec_rr_status[n][nsec_name], l, 1)
        n.attr['label'] = '<%s>' % l

    def _set_node_status(self, n):
        """Copy a node's computed status into its serialized node_info entries."""
        status = self.status_for_node(n)

        # '*' is not valid in the serialized node id
        node_id = n.replace('*', '_')
        for serialized in self.node_info[node_id]:
            serialized['status'] = Status.rrset_status_mapping[status]

    def add_trust(self, trusted_keys, supported_algs=None):
        """Apply trust anchors to the graph and propagate trust through it.

        Marks trust-anchor DNSKEY nodes (double periphery), walks chains of
        trust from them, handles DLV zones in a second pass, then marks
        orphaned nodes and finalizes node colors and statuses.
        """
        trusted_keys = tuple_to_dict(trusted_keys)
        if supported_algs is not None:
            supported_algs.intersection_update(crypto._supported_algs)
        else:
            supported_algs = crypto._supported_algs

        dlv_nodes = []
        trusted_zone_top_names = set([self.get_zone(z)[3] for z in trusted_keys])
        for zone in trusted_keys:
            zone_top_name = self.get_zone(zone)[3]
            if not self.G.has_node(zone_top_name) or zone_top_name not in self.subgraph_not_stub:
                continue

            # if at least one algorithm in trusted keys for the zone is
            # supported, then give zone no initial marking; otherwise mark it
            # as insecure
            algs = set([d.algorithm for d in trusted_keys[zone]])
            if algs.intersection(supported_algs):
                self.G.get_node(zone_top_name).attr['color'] = ''
            else:
                self.G.get_node(zone_top_name).attr['color'] = COLORS['insecure']

            for dnskey in trusted_keys[zone]:
                try:
                    dnskey_node = self.get_dnskey(self.id_for_dnskey(zone, dnskey), zone, dnskey.algorithm, Response.DNSKEYMeta.calc_key_tag(dnskey))
                    dnskey_node.attr['peripheries'] = 2
                    if self.G.get_node(zone_top_name).attr['color'] == '':
                        self._add_trust_to_nodes_in_chain(dnskey_node, trusted_zone_top_names, dlv_nodes, False, [])
                except KeyError:
                    # trust anchor doesn't match any published DNSKEY: show it
                    # as a non-existent key
                    dnskey_node = self.add_dnskey_non_existent(zone, zone, dnskey.algorithm, Response.DNSKEYMeta.calc_key_tag(dnskey))
                    dnskey_node.attr['peripheries'] = 2

        # determine DLV zones based on DLV nodes
        dlv_trusted_zone_top_names = []
        for dlv_node in dlv_nodes:
            dlv_trusted_zone_top_names.append(self.node_subgraph_name[dlv_node])

        # now traverse clusters and mark insecure nodes in secure delegations as bad
        for zone in trusted_keys:
            S, zone_node_str, zone_bottom_name, zone_top_name = self.get_zone(zone)
            if not self.G.has_node(zone_top_name) or zone_top_name not in self.subgraph_not_stub:
                continue
            # don't yet mark subdomains of DLV zones, as we have yet
            # to add trust to them
            if zone_top_name not in dlv_trusted_zone_top_names:
                self._add_trust_to_orphaned_nodes(zone_node_str, [])

        # now that we can show which zones are provably insecure, we
        # can apply trust from the DLV zones
        for dlv_node in dlv_nodes:
            self._add_trust_to_nodes_in_chain(dlv_node, trusted_zone_top_names, [], True, [])

        # now mark the orphaned nodes
        for dlv_node in dlv_nodes:
            zone_node_str = self.node_subgraph_name[dlv_node][:-4]
            self._add_trust_to_orphaned_nodes(zone_node_str, [])

        for n in self.G.nodes():
            # set the status of (only) the cluster top node as well
            if n.attr['shape'] == 'point' and n.endswith('_top'):
                pass
            elif n.attr['shape'] not in ('ellipse', 'rectangle') and not n.startswith('NSEC'):
                continue
            self._set_non_existent_color(n)
            self._set_nsec_color(n)
            self._set_node_status(n)

    def status_for_node(self, n, port=None):
        """Map a node's color (and optional NSEC cell port) to an RRset status."""
        n = self.G.get_node(n)

        if n.attr['color'] in (COLORS['secure'], COLORS['secure_non_existent']):
            status = Status.RRSET_STATUS_SECURE
        elif n.attr['color'] in (COLORS['bogus'], COLORS['bogus_non_existent']):
            if port is not None and self.nsec_rr_status[n][port] == COLORS['secure']:
                status = Status.RRSET_STATUS_SECURE
            else:
                status = Status.RRSET_STATUS_BOGUS
        else:
            # dashed DNSKEY nodes represent keys that don't exist
            if n.startswith('DNSKEY') and \
                    DASHED_STYLE_RE.search(n.attr['style']):
                status = Status.RRSET_STATUS_NON_EXISTENT
            else:
                status = Status.RRSET_STATUS_INSECURE
        return status

    def secure_nsec3_optout_nodes_covering_node(self, n):
        """Return secure NSEC3 opt-out neighbor nodes covering node n."""
        return [x for x in self.G.out_neighbors(n) if x.startswith('NSEC') and \
                OPTOUT_STYLE_RE.search(x.attr['label']) is not None and \
                x.attr['color'] == COLORS['secure']]

    def secure_nsec_nodes_covering_node(self, n):
        """Return secure NSEC(3) neighbor nodes covering node n."""
        return [x for x in self.G.out_neighbors(n) if x.startswith('NSEC') and \
                x.attr['color'] == COLORS['secure']]

    def is_invis(self, n):
        """Return True if node n is styled invisible."""
        return INVIS_STYLE_RE.search(self.G.get_node(n).attr['style']) is not None

    def _add_trust_to_nodes_in_chain(self, n, trusted_zones, dlv_nodes, force, trace):
        """Propagate trust backwards from node n along secure edges.

        Follows in-edges from DNSKEY/DS/DLV/NSEC/DNAME nodes, marking
        predecessors secure when the connecting edge is secure (DS targets
        additionally need a valid self-signature loop; NSEC-set nodes need
        all their member RRs authenticated).  trace prevents cycles; DLV
        nodes are collected for a later pass unless force is set.
        """
        if n in trace:
            return

        is_ds = n.startswith('DS-') or n.startswith('DLV-')
        is_dlv = n.startswith('DLV-')
        is_dnskey = n.startswith('DNSKEY-')
        is_nsec = n.startswith('NSEC')
        is_dname = n.endswith('|DNAME')

        if is_dlv and not force:
            dlv_nodes.append(n)
            return

        # if n isn't a DNSKEY, DS/DLV, or NSEC record,
        # then don't follow back edges
        if not (is_ds or is_dnskey or is_nsec or is_dname):
            return

        is_revoked = n.attr['penwidth'] == '4.0'
        is_trust_anchor = n.attr['peripheries'] == '2'

        top_name = self.G.get_node(self.node_subgraph_name[n])

        # trust anchor and revoked DNSKEY must be self-signed
        if is_revoked or is_trust_anchor:
            valid_self_loop = False
            if self.G.has_edge(n,n):
                for e1 in self.G.out_edges(n) + self.G.in_edges(n):
                    if (n,n) == e1 and \
                            e1.attr['color'] == COLORS['secure']:
                        valid_self_loop = True

                        # mark all the DNSKEY RRsets as valid
                        for rrsig in self.node_mapping[e1.attr['id']]:
                            self.secure_dnskey_rrsets.add(rrsig.rrset)

                        break

            #XXX revisit if we want to do this here
            if is_revoked and n.attr['color'] == COLORS['secure'] and not valid_self_loop:
                n.attr['color'] = COLORS['bogus']

            # mark the zone as "secure" as there is a secure entry point;
            # descendants will be so marked by following the delegation edges
            if is_trust_anchor and valid_self_loop:
                n.attr['color'] = COLORS['secure']
                top_name.attr['color'] = COLORS['secure']

        node_trusted = n.attr['color'] == COLORS['secure']

        if is_dnskey and not node_trusted:
            # Here we are shortcutting the traversal because we are no longer
            # propagating trust.  But we still need to learn of any DLV nodes.
            if not force:
                S = self.G.get_subgraph(top_name[:-4])
                for n in S.nodes():
                    if n.startswith('DLV-'):
                        dlv_nodes.append(n)
            return

        # iterate through each edge and propagate trust from this node
        for e in self.G.in_edges(n):
            p = e[0]

            # if this is an edge used for formatting node (invis), then don't
            # follow it
            if INVIS_STYLE_RE.search(e.attr['style']) is not None:
                continue

            prev_top_name = self.G.get_node(self.node_subgraph_name[p])

            # don't derive trust from parent if there is a trust anchor at the
            # child
            if is_ds and prev_top_name in trusted_zones:
                continue

            # if the previous node is already secure, then no need to follow it
            if p.attr['color'] == COLORS['secure']:
                continue

            # if this is a DLV node, then the zone it covers must be marked
            # as insecure through previous trust traversal (not because of
            # a local trust anchor, which case is handled above)
            if is_dlv:
                if prev_top_name.attr['color'] not in ('', COLORS['insecure']):
                    continue

                # reset the security of this top_name
                prev_top_name.attr['color'] = ''

            # if this is a non-matching edge (dashed) then don't follow it
            if DASHED_STYLE_RE.search(e.attr['style']) is not None:
                continue

            # derive trust for the previous node using the current node and the
            # color of the edge in between
            prev_node_trusted = node_trusted and e.attr['color'] == COLORS['secure']

            if is_ds:
                # if this is an edge between DS and DNSKEY, then the DNSKEY is
                # not considered secure unless it has a valid self-loop (in
                # addition to the connecting edge being valid)
                valid_self_loop = False
                if self.G.has_edge(p,p):
                    for e1 in self.G.out_edges(p) + self.G.in_edges(p):
                        if (p,p) == e1 and \
                                e1.attr['color'] == COLORS['secure']:
                            valid_self_loop = True

                            # mark all the DNSKEY RRsets as valid
                            for rrsig in self.node_mapping[e1.attr['id']]:
                                self.secure_dnskey_rrsets.add(rrsig.rrset)

                            break

                prev_node_trusted = prev_node_trusted and valid_self_loop

            # if p is an NSEC (set) node, then we need to check that all the
            # NSEC RRs have been authenticated before we mark this one as
            # authenticated.
            elif p.startswith('NSEC'):
                rrsig_status = list(self.node_mapping[e.attr['id']])[0]
                nsec_name = lb2s(rrsig_status.rrset.rrset.name.canonicalize().to_text()).replace(r'"', r'\"')
                if prev_node_trusted:
                    self.nsec_rr_status[p][nsec_name] = COLORS['secure']
                    for nsec_name in self.nsec_rr_status[p]:
                        if self.nsec_rr_status[p][nsec_name] != COLORS['secure']:
                            prev_node_trusted = False

            if is_nsec:
                # if this is an NSEC, then only propagate trust if the previous
                # node (i.e., the node it covers) is an RRset
                if prev_node_trusted and p.attr['shape'] == 'rectangle':
                    p.attr['color'] = COLORS['secure']

            elif prev_node_trusted:
                p.attr['color'] = COLORS['secure']

            self._add_trust_to_nodes_in_chain(p, trusted_zones, dlv_nodes, force, trace+[n])

    def _add_trust_to_orphaned_nodes(self, subgraph_name, trace):
        """Mark untrusted nodes in a zone cluster and recurse into children.

        Nodes in the zone not already secure become bogus (unless invisible
        or non-existent non-RRset nodes); delegations from a secure zone are
        marked secure or provably insecure based on edge color and the
        authentication state of covering NSEC/DS nodes.  trace prevents
        revisiting a subgraph.
        """
        if subgraph_name in trace:
            return

        top_name = self.G.get_node(subgraph_name + '_top')
        bottom_name = self.G.get_node(subgraph_name + '_bottom')

        # if this subgraph (zone) is provably insecure, then don't process
        # further
        if top_name.attr['color'] == COLORS['insecure']:
            return

        # iterate through each node in the subgraph (zone) and mark as bogus
        # all nodes that are not already marked as secure
        S = self.G.get_subgraph(subgraph_name)
        for n in S.nodes():
            # don't mark invisible nodes (zone marking as secure/insecure is handled in the
            # traversal at the delegation point below).
            if INVIS_STYLE_RE.search(n.attr['style']) is not None:
                continue

            # if node is non-existent, then don't mark it, unless we are talking about an RRset
            # or a non-existent trust anchor; it doesn't make sense to mark other nodes
            # as bogus
            if DASHED_STYLE_RE.search(n.attr['style']) is not None and not (n.attr['shape'] == 'rectangle' or \
                    n.attr['peripheries'] == 2):
                continue

            # if the name is already marked as secure
            if n.attr['color'] == COLORS['secure']:
                # don't mark it as bogus
                continue

            n.attr['color'] = COLORS['bogus']

        # propagate trust through each descendant node
        for p in self.G.predecessors(bottom_name):
            e = self.G.get_edge(p, bottom_name)

            child_subgraph_name = p[:-4]

            if top_name.attr['color'] == COLORS['secure']:
                # if this subgraph (zone) is secure, and the delegation is also
                # secure, then mark the delegated subgraph (zone) as secure.
                if e.attr['color'] == COLORS['secure']:
                    p.attr['color'] = COLORS['secure']
                # if this subgraph (zone) is secure, and the delegation is not
                # bogus (DNSSEC broken), then mark it as provably insecure.
                elif e.attr['color'] != COLORS['bogus']:
                    # in this case, it's possible that the proven insecurity is
                    # dependent on NSEC/NSEC3 records that need to be
                    # authenticated.  Before marking this as insecure, reach
                    # back up for NSEC records.  If any are found, make sure at
                    # least one has been authenticated (i.e., has secure
                    # color).
                    nsec_found = False
                    nsec_authenticated = False
                    for n in self.G.out_neighbors(p):
                        if not n.startswith('NSEC'):
                            continue
                        # check that this node is in the zone we're coming from
                        if self.node_subgraph_name[n] != top_name:
                            continue
                        nsec_found = True
                        if n.attr['color'] == COLORS['secure']:
                            nsec_authenticated = True
                            break

                    # or if there are DS, then there are algorithms that are
                    # not understood (otherwise it would not be insecure).
                    # Check that at least one of the DS nodes was marked as
                    # secure.
                    ds_found = False
                    ds_authenticated = False
                    S = self.G.get_subgraph(child_subgraph_name)
                    for n in S.nodes():
                        # we're only concerned with DNSKEYs
                        if not n.startswith('DNSKEY-'):
                            continue
                        # we're looking for DS records
                        for d in self.G.out_neighbors(n):
                            if not (d.startswith('DS-') or d.startswith('DLV-')):
                                continue
                            # check that this node is in the zone we're coming from
                            if self.node_subgraph_name[d] != top_name:
                                continue
                            ds_found = True
                            if d.attr['color'] == COLORS['secure']:
                                ds_authenticated = True
                                break

                    if nsec_found and not nsec_authenticated:
                        pass
                    elif ds_found and not ds_authenticated:
                        pass
                    else:
                        p.attr['color'] = COLORS['insecure']

            # if the child was not otherwise marked, then mark it as bogus
            if p.attr['color'] == '':
                p.attr['color'] = COLORS['bogus']

            self._add_trust_to_orphaned_nodes(child_subgraph_name, trace+[subgraph_name])

    def remove_extra_edges(self, show_redundant=False):
        """Prune redundant DNSKEY-to-DNSKEY edges and add layout edges.

        Classifies each zone's DNSKEY nodes by role (KSK/ZSK/SEP/trust
        anchor/revoked/non-existent), selects "top-level" keys, anchors them
        to the cluster top with invisible edges, links remaining nodes to
        intermediate keys, and removes (or, with show_redundant, de-weights)
        edges between top-level keys.
        """
        #XXX this assumes DNSKEYs with same name as apex
        for S in self.G.subgraphs():
            non_dnskey = set()
            all_dnskeys = set()
            ds_dnskeys = set()
            ta_dnskeys = set()
            ksks = set()
            zsks = set()
            sep_bit = set()
            revoked_dnskeys = set()
            non_existent_dnskeys = set()
            existing_dnskeys = set()

            for n in S.nodes():
                if not n.startswith('DNSKEY-'):
                    if n.attr['shape'] != 'point':
                        non_dnskey.add(n)
                    continue

                all_dnskeys.add(n)

                in_edges = self.G.in_edges(n)
                out_edges = self.G.out_edges(n)
                ds_edges = [x for x in out_edges if x[1].startswith('DS-') or x[1].startswith('DLV-')]

                is_ksk = bool([x for x in in_edges if x[0].startswith('DNSKEY-')])
                is_zsk = bool([x for x in in_edges if NODE_DNSKEY_CDNSKEY_CDS_RE.match(x[0]) is None])
                non_existent = DASHED_STYLE_RE.search(n.attr['style']) is not None
                has_sep_bit = n.attr['fillcolor'] == 'lightgray'

                if is_ksk:
                    ksks.add(n)
                if is_zsk:
                    zsks.add(n)
                if has_sep_bit:
                    sep_bit.add(n)
                if n.attr['peripheries'] == '2':
                    ta_dnskeys.add(n)
                if ds_edges:
                    ds_dnskeys.add(n)
                if n.attr['penwidth'] == '4.0':
                    revoked_dnskeys.add(n)
                if non_existent:
                    non_existent_dnskeys.add(n)
                else:
                    existing_dnskeys.add(n)

            seps = ds_dnskeys.union(ta_dnskeys).intersection(ksks).difference(revoked_dnskeys)
            ksk_only = ksks.difference(zsks).difference(revoked_dnskeys)
            zsk_only = zsks.difference(ksks).difference(revoked_dnskeys)

            # if all keys have only KSK roles (i.e., none are signing the zone
            # data), then try to distinguish using SEP bit
            if ksk_only and not zsks and sep_bit:
                ksk_only.intersection_update(sep_bit)

            if seps:
                top_level_keys = seps
            else:
                if ksk_only:
                    top_level_keys = ksk_only
                elif ksks:
                    top_level_keys = ksks
                elif sep_bit:
                    top_level_keys = sep_bit
                else:
                    top_level_keys = all_dnskeys

            if top_level_keys:
                # If there aren't any KSKs or ZSKs, then signing roles are
                # unknown, and the top-level keys are organized by SEP bit.
                # Because there are no roles, every key is an "island" (i.e.,
                # not signed by any top-level keys), so only look for "islands"
                # if there are ZSKs or KSKs.
                if zsks or ksks:
                    for n in all_dnskeys.difference(top_level_keys):
                        if set(self.G.out_neighbors(n)).intersection(top_level_keys):
                            # If this key is already signed by a top-level, then
                            # it's not in an island.
                            pass
                        else:
                            # Otherwise, find out what keys are connected to this one
                            neighbors = set(self.G.neighbors(n))

                            # If this key is ksk only, then it is always a top-level key.
                            if n in ksk_only:
                                top_level_keys.add(n)

                            # If this key is not a ksk, and there are ksks, then
                            # it's not a top-level key.
                            elif n not in ksks and neighbors.intersection(ksks):
                                pass

                            # If this key does not have its sep bit set, and there
                            # are others that do, then it's not a top-level key.
                            elif n not in sep_bit and neighbors.intersection(sep_bit):
                                pass

                            # Otherwise, it's on the same rank as all the others,
                            # so it is a top-level key.
                            else:
                                top_level_keys.add(n)

                # In the case where a top-level key is signing zone data, and
                # there are other top-level keys that are not signing zone data,
                # remove it from the top-level keys list, and don't add an edge
                # to the top.  This will make the other top-level keys appear
                # "higher".
                for n in list(top_level_keys):
                    if n in zsks and set(self.G.neighbors(n)).intersection(top_level_keys).intersection(ksk_only):
                        top_level_keys.remove(n)
                    else:
                        self.G.add_edge(n, self.node_subgraph_name[n], style='invis')

                # Now handle all the keys not at the top level
                non_top_level_keys = all_dnskeys.difference(top_level_keys)
                if non_top_level_keys:
                    # If there are any keys that are not at the top level, then
                    # determine whether they should be connected to the
                    # top-level keys, to the top, or left alone.
                    for n in non_top_level_keys:
                        # Non-existent DNSKEYs corresponding to DS and trust
                        # anchors should be connected to the top.
                        if n in non_existent_dnskeys:
                            if n in ds_dnskeys or n in ta_dnskeys:
                                self.G.add_edge(n, self.node_subgraph_name[n], style='invis')
                        # If not linked to any other DNSKEYs, then link to
                        # top-level keys.
                        elif not [x for x in self.G.out_neighbors(n) if x.startswith('DNSKEY')]:
                            for m in top_level_keys:
                                if not self.G.has_edge(n, m):
                                    self.G.add_edge(n, m, style='invis')

                    intermediate_keys = non_top_level_keys
                else:
                    intermediate_keys = top_level_keys

                # If there are ZSKs (and possible ZSKs only signing zone data),
                # then make those the intermediate keys, instead of using all
                # the top-level (or non-top-level) keys.
                if zsk_only:
                    intermediate_keys = zsk_only
                elif zsks:
                    intermediate_keys = zsks

                # Link non-keys to intermediate DNSKEYs
                for n in non_dnskey:
                    if [x for x in self.G.out_neighbors(n) if x.startswith('DNSKEY') or x.startswith('NSEC')]:
                        continue
                    for m in intermediate_keys:
                        # we only link to non-existent DNSKEYs corresponding to
                        # DS records if there aren't any existing DNSKEYs.
                        if m in ds_dnskeys and m in non_existent_dnskeys:
                            if existing_dnskeys:
                                continue
                        self.G.add_edge(n, m, style='invis')

            else:
                # For all non-existent non-DNSKEYs, add an edge to the top
                for n in non_dnskey:
                    if [x for x in self.G.out_neighbors(n) if x.startswith('DNSKEY') or x.startswith('NSEC')]:
                        continue
                    self.G.add_edge(n, self.node_subgraph_name[n], style='invis')

            for n in ksks:
                retain_edge_default = n in top_level_keys
                for e in self.G.in_edges(n):
                    m = e[0]
                    if not m.startswith('DNSKEY-'):
                        continue
                    if n == m:
                        continue
                    # edges between two top-level keys are redundant
                    if retain_edge_default and m in top_level_keys:
                        retain_edge = False
                    else:
                        retain_edge = retain_edge_default
                    if not retain_edge:
                        if show_redundant:
                            self.G.get_edge(m, n).attr['constraint'] = 'false'
                        else:
                            try:
                                del self.node_info[e.attr.get('id', None)]
                            except KeyError:
                                pass
                            self.G.remove_edge(m, n)

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/dnsviz.egg-info/0000755000175000017500000000000015001473074015151 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253948.0 dnsviz-0.11.1/dnsviz.egg-info/PKG-INFO0000644000175000017500000000342515001473074016252 0ustar00caseycaseyMetadata-Version: 2.4 Name: dnsviz Version: 0.11.1 Summary: DNS analysis and visualization tool suite Home-page: https://github.com/dnsviz/dnsviz/ Author: Casey Deccio Author-email: casey@deccio.net License: LICENSE Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: Console Classifier: Environment :: Web Environment Classifier: Intended Audience :: Developers Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) Classifier: Natural Language :: English Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.5 Classifier: 
Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: Internet :: Name Service (DNS) Classifier: Topic :: Scientific/Engineering :: Visualization Classifier: Topic :: System :: Networking :: Monitoring Requires: pygraphviz (>=1.3) Requires: cryptography (>=36.0.0) Requires: dnspython (>=1.13) License-File: LICENSE License-File: COPYRIGHT Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: license Dynamic: license-file Dynamic: requires Dynamic: summary DNSViz is a tool suite for analysis and visualization of Domain Name System (DNS) behavior, including its security extensions (DNSSEC). This tool suite powers the Web-based analysis available at http://dnsviz.net/ . 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253948.0 dnsviz-0.11.1/dnsviz.egg-info/SOURCES.txt0000644000175000017500000001003015001473074017027 0ustar00caseycaseyCOPYRIGHT LICENSE MANIFEST.in README.md requirements.txt setup.cfg setup.py bin/dnsviz contrib/digviz contrib/dnsviz-lg-ws.js contrib/dnsviz-lg.cgi contrib/dnsviz-lg-java/net/dnsviz/applet/DNSLookingGlassApplet.java contrib/dnsviz-lg-java/net/dnsviz/lookingglass/DNSLookingGlass.java contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandler.java contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerComparator.java contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerTCP.java contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportHandlerUDP.java contrib/dnsviz-lg-java/net/dnsviz/transport/DNSQueryTransportManager.java contrib/dnsviz-lg-java/net/dnsviz/transport/Errno.java contrib/dnsviz-lg-java/net/dnsviz/util/Base64.java contrib/dnsviz-lg-java/net/dnsviz/util/Base64Decoder.java contrib/dnsviz-lg-java/net/dnsviz/util/Base64Encoder.java contrib/dnsviz-lg-java/net/dnsviz/util/DNSSettings.java contrib/dnsviz-lg-java/net/dnsviz/websocket/WebSocketClient.java dnsviz/__init__.py dnsviz/base32.py dnsviz/config.py.in dnsviz/crypto.py dnsviz/format.py dnsviz/ipaddr.py dnsviz/query.py dnsviz/resolver.py dnsviz/response.py dnsviz/transport.py dnsviz/util.py dnsviz.egg-info/PKG-INFO dnsviz.egg-info/SOURCES.txt dnsviz.egg-info/dependency_links.txt dnsviz.egg-info/top_level.txt dnsviz/analysis/__init__.py dnsviz/analysis/errors.py dnsviz/analysis/offline.py dnsviz/analysis/online.py dnsviz/analysis/status.py dnsviz/commands/__init__.py dnsviz/commands/graph.py dnsviz/commands/grok.py dnsviz/commands/lookingglass.py dnsviz/commands/print.py dnsviz/commands/probe.py dnsviz/commands/query.py dnsviz/viz/__init__.py dnsviz/viz/dnssec.py doc/COPYRIGHT doc/Makefile doc/dnsviz-graph.html doc/images/error.png doc/images/logo-16x16.png 
doc/images/logo-220x100.png doc/images/logo-60x60.png doc/images/warning.png doc/man/dnsviz-graph.1 doc/man/dnsviz-grok.1 doc/man/dnsviz-print.1 doc/man/dnsviz-probe.1 doc/man/dnsviz-query.1 doc/man/dnsviz.1 doc/src/alias.dot doc/src/cdnskey-cds-nodnskey.dot doc/src/cdnskey-cds.dot doc/src/delegation-bogus.dot doc/src/delegation-incomplete.dot doc/src/delegation-lame.dot doc/src/delegation-secure.dot doc/src/delegation.dot doc/src/dname-invalid.dot doc/src/dname.dot doc/src/dnskey-revoke.dot doc/src/dnskey-sep.dot doc/src/dnskey-trust-anchor.dot doc/src/dnskey.dot doc/src/ds-invalid-digest.dot doc/src/ds-invalid.dot doc/src/ds-nodnskey.dot doc/src/ds-pre-revoke.dot doc/src/ds-unknown-alg.dot doc/src/ds.dot doc/src/edges-errors.dot doc/src/edges-warnings.dot doc/src/error.svg doc/src/logo.svg doc/src/nodata.dot doc/src/nodes-bogus.dot doc/src/nodes-errors.dot doc/src/nodes-insecure.dot doc/src/nodes-secure.dot doc/src/nodes-warnings.dot doc/src/nsec-ds.dot doc/src/nsec-invalid.dot doc/src/nsec-partial-bogus.dot doc/src/nsec.dot doc/src/nsec3-optout.dot doc/src/nsec3.dot doc/src/nxdomain.dot doc/src/response-error.dot doc/src/response-warning.dot doc/src/rrset.dot doc/src/rrsig-dnskey-pruned.dot doc/src/rrsig-dnskey-redundant.dot doc/src/rrsig-dnskey.dot doc/src/rrsig-ds.dot doc/src/rrsig-nsec.dot doc/src/rrsig-rrset-expired.dot doc/src/rrsig-rrset-invalid-sig.dot doc/src/rrsig-rrset-invalid.dot doc/src/rrsig-rrset-nodnskey.dot doc/src/rrsig-rrset-pre-revoke.dot doc/src/rrsig-rrset-unknown-alg.dot doc/src/rrsig-rrset.dot doc/src/warning.svg doc/src/wildcard.dot doc/src/zone-errors.dot doc/src/zone-warnings.dot doc/src/zone.dot share/css/dnsviz.css share/hints/named.root share/html/dnssec-template.html share/js/dnsviz.js share/trusted-keys/root.txt tests/test_00_setup.py tests/test_11_response.py tests/test_61_dnsviz_probe_options.py tests/test_62_dnsviz_graph_options.py tests/test_63_dnsviz_grok_options.py tests/test_64_dnsviz_print_options.py 
tests/test_71_dnsviz_probe_run_offline.py tests/test_72_dnsviz_graph_run.py tests/test_73_dnsviz_grok_run.py tests/test_74_dnsviz_print_run.py tests/test_81_dnsviz_probe_run_online.py tests/test_99_cleanup.py tests/vars.py tests/zones/unsigned/example.com-probe-auth.json tests/zones/unsigned/example.com.zone tests/zones/unsigned/example.com.zone-delegation././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253948.0 dnsviz-0.11.1/dnsviz.egg-info/dependency_links.txt0000644000175000017500000000000115001473074021217 0ustar00caseycasey ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253948.0 dnsviz-0.11.1/dnsviz.egg-info/top_level.txt0000644000175000017500000000000715001473074017700 0ustar00caseycaseydnsviz ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3905313 dnsviz-0.11.1/doc/0000755000175000017500000000000015001473074012707 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/COPYRIGHT0000644000175000017500000000231715001472663014210 0ustar00caseycaseysrc/warning.svg: Imported on July 11, 2016. Author: user:Penubag / Wikimedia Commons / Public Domain Downloaded from: https://upload.wikimedia.org/wikipedia/commons/7/74/Ambox_warning_yellow.svg License statement: https://commons.wikimedia.org/wiki/File:Ambox_warning_yellow.svg This work has been released into the public domain by its author, penubag. This applies worldwide. In some countries this may not be legally possible; if so: penubag grants anyone the right to use this work for any purpose, without any conditions, unless such conditions are required by law. src/error.svg: Imported on July 11, 2016. 
Author: user:Penubag / Wikimedia Commons / Public Domain Downloaded from: https://upload.wikimedia.org/wikipedia/commons/1/15/Ambox_warning_pn.svg License statement: https://commons.wikimedia.org/wiki/File:Ambox_warning_pn.svg I, the copyright holder of this work, release this work into the public domain. This applies worldwide. In some countries this may not be legally possible; if so: I grant anyone the right to use this work for any purpose, without any conditions, unless such conditions are required by law. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/Makefile0000644000175000017500000000373415001472663014361 0ustar00caseycaseyDOT=dot INKSCAPE=inkscape SRCDIR=src IMGDIR=images DOTFILES = $(wildcard $(SRCDIR)/*dot) DOTPNGFILES = $(DOTFILES:$(SRCDIR)/%.dot=$(IMGDIR)/%.png) all: $(DOTPNGFILES) # make png files from dot files $(DOTPNGFILES): $(IMGDIR)/%.png: $(SRCDIR)/%.dot $(DOT) -Tpng $< > $@ || ( rm $@ && false ) # make icon-sized png files from warning.svg and error.svg ICONSVGFILES = $(SRCDIR)/warning.svg $(SRCDIR)/error.svg ICONPNGFILES = $(ICONSVGFILES:$(SRCDIR)/%.svg=$(IMGDIR)/%.png) ICONWIDTH = 25 icons: $(ICONPNGFILES) $(ICONPNGFILES): $(IMGDIR)/%.png: $(SRCDIR)/%.svg HOME=/var/tmp $(INKSCAPE) --export-width=$(ICONWIDTH) --export-type=png --export-filename=$@ $< # make banner png 220x100 file from logo.svg LOGOSVGFILE = $(SRCDIR)/logo.svg LOGOBANNERPNGFILE = $(IMGDIR)/logo-220x100.png LOGOBANNERVIEWBOX = 58 43 381 173 LOGOBANNERHEIGHT = 100 # make square icon png 60x60 file from logo.svg LOGOICONPNGFILE = $(IMGDIR)/logo-60x60.png LOGOICONVIEWBOX = 335 43 103 103 LOGOICONHEIGHT = 50 # make square favico png 16x16 file from logo.svg LOGOFAVICONPNGFILE = $(IMGDIR)/logo-16x16.png LOGOFAVICONVIEWBOX = 335 43 103 103 LOGOFAVICONHEIGHT = 16 logo: $(LOGOBANNERPNGFILE) $(LOGOICONPNGFILE) $(LOGOFAVICONPNGFILE) $(LOGOBANNERPNGFILE): $(LOGOSVGFILE) cat $< | sed 's/viewBox="0 0 504 
252"/viewBox="$(LOGOBANNERVIEWBOX)"/' | HOME=/var/tmp $(INKSCAPE) --export-height=$(LOGOBANNERHEIGHT) --export-type=png --export-filename=$@ --pipe $(LOGOICONPNGFILE): $(LOGOSVGFILE) cat $< | sed 's/viewBox="0 0 504 252"/viewBox="$(LOGOICONVIEWBOX)"/' | HOME=/var/tmp $(INKSCAPE) --export-height=$(LOGOICONHEIGHT) --export-type=png --export-filename=$@ --pipe $(LOGOFAVICONPNGFILE): $(LOGOSVGFILE) cat $< | sed 's/viewBox="0 0 504 252"/viewBox="$(LOGOFAVICONVIEWBOX)"/' | HOME=/var/tmp $(INKSCAPE) --export-height=$(LOGOFAVICONHEIGHT) --export-type=png --export-filename=$@ --pipe .PHONY: clean clean: rm -rf $(DOTPNGFILES) rm -rf $(ICONPNGFILES) rm -rf $(LOGOBANNERPNGFILE) $(LOGOICONPNGFILE) $(LOGOFAVICONPNGFILE) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/dnsviz-graph.html0000644000175000017500000006543715001472663016233 0ustar00caseycasey DNSViz

DNSViz - DNS visualization

Zones

Zone

Nodes in DNSViz are clustered by the zone to which the represented information belongs. Each zone is labeled with the name of the zone origin and the time at which the zone was last analyzed.

Delegations

Delegation

Thick lines between zones denote delegations of namespace from one zone to another, as indicated by the presence of NS (name server) resource records (RRs) for the delegated namespace.

In this example, the black, solid line indicates a standard, insecure delegation (i.e., sans DNSSEC). Other possible delegation statuses are described in the following entries.

Lame delegation

If the designated name servers for a zone cannot be properly resolved or if the servers do not properly respond to queries, then the delegation is considered lame and is represented by a dashed, yellow line.

Incomplete delegation

If the delegation is incomplete, as indicated by the presence of NS records in the zone itself but not in its parent zone, then the delegation is represented by a dashed, yellow line.

Secure delegation

If the delegation is secure by DNSSEC standards, then the delegation is represented by a solid, blue line.

Bogus delegation

If the delegation is bogus by DNSSEC standards, then the delegation is represented by a dashed, red line.

RRsets

RRset

Resource record sets (RRsets) returned in the response (usually in the answer section) are represented as rectangular nodes with rounded corners. Among the most common record types are SOA (start of authority), A (IPv4 address), AAAA (IPv6 address), MX (mail exchange), and CNAME (canonical name).

RRsets that are specific to DNSSEC, such as the DNSKEY, DS, RRSIG, NSEC and NSEC3 RR types, are represented as other node types, as specified elsewhere in this guide.

Alias

Aliases resulting from CNAME RRs are represented by a black edge from one RRset (with the alias name) to another (with the canonical name).

DNAME

A DNAME RR is used to alias an entire namespace into another. DNAME responses include synthesized CNAME RRs for the aliasing directed by the DNAME RR.

DNAME records are shown in DNSViz with their respective CNAME records. A solid, blue line between DNAME node and CNAME node indicates that the DNAME expansion was valid.

Invalid DNAME

A solid, red line between DNAME node and CNAME node indicates that the DNAME expansion was invalid.

Negative Responses

NXDOMAIN

If the response to a query is a name error (NXDOMAIN), this negative response is represented by a rectangular node with diagonals drawn at each corner, and with a dashed border, lighter in color. A node representing the SOA RR returned in the negative response (if any) is also included.

NO DATA

If the response to a query has a NOERROR status but contains no answer data (NO DATA) for the type, this negative response is represented by a rectangular node with rounded corners, and with a dashed border, lighter in color. A node representing the SOA RR returned in the negative response (if any) is also included.

DNSKEY RRs

DNSKEY

DNSKEY RRs include public key and meta information to enable resolvers to validate signatures made by the corresponding private keys.

In DNSViz, each DNSKEY RR is represented as an elliptical node in the zone to which it belongs.

The DNSKEY RR for the example.com zone has algorithm 8 (RSA/SHA-256) and key tag 12345, both of which are used to identify the DNSKEY. Each DNSKEY node is decorated based on the attributes of the corresponding DNSKEY RR, as described in the following entries.

DNSKEY with SEP bit

A gray fill indicates that the Secure Entry Point (SEP) bit is set in the flags field of the DNSKEY RR.

This bit is typically used to designate a DNSKEY for usage as a key signing key (KSK), a DNSKEY that is used to sign the DNSKEY RRset of a zone, providing a secure entry point into a zone via DS RRs or a trust anchor at the resolver.

DNSKEY with revoke bit

A thick border indicates that the revoke bit is set in the flags field of the DNSKEY RR.

Resolvers which implement the trust anchor rollover procedures documented in RFC 5011 recognize the revoke bit as a signal that the DNSKEY should no longer be used as a trust anchor by the resolver. For a DNSKEY to be properly revoked, it must also be self-signing (i.e., used to sign the DNSKEY RRset), which proves that the revocation was made by a party that has access to the private key.

DNSKEY designated as trust anchor

A double border indicates that the DNSKEY has been designated as a trust anchor.

A trust anchor must be self-signing (i.e., used to sign the DNSKEY RRset).

DS RRs

DS

DS (delegation signer) RRs exist in the parent of a signed zone to establish a SEP into the zone. Each DS RR specifies an algorithm and key tag corresponding to a DNSKEY RR in the signed zone and includes a cryptographic hash of that DNSKEY RR.

In DNSViz DS RRs with the same DNSKEY algorithm and key tag are typically displayed as a single node since they usually correspond to the same DNSKEY RR with different digest algorithms. The DS for example.com has algorithm 8 and key tag 12345, and maps to the corresponding DNSKEY RR with digest algorithms 1 (SHA1) and 2 (SHA-256).

In this example, the blue color of the arrow pointing from DS to DNSKEY indicates that the digest contained in each of the DS RRs is valid, and corresponds to an existing DNSKEY in example.com. However, other circumstances may exist, which are shown in the following entries.

DS with invalid digest

A solid red line from DS to DNSKEY indicates that a DNSKEY exists matching the algorithm and key tag of the DS RR, but the digest of the DNSKEY in the DS RR does not match.

DS with no matching DNSKEY

A dashed gray line from DS to a DNSKEY with a dashed gray border indicates that no DNSKEY matching the algorithm and key tag of the DS RR exists in the child zone.

Extraneous DS RRs in a parent zone do not, in and of themselves, constitute an error. For example, sometimes they are deliberately pre-published before their corresponding DNSKEYs, as part of a key rollover. However, for every DNSSEC algorithm in the DS RRset for the child zone, a matching DNSKEY must be used to sign the DNSKEY RRset in the child zone, as per RFC 4035.

DS matching DNSKEY prior to its revocation

A special case of a DS with no matching DNSKEY is when the DS matched a DNSKEY prior to its revocation, but the ramifications are the same as if it didn't match any DNSKEY. The line is simply drawn to help identify the cause of the otherwise non-existent DNSKEY.

In the example at the left the key tag of the DS records isn't actually 54321; rather, 54321 is the new key tag resulting from having set the revoke bit in the DNSKEY RR.

DS with unknown digest algorithm

When the algorithm and key tag of a DS RR match those of a DNSKEY RR, but the digest algorithm is unknown or unsupported, then the line between DS and DNSKEY is yellow. In the example at the left digest algorithm 19 is unknown.

Invalid DS

When the use of a DS corresponding to a DNSKEY is invalid, independent of the correctness of its digest, the line between DS and DNSKEY is red and dashed. An example scenario is when the DNSKEY has the revoke bit set, which is disallowed by RFC 5011.

NSEC/NSEC3 RRs

NSEC NSEC3

NSEC and NSEC3 RRs are used within DNSSEC to prove the legitimacy of a negative response (i.e., NXDOMAIN or NO DATA) using authenticated denial of existence or hashed authenticated denial of existence, respectively.

In DNSViz the NSEC or NSEC3 RR(s) returned by a server to authenticate a negative response are represented by a rectangular node with several compartments. The bottom compartment is labeled with either NSEC or NSEC3, depending on the type of record. Each compartment on the top row represents an NSEC or NSEC3 record in the set--there will be between one and three.

An edge extends from the NSEC or NSEC3 node to the corresponding negative response, as in the figure to the left. If the edge is solid blue, then the NSEC or NSEC3 RRs returned prove the validity of the negative response.

NSEC covering DS

A special case of NSEC/NSEC3 RRs is that in which they serve to prove the non-existence of Delegation Signer (DS) records. The proof of absence of DS records constitutes an insecure delegation, in which any trust at the parent zone does not propagate to the child zone.

The NSEC/NSEC3 proof involving DS records is graphically represented with an edge from the NSEC/NSEC3 node to the box representing the child zone.

NSEC3

The opt-out flag is set in NSEC3 RRs to indicate that their presence is only sufficient to prove insecure delegations (i.e., lack of DS records) and nothing more. Thus, a name error (NXDOMAIN) response, for example, cannot be securely proven when the NSEC3 uses opt-out.

NSEC3 records with the opt-out flag set are colored with a gray background.

Invalid NSEC

A solid red edge from the NSEC or NSEC3 node to the negative response indicates that the NSEC or NSEC3 RRs included in the response do not prove the validity of the negative response.

RRSIGs

RRSIG

Each RRSIG RR contains the cryptographic signature made by a DNSKEY over an RRset. Using the DNSKEY with the same algorithm and key tag as the RRSIG, the RRset which was signed, and the RRSIG itself, a resolver may determine the correctness of the signature and authenticate the RRset.

In DNSViz RRSIGs are represented as directed edges from the DNSKEY that made the signature to the RRset that was signed. The edges in the example denote RRSIGs made by the example.com DNSKEY with algorithm 8 and key tag 12345, which cover the example.com/A RRset.

RRSIG with bogus signature

A solid red edge indicates an RRSIG in which the cryptographic signature is invalid.

Expired or premature RRSIG

A solid purple edge indicates that an RRSIG is invalid because it is outside its validity period, as defined by the inception and expiration date fields in the RRSIG RR.

RRSIG with no matching DNSKEY

A dashed gray line stemming from a DNSKEY with a dashed gray border indicates that no DNSKEY matching the algorithm and key tag of the RRSIG RR could be found in the DNSKEY RRset (or the DNSKEY RRset could not be retrieved).

Extraneous RRSIG RRs do not, in and of themselves, constitute an error. For example, sometimes they are deliberately pre-published before their corresponding DNSKEYs, as part of an algorithm rollover. However, every RRset must be covered by RRSIGs for every algorithm in the DNSKEY RRset, as per RFC 4035.

RRSIG matching DNSKEY prior to its revocation

A special case of an RRSIG with no matching DNSKEY is when the RRSIG matched a DNSKEY prior to its revocation, but the ramifications are the same as if it didn't match any DNSKEY. The line is simply drawn to help identify the cause of the otherwise non-existent DNSKEY.

In the example at the left the key tag of the RRSIG RR isn't actually 12345; rather, 12345 is the new key tag resulting from having set the revoke bit in the DNSKEY RR.

DNSKEY with unknown algorithm

When the algorithm and key tag of an RRSIG RR match those of a DNSKEY RR, but the cryptographic algorithm associated with the RRSIG is unknown or unsupported, then the line stemming from the DNSKEY is yellow. In the example at the left algorithm 22 is unknown.

Invalid RRSIG

When an RRSIG is invalid, independent of the correctness of its temporal validity period and its cryptographic signature, the line stemming from the DNSKEY is red and dashed. Example scenarios might be when the DNSKEY has the revoke bit set or when the signer field in the RRSIG RR does not match the name of the zone apex. Such scenarios are disallowed by RFCs 5011 and 4035, respectively.

RRSIG covering a DNSKEY RRset

Just like other RRsets, a DNSKEY RRset is signed as an RRset, which comprises all the collective DNSKEY RRs at the zone apex. Because each DNSKEY RR is represented as a node in DNSViz, a single RRSIG covering the DNSKEY RRset is represented by edges drawn from the node representing the signing DNSKEY to the nodes representing every DNSKEY RR in the set.

In the example at the left, the example.com/DNSKEY RRset is comprised of the three DNSKEY nodes shown, and the blue edges going to each of them collectively represent a single RRSIG corresponding to the key with algorithm 8 and key tag 54321.

RRSIG covering a DNSKEY RRset, with redundant edges

In some DNSSEC implementations, multiple DNSKEYs sign the DNSKEY RRset, even though only a subset are designated to provide secure entry into the zone (e.g., via matching DS records in the parent zone). While there is nothing inherently wrong with this configuration, graphically representing such scenarios can be visually complex because of the cycles and redundancy created in the graph.

RRSIG covering a DNSKEY RRset, with redundant edges pruned

In order to represent trust propagation in a simplified fashion, eliminating graphic redundancies, DNSViz exhibits the following behavior. For every DNSKEY signing the DNSKEY RRset, a self-directed edge is added to the node, indicating that the DNSKEY is self-signing. Additionally, if the DNSKEY is designated as a (SEP) into the zone, then edges are drawn from its node to nodes representing all other DNSKEY RRs in the DNSKEY RRset.

If there is no true SEP, (e.g., no DS RRs in the parent zone), then SEP(s) are inferred based on their signing role (e.g., signing DNSKEY RRset or other RRsets) and properties (e.g., SEP bit).

RRSIG covering a DS RRset

Like the DNSKEY RRset, a single DS RRset might be represented as several different nodes. As such a single RRSIG covering the DS RRset is represented by edges drawn from the node representing the signing DNSKEY to the nodes representing every DS RR in the set.

In the example at the left, the example.com/DS RRset is comprised of both DS nodes shown, and the blue edges going to both of them collectively represent a single RRSIG corresponding to the key with algorithm 8 and key tag 12345.

RRSIGs covering NSEC RRsets

Because an NSEC or NSEC3 node represents one or more RRsets and at least one RRSIG per RRset is anticipated, multiple RRSIG edges will be drawn from DNSKEY to NSEC or NSEC3 nodes, each pointing to the respective compartment corresponding to the NSEC or NSEC3 record.

CDNSKEY/CDS RRs

CDNSKEY/CDS RRsets

CDNSKEY and CDS records are used by the child zone to signal to its parent the DS record(s) that it desires to be published, per RFC 7344. The CDNSKEY and CDS RRsets are represented as any other RRset (see RRsets); that is, there is a single node per RRset, as opposed to a single node per CDNSKEY or CDS record.

CDNSKEY and CDS RRsets are mapped to the DNSKEYs they correspond to with a gray edge from the RRset to the DNSKEY node.

CDNSKEY/CDS RRsets referencing a non-existent DNSKEY

If the DNSKEY referenced does not exist, the DNSKEY node is represented with a dashed gray border (see DNSKEY RRs and DS RRs).

Wildcards

Wildcard

When the RRSIG covering an RRset has a labels field with value greater than the number of labels in the name, it is indicative that the resulting RRset was formed by a wildcard expansion. The server must additionally include an NSEC or NSEC3 proof that the name to which the wildcard is expanded does not exist.

DNSViz represents wildcards by displaying both the wildcard RRset and the NSEC or NSEC3 proof. In the example at the left, the RRset foobar.example.com resulted from the wildcard expansion of *.example.com.

Node Status

Secure nodes

Beginning at the DNSKEYs designated as trust anchors, DNSViz traverses the nodes and edges in the graph to classify each node as having one of three DNSSEC statuses, depending on the status of the RRset which it represents: secure, bogus, or insecure. In DNSViz, node status is indicated by the color of the nodes (Note that there isn't always a one-to-one mapping between node and RRset, but the node status will be consistent among all nodes comprising an RRset. An example is the DNSKEY nodes for a zone, which all have the same status even though the DNSKEY RRset is split among different nodes).

Nodes with blue outline indicate that they are secure, that there is an unbroken chain of trust from anchor to RRset.

Bogus nodes

Nodes with red outline indicate that they are bogus, that the chain of trust from an anchor has been broken.

NSEC nodes that are partially bogus

Because the NSEC and NSEC3 nodes often represent multiple NSEC or NSEC3 RRs, it is possible that a proper subset of the RRs are secure, while others in the set are not (e.g., missing or expired RRSIG). In this case, the outline of the compartments representing secure NSEC or NSEC3 RRs will be colored blue, while the others will be red. Because the status of the collective set of NSEC and NSEC3 RRs is dependent on the status of all the individual NSEC and NSEC3 RRs, the greater node is only colored blue if all the compartments are colored blue.

Insecure nodes

Nodes with black outline indicate that they are insecure, that no chain of trust exists; if any anchors exist then an insecure delegation is demonstrated to prove that no chain should exist from the anchors. This is equivalent to DNS without DNSSEC.

Warnings and Errors

Nodes with warnings

If one or more warnings are detected with the data represented by a node in the graph, then a warning icon is displayed in the node.

Edges with warnings

Similarly, the warning icon is displayed alongside edges whose represented data has warnings.

Zone with warnings

The warning icon is also displayed within the cluster representing a zone if there are zone-related warnings.

Nodes with errors

If one or more errors (more severe than warnings) are detected with the data represented by a node in the graph, then an error icon is displayed in the node.

Edges with errors

Similarly, the error icon is displayed alongside edges whose represented data has errors.

Zone with errors

The error icon is also displayed within the cluster representing a zone if there are zone-related errors.

Response warning

A warning icon with an italicized label denotes a warning for a response that isn't represented elsewhere in the graph, such as a referral with the authoritative answer flag set.

Response error

An error icon with an italicized label denotes a response error, e.g., due to timeout, malformed response, or invalid RCODE.

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3905313 dnsviz-0.11.1/doc/images/0000755000175000017500000000000015001473074014154 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/images/error.png0000644000175000017500000000214115001472663016014 0ustar00caseycasey‰PNG  IHDR5¿7¶sBIT|dˆ pHYsééT’KtEXtSoftwarewww.inkscape.org›î<ÞIDATH‰­”]hSgÇïÉÉñ$kÒ˜4¦IÚ$˜YÓ¤v›ÌJ„uØ)lŒ±•2:w3¢¦“é@ÝÍv3˜»q0Pƒ  'Þ ¤c‚È”êè…Ȱ~ÁØðB:1Óœ÷cMkÒªÓÍç¼ç}Þÿïù?çáÀ\gV¬8~iÍšß?^¿þÅ'9g=nâÑTj¨”ɼÖ&îܯV«Ñ§ 1 žÏd[BX«ÂáöäÔÔ'€xj³Ù}]™ÙµcÛ¬»yójµš*ƒèËåÞo.YiÍòPÈ]~þüW€ïC^èéù: µˆT kp%%JJVÕjå¶o_Ç¿´í‘C®›íËf_7€Ö½u+·7oÆ$H)ét»÷ìÙ#•JÅùÏžž‚‹9Z)ôÊ•ôŒìîf¼§OJ<)YY¯ç"·n½ý(7…]²dhY*õœçyHc0[¶055…RŠ_ïÞÅ.•RâZ–U˜˜øtïÞ½‘'‚}™Ì´¶¤”Ø7Ò^,’N§(‹Dvì@*…çy´Ž''<ÌÍ!Ç»º$#‘n)%Êuyft€R©@?n©Dpp©h-Š/¾·{÷îÜcAB ”JI¥ð¤$02‚ÕÞ@¹\&ÓÛÛ @¢ZE[²^')e`ÉéÓ_ò€‘^éËå¾ aéyÓ6<<··zõjðùftœtšØÈR)””ä¯^}iÏÎåùmkrÝl¾³óÕÙÉ oÛ†ðûçöK¥…B¡¥¨t¥‚X¼OJB÷îù“ããG»9§ÅÚž\î§X[[Z)…¿X$¶kˆûE…ÃaÇaéÒ¥÷«t„ãðçÉ“ P«E—oÚôÛÏ.L.pòm4úJw4ÚïÕëH)‰µjµùüÂßUzt”`¡€‚EÆXÉS§>¯T*¡ˆ‘O$k­-©Á¡!ܾ¾bår™|>ϱcÇZÞ ÛfÙ¾}h@ Abz:½~ýC߯8ÚÑñQ4˜YŸx¥² µæÆc¸råÊ‚ýĆ ,^»5£,R;ÆÆÆºìƒx6«J)QJa|>þ:sáº` ¦)¾Û¿ŸÉË—y+›å'0 Àì]φDêõ`üܹ/€7ÄñÎÎï—Åão*¥˜ ­iY7Â4tCX¦y=ïù¶m{—†‡×Úñ`ðe¯áBÏž…i=wp¾x³¨irb€€”þ¶kרק§?sC¡wë¶2ÆÌ$XF”1à÷cÕ˜´æ™f'ÆaŒhn´í»g2¿ü;®8•HIEND®B`‚././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/images/logo-16x16.png0000644000175000017500000000111715001472663016410 0ustar00caseycasey‰PNG  IHDRóÿasBIT|dˆ pHYs&&lU¶ìtEXtSoftwarewww.inkscape.org›î<ÌIDAT8¥Ó¿k“qÇñ÷Ý÷É5JE°´(Zº¨uÁ"8‰%•VâÏMbE„‚ˆÔA°)¦jˆÒÅ?@°SAhiAEÐâ/ÒhÓçÉsÕA2äo¿×Ý}àà?K¸p;AmÃvLëÜ·Ž×¢'dQ½ ö$ÄdbwË G§'I%v³¸|±Ç„ fÏI$4Xm!ðܲiÕaZ Ì~þ~¨|«=ÖÆMUìúT%¹CA>¡ñý­ÿÔ/äA©VÛSápIEND®B`‚././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/images/logo-220x100.png0000644000175000017500000002316115001472663016542 0ustar00caseycasey‰PNG  IHDRÜdíÃ^™sBIT|dˆ pHYs×™ 
{tEXtSoftwarewww.inkscape.org›î< IDATxœíw˜TÕùÇ?ï-ì²t5¢QTÔĆ%£F#ŠQQ5²‰4Ë. , ‹Æ\c¶Š¢„eC\쨱kDFD£Ø;E)ë¶™óþþ˜-Sî½sgvváù9ŸçY˜3§½wæ~ç=÷TÈ!C† 2dÈ!C† 2dÈ!C†m‰lk2dpe\m/„ í…Ò!¬Mˆ®ƒÐj¾Ë[˲Âж632‚˰}1æoýÈ ŒÎãu*ß!<„¡š;F=ÖU&v„Œà2lØ+³øì³2T¦ tO¡„WÇü‘ÿI»mi$#¸ Ûž‹k hjz8&:BêA_AXlÄЀ¥=PÙ8Ø;¦¤fÐsY0ziXÁeØöW?rzÄ;Ï€Ì&Ø´‚Eç7¸æ÷˜Ðˆ^ŠÒ3ü¦ÁÏ‚ÑÏv®Ñ©‘Üÿ7ìÇw'èX}±´†f0›±ØDH6XÿvaÓ¶6³âÅǃõd8 AT/dá¨;’*c\uBÔ"òK”—Y8êé65¸ nþ{¿ `@L'P0™âƒAmDÙB(çsÆï³.™Ò’bÖwÃjþ‰¯´j­aâ!Ÿ§­îÙ/äÑ(G»Æ·},Æð§£ŸJ¹[-¬•Gb8 äpàp”~ áxmMnFu ð_Ð—É ýûä÷S¶¡£/™Lm±ï¯,õ§”Ê9ïÎÞ²?Dè (VNænJŸ¡é!Ë5Æ’Û1fP›&µåKk•¨jtúȰ¶þã”WÂqYMpËê­¨¼ æeTŸ¢{Áœ¿§{"²‚ga¸>Þ&‡°˜—±õ(l1i©;¨»ÇÕÓZ½Õnò’.ß~¡/4] OÃÈ®®õ8‡³A-#iÀ´Ç_Á2·“µ©ºë½ŸöŠøÝÿ0åb·‰¢šu@o@hõ¶;ÁY®1qïE‰I‡#¿hï´`Ž&€,§në:nzëNn^}H*E¤cv´)"lô ^×á:#qª‡–°F†“ ¶6À5OO‚¦@¯Ý5ª¬¨r}†DÇÈšz¿KÙŠ³S»àQ^‹]MQÍ€”Ê)^R†°O¸LYÇø, Ö¥wÁ‹˜<âR^kÚTÏÄ^çÆ7à¦ÿì›ÜåÄÚQ¯“¢Âz•ÏïÔ¡úZil­À¥ÞØÞös»ðöžFMh/WñD +6œ@xèÀ2¦­¨¥ôIX—:=sk€ps^€èëŒ]r1jýyþ¢ÅR¼ä~émïYf&H’¿f]ƒ{“2Ò\Çæ¢C\¢p2y•ß r2sß¼šU&ßÜ ÑÞ¤¨×±¹¥} « “\=.x7ë’ópö3ûCðqÐ]ü•Ûò"ªxõŸVu8Vî”>6”Š_áÏÈ™SXOQÍYX<nñÐåf67M§xÉ#Ï‚µXOHëÉ2½1ÖˆŒr2ðó¨ò„e|—{C§ÚÜRðpn^+6ÜÁ¼á´9¨Î¤ÏSþNò¿¸‰¼K¬•Q”¿4$ézÜ+Oà]|píÓ{BèÉ8±ù÷Z-ßE‚¼ñi÷à ­¤ì‰~I^xò,ùoTŽFy«í½pçÇ”[Qó/Ô¼…¥k1ò*è=(W%65ÀtvÝuÄö<ÝËã.Yñ¸‰)"¯kZ¼á¸“Ém|ŠªU;ø¾2ÓZ K=΄[°Wçø®Ç ?‚H¤9{y>!óÊÎîñQO[Ú¤…·/&X ÚùÃG F¼I î0DÎ}3‰œßƒÜ z F]…=$©~ô®Æ½I ñ¿ÄI5=ò&Ln¿!“x„ÛWÇ…ƒ¿÷´;åzø)ù[K€ë|Õá]yôMìö¤à/(ù*+Q=Ži}äU=…ÒGFQAµƒ;Ƽ ›EÀ"Šª÷N@8‘ÝÅì‘о›­Õ¨µ1o¡¼BÏܧ˜SXßé¶¥ g8ŸâI˜Ö#oòÂ;œºÀíÀhW»£ K­Õ«˜ñÊ?(;üõxUŸH.\õôžhèrï¼qâyÕûëm,>§™ d‡ò0Òe,ŽÀp2°[RÂf`×ÞÕ¥C G¿´^]^u†"§M2©¸Ëlè¼=ø^ø8qZú€öñW®WXG1ëÕ‡˜ô³»¼ 7-¡¤…—ÁS½ëðÀÑKS»0ÑÖï'¡×ÚˆrÓO|À£Äg€… ¯ °wÏ(³€} ϰ+ßçŸüÍÛè ‰ð\Äï}£näâúªíλ±ië¾Xzè0SPÍu)×=,ÜÄœ×W0áÐï¼+ôºQ5-u3žÿeGßãëÚœêöª×Ko¶maôwÞå´½ØLHŽaÆÐ·}™îP¨fêCÏCà)”ñõ8ÔkØ&‚«¬¬Ü)¨r†¢‡òÓUã³zZiéó]mK:ðî¥wr$QÛù{6pÅAo2~Ð|Ær&ÙìŽ0‹ðt£õD…wÀ„¦úª³í&×{=ê‰J+×Sþ\jãQ‰êõúðBÇîÚ?î¦Wœlü“o±E2ó´PF†â;Sê…_a¯ì–t=¤®®n½@1ÈEŠ-p‹e[»ÚŽtá.8£¸ôÆß¨©rÑÁß0þ„#ÐÈf©/á÷ìµ4y“Þ Z×$a õÄ„½>>1:æ%6¯B¨!õÎŒòSžCy¬½\OáucóVsSÓˆmÛÃDZ,˜?uê¤7ºÚŽtáÏÃy / ÓÇò:„ŽÄè‡íå&^jƺjÜó&%tcþÖëT°N ,ÿ0Ôc¯¼&VmÎû’â%ë€ÕÀ*D—³ëÚ—°íôL<ïd|ôR¶ýÓîDá?ìm欺•ÊxA8Še³W¼îZf"Ñúžp$ÙÏó|_KÂrc숦©-Þ+¯’CÙc?g/û¶+–¼ü÷¨«k¿.0yû¶¾èÚ”ëñCQÍ„«Àœ 
’ùùll°±9Â÷lÿ–¿¡¨Lå³}ÖRT=“ÝÖÞ±½ / N³‡keÓ–›èÙãràÇqõ:Ùdô4œù«•Í¡,?žH™ýüýØGãïB¼Dº?²nRbû]EÛïý\‰Ò!&ÿÂCÁ°?0á!¶Öm¤ä¡g)Yn3ù¡c·*;±ý]„1¶§·rãøÆ”Ê™?rážKºñÉ»§Áº´ã=ðw“·ÐÙ.\ÇÛqMB7ñ˜P¼‡ ¹ä¿1ÃØG½z]t|LZm{yUxš—ý.yã„çÂu'½…˜ë{8O×— úK”?côz|¹‘’f“˜øÀ¡ØvâñØÎ#b+9-åRÆýý§ áTC˜¦í²ãħ‡ó!¼t#¼£ºØdpÞÁx"íïµµô®õ¶åÕ|BæÆÄá"´È°ëwœüÛÕ;&ž{Ú Ã©^cÓ¡ß0áÁ&Ü?´ËÅg¸›öÙ#(^²ˆóîìTÅÕ'bÌJh—kÝ﹟å6ÄãÃušƒð:ËÃÝà)H›,Éw.Ä#¯“§?¬c.q­7Ú{œÊÕOžé~ž'*ìÁ¼ÁÍäÖŸ„ò‚³Í^k¸˜(³é÷¹ü¾QtÅ¢S€;G½ Rñι²?¤xIüýH×g±?.Þ‰âê1/Y ò°3@Ž¥õ¹ól·)z¸Ø·PÙâË3)2‚ %ð.­ÿÅŒ“ÚCž]꘷5S[œÜàÚ!Ñ.Loá%Â>ó;BÖ¯AÿámSZ„ª{ÕŒ¿ÿ%®¸ç°Ä¦#§³hµ"¼½Â$,óŸ~VGñ’ÿR\ó$ÅK§xÉó/ù‚ õ5È߀ã"Jútô.ß>uÍO¿žf·xꊊ +YºˆÛä%òpåÞËluxÌ+ˆËo\Ò: /Õ‰ ›\ó¶£ºÆØ.…$€ß¯ê¤:ʇƒðÐO»Dx¢?ÇÈ‹\vO Š‹²`T èIÀ«ÑQ’z<08Šð,“ÈDõÀ\¬œƒNÚçß*duË/˜_^Y9Þ ÿp­¶¨¶/ÔìEqÍϸ f/Šjû¦÷ºâIÃòœNóp!ÇzÅ#îíuW¡EÞl1ØC¾Âp¥¯¼ª—3í‰A ëm}3ö&O†òaË©—}P‡êû] ¼„ÛoIr†¦È‚ѳ`Ô`Ðcë·pÝë[6‚\L({ F]μÂM………¡v?8@Un9löìÙa!Õ `lõDŠk£hÉz¤i–®]…¥k‘¦ -YÏØ%+(^rç/unê©M^öòéBµO¼ áÈDÄyO‹¸çL—°kÖÝÆþ;œ‡2Ø3¯’…Å­Øö1Ñóø„åxÃ'ÉÃùØöB¶þìX„ߣüeÇpÑ‘ÇÖ“ ìšW+÷'Ì=»6E«“#|Gø0Žáµ9ôlÜýÔ·þ¶W¶÷÷¯{N£©y]Ëà·#k?ú¤HÚwô²>ª“s(®>Ì9¨Xá{Æ%³Ðå×À¯±‚UŒ]òwÄ\ż1Ÿ¤ãÒ|x8bÄÖU޾žž5Êó˜õñDLMk}áäܦº.+ !r­ƒŽžµ-|MG]c¿w½Ñ‰’Ƕ U§ÿ‹ÊÓ/âãúþH訹x &öpQ^:þ;ŽN+æqÅ}{¤npŠ,+lbáè÷íS·vPAý×Ìõ_/±d‰¾„H ÊÃmø¦1{ÈHèû]õ+ÂMØg[þž'+@ÞdlõoÒq9©M^îgúEdõŸƒðœâpóˆ~ox{È*®~ê6”K\󶆅rÊy€ÃÖŧM·£„·Mx¹åϦdEwBõG~æŠr Éy¸Ø°ö¢9X¦ÉêNcòäÉooRTý¶%rÒŽ¹ÁÖ½a j݇è?М§XX¸1.óùµ;h:eÂi€…j/»[3Œù#ŸèˆmÞ{š´ýç!¼NÓìé-´ˆ§Îe¤¡Y׺’ùîvö(·/’UœÐÖãCxé&¼rà‰–?¸âžþk(¢CÃËÚ7EáÍEµ?áÖÂw;Çðx***v6Xï(ôefEU‘*•M)¹Í3ãðÚ¬æ[jÖ×Y€¬B¬óX0bµg¾; ×wwqAõÁX,d£ÜƸÛ÷kÙÎ/%:4ë:ÕÁqõ87ëé—ÿ^\~ã#o[BÊOÜ„‘‰´ãÝÍ.eãžÖ!oWpýï¾dî™ÕÜpÖ¹ôiîrð˜«mÁ¸°¿ï"«(--ýJUÛÏ P^¸çîófìÕxª{´äyŸœì!ÌO ¶Xîýõr ÒzªÄäwèܹß$^g`¯ì áÝ3Ù6k¸p°Ë/N¢¼šPoL?a)è Å£p+¡@¶wÚ.[,vasÏz€¹g„È1À꤄'ÛÅÓ·wÏë÷EåÒÂB[™« h{-¬à–ÂÔ6ªµeE{¹Ö)•Ó‚ÿ—½š–é&'÷÷¨Év¶!&¬–ûvi‰ò&CH.Á2o¹î^JA9¥¤m×ï´Ñص9lÊÿ±/{êô+æîoj7n8ë9JA}Þ"DÎNhcXtu¨N¿Œ»=-8 Õƒ/ü7{ÕG¾Ú¯ !wág}. xɨü—€¼Â¼︔Ð~ȤpçÝù×D-Ž\°t$tVÛçý/ù‹iÇÿÔ.Ϧe©­  ¦$ÚE¾ŽóR+Ý óááü2cèÿ€rçfiŒ‡SäÞ„õðpu»еX­f-ùZ -šžÎ‹ª1uì¨#Q}'¡W³C§Î·,ª9–±5÷ê¾ÕÇ*„‹_Ü”ÌÒ/{ïRLFt1Ƭ¡¨ú=ÆÖ\ÅE5Ñû­lÎy du‹ùý d¿Dqõ‰IÙ2vÉ)ˆy m««o²Ë¯täò’ôp±áNðpïí:Õ½}Õ«4¢¡' 2.ÞÅ¥\?dgÏ ©iÊÞ®e¹zˆDi´ÎÕæØrîídâÌÊÊ? 
²€ö5f>øÀuÝYº.r±´£‡³øäÐîÀ–Ö·+***ÖJ…dŠoœZZ2€óîÜ™ì쨞Š7N¦ ߇t.²7ª×d2ck.gþÈE@¸çvÜâ"Ôz %aÈc׬BÌR4ð›²ÞaYÄöíÃksèݼ?ª'>¹ç°¶–ŠÈV ¸£{¦¸ θ4;Ú4óÂ~¡/„fy×%¼G°‡8ï¾ïf] öÛC˜öø¥zÔ½ÜDaz»×mdK· ù½¸O9ÊÑF•IJlí ÈXŠæ5€¾¥1jÍšùš´Ø@8€âÅ{Bà9Tw‰‰Õç@Ö ²hÀÒÞ¨ì:eÒr­JOÐ;[}óGO`Þ˜—)®>‘¥(»¶\Ë`Tƒž!Šj6"º(@û¡?MPåSÄüù£;äÝ éÓs"ÂévnöÊnd…îÁ°KTžÂ3Þ[oûò.Iþ`]wâ Ê­E)LÎÃņ<œ]ØÄÄåƒô!Ú_qé»pÓo£Ö}‰Èšâd„ JŒÐpžnaÙ¢õø@S9]éÇðÚ4/‡V±©AeaNÀš};_ô ¤§‹È¯vBɾøÕ{׳ÆÍû`§¡ÀŸm=n¸„¢š7Y82¼Aî‚ÑÏrQÍA4kÈE í“ÍE ;BË,‰›~²¸™PS9 ÏO°­¾?ü |{ / «8ì•;•U‹rœ?¡Ê;0d¹wÁ>ÊJ¥²&`…Nÿªº”ë[x±&›×®iÛ`] ü12» Ê]VVx/UýáÁo/>‘%¯mévÜó›{áPOLØŠë4¨¯ßüZn^÷é–eeµÔ{›®… ÷•Q}„ÞÍÃQ=a3Êi3~òõ΂>‚aÏ6ÄÞ~"Å{e‡šËòåÝŸXÝN¼ù^ÓóI‹¿@ÄñZ·Žü(å‚…×È9•aÀÁ „·ãk+¹x •7&'çÑ”{7]pWKå+o :ÈUhíáuL=2µ³±íÕ9doƒ‘k€]î&a >Î}›ï«Ÿ¸•ëóF†Uïæº_OÚö)^z½ws1¡ð˜õ›è³¬'<8ôö„yµåCR=÷°“‰UUU‚†µ@|3©-7~ºóþŸ³×o ››™7Òuƒ{îܞݚ6¹W§·N-|q[°¨æzDÃÇr‰Nš¹Ï×ùÀµö:•¹îK“3ø†÷û½DëJ‚`s_½’ãnÏ'PK·ì†®8g®ã.‹ w ;ûP C`c!Fx‹Ë1îEþ|ÜϺ ÎÏpNáTø`óMìYp.ph[Y¾›.¼Ÿ 5—6á™W€{¸ìÞÜxÖ½±E•””|<£²j‰(®óë%5ëI Àó¹­m"¸>êV@nS“çùŠÞóVä˜ZHD7«Šâå âÊ”ï i#¨iË&Ýý;<ïÂïŽ ¯$Ç3œñ@{xf¼°1.>>¯½Ã_¤ÆÆ%n$¤ùÚÞÛû¨CzcYaˆ’‡/Ââ4rx%IáÅRuÖ7\qÿRà<Ÿ3ÿsAïá²»k®bîÙѳnT–ƒž[*¬©ÏãÁ ½©¶|/‘‘N¯ák¤nˆ‘ý<>Øï¿ÿþ_Ño™g¡e&Š=eÍ®Ãfì÷ùPQ©~æU°EEþöa^3ï³+@[:Fä–)ZÛIx8×°>lÑOÚd¼˜sX´”kOâô”èU§¾Ìä‡çôÑ|ô^,z Êï–3| ¯e8—Ô¾ò0ª¯ï/Zzñœ¾_lª7^u!‹ïB>iÈaõ÷ùlhŠiizu˜„Ç^o`¾÷ƒ<ƒ°w¸çËÀº´l펯£ò,˜UX¬G­z„žÝ‹Q(‹ âÇ>LóG9Ô³}à1Gt«m[ O¨aõ†?&uRŒ›Q%tð쇙§}KÉC% Õþ=$œOùyp&»dŸ¸«>A=~óº§5)¢zŒçù¶m燸D×gÑz] óFɸÚÁ˜æ¹ £ ?ˆ p¢‡µÿp¶Ø+ÄvÊW`cáˆ=×Û–Ô&/ÓV¯p2iš[‘qÈLì!£[Ö}%A¢z´Ãz êÔ”•QåÆÖNIJÂM9g£ÚÞ|N¼P´=ì˜6A^÷´“X0âñD&çåõ„ûpÀŠÉ“'» v^á&Œ<±8,*ŽÃ/ \D¨yÏí]l®ÉËçñ¾ý#9>ù&‚×é9þ}¤ODaùÅ o ä8×ãNÔwëiß2iùPšïE9¦=kZ¼–Ÿ¼ŠÊ$ŽºÞÛЖä–ëþÙŠ¿ï0¼„æB†×^LÏÐ  Âb/”Þ Ýù#ëÀ¼E¶¾Æmc|žf´}àó.6ÜÉš1, 9ëÏÌ8¶½Mê]O:…7ëôw˜ð`Â4Gíh“¯r×c×e\‡0ÈÑÎ^øõW¨žÏÂQžÃ1eçÓœ“%ù.`YaȶíÕݺuÛ šû,€ˆæ©J}VVPsss¿?>ÅmÑ·! 
z)#^¤Ëkyç]j !nä¯'tüð?Ç›ÞŦ´ÐðW4÷À^ŽõÆ ÏgÝvaPÂÅËîE¨=Ò¿˜’^3pšc;nAàBmmmàƒ>q[+÷Ìĉ}—5³²r*ÓÝK"†ê©ð}C“Ψ¨|¦ñûüÓlû_³Af”WN‘q„Çó¥O’zàÝ€è¹-[<8â=ÛËñ_š‹ëAÿ‡è³Y‰µ~%×Fww/ïkS˜SXÏÄû/CyØý&ï€g½eø ÀQ\z×I¨\: ˜EÒáå[!*hÜðò–ÜÃRÙ¡jí'Ÿ&ÐË)N”¸AyOT®öHJùU^^ÃáÐúüìNeeå Ê̤ìHŽ<à RŒqKäÕKy!j¸n Eëè~KZ÷® –nÁ˜-Hà×™þé@ô^ 1Kê¥ý±)ضAQzŸfŸñî?‰èk¢W¸¥U_SãÄ´I“þ7³¢r È~­ï ¼h”Å¢ÁG¦Nš°]^Qõõœ®©¯¯[êUFFp?4L° ¸ ô¸òòòS¦LÙâž&šœîÝG›v& ±“•“"+ × ®‚KQ™®Â"…»²D+¼:7b™1cöÞŠ9Û+…UjÛ¶ç3ض<ˆ/Ã6`Ê”)ïBì³m9F²~L™–Šc³Oà­i“&uhÓ’’’´®XZ:éï˜àÀ²Ò’QɈ À ˜Éx-sî.-”pè#ãá~€¨êÝ"r€[¼ˆž Ü“D‘Ž‚S’ìtçs`—„© " $Ý[^>©_Ë‹âIDATg?%t¾G’õ¡¦,×5‚‘d<Ücq·gåTÛçÎ\-ó'tг0i™D,è6]j£Vhàz.º"—^yå_û)+#¸ WNžüè$;ååõø¹G|ùùùGà¼iÑG“'ONb•;Š•òÖ⥢bÖi('¹Å+Ð]§¡æÉ”ë—Ìž&ÚhhØúB·ü‚Ïq®qZÙ]W_Ÿ×©ç± N$)ÁåæL Ñ^—Ê-eS§zœSmÛÝró{œ*è(µ¸®eBAÁehöm3³¢ê^à2ÿ¹ô>¿{û§Š y꾉«oÁÍœ9û ÄLO˜Ðd-J”DU¥²²r¯ÖNB9´€>2‚Ë#ºÔRñ-8QYœnlÛ¶òòòö1@€Ó×­ç^`ÀŒªª¶³À·n]cÛö÷ΉÍM¸Ÿ_×J³••WT9ÅYŠöÙµ¼rÖ`õpÚ•Ýu?˜Œà2DQVRòRyå¬h;È“wëë·>™nòò –(œÓ¾(Ç}öVKºsÚòæ\Ü›®¢¢ân§ûD’­à±O‰™dÊÝ¢2&¢UU_ ŠÌ´m»3Öª”jFc9OÑRµöNÝœ$÷Y;ÁeˆC³¬Å$8ŒYEn*+´¨Sê‡Ô»ã:Ž%Z–>ã5Θ6”ÿщnÑžiáÿ'3+ªJQŽEÈz¶¼½xÛÂÜUZZÚi'͘1g/²‚~š´QX&°¹¾~ó*7¯kÛv·üüüýC"};neA Yß³ÙxÕ¤IÛæÄ¦ 2dÈ!C† 2dÈ!C† 2dðÅÿãHµ…5j(IEND®B`‚././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/images/logo-60x60.png0000644000175000017500000000404715001472663016413 0ustar00caseycasey‰PNG  IHDR22?ˆ±sBIT|dˆ pHYs¸¸W½ê„tEXtSoftwarewww.inkscape.org›î<¤IDAThí™{PT×Ç?¿»Ë"¨4Ñ T§Ú3vj’IÌ$­Vc¦ikÆ´ZHUQÛÚqF\¬N‡ÖÉ(ŠcÒ¦Ó®†Ç’ji5†ÄG´µÕ46£‚©%(4ø½÷×?®H§~f˜=÷ñ;|¿{ÎþÎïÜ ·¹Ímþ/góö89{f¦14\9‡pŒ¤Q'oš¯_•öB°‘ìŠû°Ì ßân¥\Ä`>6±-íXk ÛÈœJñíkQÜ€qÝ]Љàq^1ØÈèÑ«z„lÑw´oAYå?6Aw ¤¢:žf—Oš“f—ÃùYÐЀ‰ (+9sfÛ@š2ʧ#Tùÿ†Cž£0õD¯‘Ï{Çã£чPý žù¿ïC­=b€|Ýß¾Œ¯cFX& SOàˆš4Û'ºú Ð(»©$ n‹(:ºþ*H;"®[-. 
ŒÎi% \îØAzÉð°"3*‡Ñ:|;è]v¸õf_‰ 2½oOvjŠQ~ElÓ_ùé²À(Í©t1Ô÷ †5È&¢ß"éäLòò¬þ6Љ~³ c1c=ˆÌ ºj§Ý ˆAq]VÙN»¹²ô+ý¤9$Á bfÙ“@.ÈTB­',DöaY2Su't‰²°ò.œm_Ä’‰‰‡´}ãèåè õ GqZûy%½¡Ÿµþ÷äo(¨h ½ÑÓôùŸ"ôÔò³~ætÐ’Îc¶*'goŸ«º z4¢ª’¿qÓà„¹9+æô“®ˆéqjÉÒŠøw[b ®V5ÆýŒ%Þ„þ)Á#’—gðÑ„¯bXsP™: `LL;§[»*zàmT·ó™š×rìNÀHVÙc¨x€û"¯Á"‹­©{n½´È°dyç¡ê%0Õ.»CÀ¿ùÕx`,ð0ð40ðo®dÅ©¯ö¯ô`„ů݃Ïw$ hAe ŽË…=ßrèìÂX¬ÁÙÀ @,"> ’ÃÞôNLs¡ß„‰a}™¢ô?÷e›|‘¬òw€jTø4Èíc½7Ä’ì¦Ö‡e¢;Åi‡P9 €Èè[¬-" Ð÷즌&«ü{Egz—€Ž³:û„Ež¡Hôq„»ýg~‡Åz<©Õ z}ˆ YåS°Ä0 À€&Ëp£hns¿ªï†?kULIJªºÌ¨6©Eµ‘Xà^„GQîìê@¸°dLãÙ±.kžÛ½üŸ›7oŽ_¾|ù'cì­+ík20ößÚâ¡Ý\S0éüŸÅ/DyY…U¹+W<tkÞ'u§Ç#1¢±¸ˆ¯í4¯.¼e†¯¯µÒK†åxƒ©XúÄékX»M  À½òŠR.t†åç<«ÂNQG²»fäyLkð,¢“ý™ñÚÿè.,£”­)n­‘äo(¨v¯\ñØ ¯ççUqüxàðŘݕÇOC5&%Uˆ•AQúé°cºá¼™ kq»Ý—€)îõ/9Ø»óª©O£]yâ(Â.#ÀÔjC¹!•/!:$ ej¼KVŧTC#²næo š‰]–RÑu«rrÞ¾a@¦wèKö¾‡ÊRëÀÚSI1¦åsƒ<áï§VÆãM»h›)½q¬ÃÒYHk•ò!†üˆâ”’Ðõ]FÈ*û>*/ú{\½þsõµ¨l¤«¼I(n÷ÉÄ$T×Û¡’ƒ'µ è®ìÒ1˜2Ë_Ÿ%#r'àB¤Kk18‚ê4GWGšn¯Å 2Éo©nÝ„újTÂÙV¡|Ýøº§Vu ¸ÑÏ_w—½J¿ìÿëS Têüí‘$´[Þ†WAKrjD#hçÈé3•a`€¹ Á‡âldЛ¹Ç«¯˜ò ª®EÙÔ`¿^«EÙ¯ª/\Åz(÷Ĩ=ç:œoD!âÃ![ÒˆýCÌ(OA(#°@^ù5¢AõC0š°¬{F™ˆý¦JQD°e~é€8ðÈ(Ùc©tBøáZƒ82(NÙ×Ú""P¢¥ü‰¤÷#ú ¨×ÿ¦*rx ÕÙ$ÕLø4˜€Þö#é%ÃqÉpÄbj+QFC÷ÍÔ§‰ÿIÖ­U8ÔIEND®B`‚././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/images/warning.png0000644000175000017500000000212015001472663016325 0ustar00caseycasey‰PNG  IHDR5¿7¶sBIT|dˆ pHYsééT’KtEXtSoftwarewww.inkscape.org›î<ÍIDATH‰­•KlTeÇç»Ï™NÃcJ™fZúšÎÐRÈÔÁÐꢀiÄ`\(1LµiCbƒ€’ ‰Q‹îˆ DbL4„&F Á•¢&b4ÄÄ01Fº¨¶á™¤íÌý\LZ ´gs¿{ÏãwÎÿžäƒ°ËgÛ¿¸~qÇøñ#Ý[$OÝoà7'jv®^ßÑŽ×/ioõ?;xðàòE…d³¨ºTú˜X®Ó$7¬(_üñm@ Ò“ˆ¿±¤¶q¥ð‚‡áLѽñϧ÷ïÛ×´(‘,¡ºõ­ý¢n"L"x(<â)ÛÉÄ¿û0þ7¤imãG¡êPH1‰2ª0Ü®"HM²yÓhû+/ï}”{ȶ äó£N]m[b›pÁ#çïæÚ;PFa’HÈÛwØØ<ÞqhÿK[n—­òÕ»¬ih3{f7G‚oÖœ?™LÒØØXÒ”¹ü4Q´o²´:hnŠ]8900`ß’¨wÏØ9Kt­ºÁÙ^R0ÒÛÛ{›AìhÿÜk¦yju$tc÷¿§™ƒ\xÏxò¡ÔT’|&¡£·+‰çyD"‘yßÍe½¨`= WmM\>pà@U $›EÕ¯õ‰X¢ 6¸{»­¤ÖšL&C:æÔ©S¥QØ«†fI5™á˜ýÓ‘ÙiÀãq5¼4æÔø3~¾Uùê¼nµÖLLL0666ÏoTd0*;AÓ1ÙÒøëÞ¡¡¡8€9’%Tßjõ邃ΛhíRð΂”´¯@+ÐÂùO_ãÒ•kìÜ^C~ì“ùnÍkkîºß¿?ôÈϧís ™²mº`¢ Ågé}_wÓ<»ž›9ùCO—¹"ænÔ3΋kÿžWÅ‚Y°Âöo‡Ì«—¦ÊÊú¦ 
vEQÐÚ¦ÿ‘Akåûºdݾ(Ñb*Q¢Š?XD|1wã–‘Ÿ®=ÿ7#¤+J¬Ý¼SIEND®B`‚././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3905313 dnsviz-0.11.1/doc/man/0000755000175000017500000000000015001473074013462 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz-graph.10000644000175000017500000006046115001472663016172 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. .\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . .\" .TH dnsviz-probe 1 "21 April 2025" "0.11.1" .SH NAME dnsviz-graph \- graph the assessment of diagnostic DNS queries .SH SYNOPSIS .B dnsviz \fBgraph\fR [ \fIoptions\fR ] [ \fIdomain_name...\fR ] .SH DESCRIPTION Process the results of diagnostic DNS queries previously performed, e.g., using \fBdnsviz-probe(1)\fR, to assess the health of the associated DNS deployments for one or more domain names specified. The results of this processing are presented in one of several graphical formats for user diagnostics. The source of the diagnostic query input is either a file specified with \fB-r\fR or standard input. 
Domain names to be processed may be passed either as command-line arguments, in a file (using the \fB-f\fR option), or simply implied using the diagnostic query input. The latter is the preferred methodology (and the simplest) and is useful, except in cases where the input contains diagnostic queries for multiple domain names, only a subset of which are to be processed. If \fB-f\fR is not used and no domain names are supplied on the command line, then the domain names to be processed are extracted from the diagnostic query input. If the \fB-f\fR option is used, then names may not be specified on the command line. The domain names passed as input are fully-qualified domain names, such as example.com, www.example.com, _443._tcp.example.com, 1.2.0.192.in-addr.arpa, or 8.b.d.0.1.0.0.2.ip6.arpa. Because it is implied that specified domain names are fully qualified, no trailing dot is necessary. The graphical output is the image of a directed graph created using \fBdot(1)\fR. The "html" format makes this image interactive using javascript libraries that are distributed with this software. .SH OPTIONS .TP .B -f, --names-file \fIfilename\fR Read names from a file (one name per line), instead of from command line. If this option is used, then names may not be specified on the command line. .TP .B -r, --input-file \fIfilename\fR Read diagnostic query input from the specified file, instead of from standard input. .TP .B -t, --trusted-keys-file \fIfilename\fR Use trusted keys from the specified file when processing diagnostic queries. This overrides the default behavior of using the installed keys for the root zone. The format of this file is master zone file format and should contain DNSKEY records that correspond to one more trusted keys for one or more DNS zones. This option may be used multiple times on the command line. .TP .B -a, --algorithms \fIalg\fR[,\fIalg...\fI] Support only the DNSSEC algorithms specified. 
If this option is used, any algorithms not specified will appear as "unsupported." The status of any RRSIG records corresponding to unsupported algorithms will be unknown. Additionally, when a zone has only DS records with unsupported algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. .TP .B -d, --digest-algorithms \fIdigest_alg\fR[,\fIdigest_alg...\fI] Support only the DNSSEC digest algorithms specified. If this option is used, any digest algorithms not specified will appear as "unsupported." The status of any DS records corresponding to unsupported digest algorithms will be unknown. Additionally, when a zone has only DS records with unsupported digest algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. .TP .B --ignore-rfc8624 Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements. RFC 8624 designates some DNSSEC signing algorithms and some DS digest algorithms as prohibited ("MUST NOT") or not recommended for validation and/or signing. If this option is used, then no warnings will be issued, and the code will still assess their cryptographic status, rather than ignoring them. .TP .B --ignore-rfc9276 Ignore errors associated with RFC 9276, NSEC3 parameter settings. RFC 9276 specifies that if NSEC3 is used, the iterations count must be 0 and the salt length must be 0. If this option is used, then no warnings will be issued for NSEC3 records that violate this specification. .TP .B -C, --enforce-cookies Enforce DNS cookies strictly. Require a server to return a "BADCOOKIE" response when a query contains a COOKIE option with no server cookie or with an invalid server cookie. .TP .B -P, --allow-private Allow private IP addresses for authoritative DNS servers. By default, if the IP address corresponding to an authoritative server is in IP address space designated as "private", it is flagged as an error. 
However, there are some cases where this is allowed. For example, if the diagnostic queries are issued to servers in an experimental environment, this might be permissible. .TP .B --trust-cdnskey-cds Trust all CDNSKEY and CDS records, even if they are not "signed with a key that is represented in both the current DNSKEY and DS RRsets" (RFC 7344). This is allowed if "the Parent uses the CDS or CDNSKEY RRset for initial enrollment; in that case, the Parent validates the CDS/CDNSKEY through some other means" (RFC 7344). Because there is no way for DNSViz to discover the out-of-band means with which the parent might have validated the CDNSKEY and/or CDS records, this trust is signaled with the use of the \fB--trust-cdnskey-cds\fR command-line option. .TP .B --multi-signer Don't issue errors for missing KSKs with DS RRs. Typically an error is issued if a given DNSKEY is not found in the DNSKEY RRset returned by one or more servers. If \fB--multi-signer\fR is specified, then no error is issued, in the case that 1) the DNSKEY is not signing any non-DNSKEY RRsets (i.e., is a zone-signing key or ZSK) and 2) the DNSKEY corresponds to a DS record in the parent. This corresponds to the Model 2 use case in RFC 8901. .TP .B -R, --rr-types \fItype\fR[,\fItype...\fI] Process queries of only the specified type(s) (e.g., A, AAAA). The default is to process all types queried as part of the diagnostic input. .TP .B -e, --redundant-edges Do not remove redundant RRSIG edges from the graph. As described in \fB"RRSIGs"\fR, some edges representing RRSIGs made by KSKs are removed from the graph to reduce visual complexity. If this option is used, those edges are preserved. .TP .B -O, --derive-filename Save the output to a file, whose name is derived from the format (i.e., provided to \fB-T\fR) and the domain name. If this option is used when the diagnostic queries of multiple domain names are being processed, a file will be created for each domain name processed. 
.TP .B -o, --output-file \fIfilename\fR Write the output to the specified file instead of to standard output, which is the default. If this option is used when the diagnostic queries of multiple domain name are being processed, a single file (the one specified) will be created, which will contain the collective output for all domain names processed. .TP .B -T, --output-format \fIformat\fR Use the specified output format for the graph, selected from among the following: "dot", "png", "jpg", "svg", and "html". The default is "dot". .TP .B -h, --help Display the usage and exit. .SH OUTPUT The conventions used in the graphical format are described below. .SS Zones Nodes in DNSViz are clustered by the zone to which the represented information belongs. Each zone is labeled with the name of the zone origin and the time at which the zone was last analyzed. .SS Delegations Thick lines between zones denote delegations of namespace from one zone to another, as indicated by the presence of NS (name server) resource records (RRs) for the delegated namespace. The status of the delegation is reflected in its color and style of the edge. .IP insecure A black, solid line between zones indicates a standard, insecure delegation (i.e., sans DNSSEC). .IP lame If the designated name servers for a zone cannot not be properly resolved or if the servers do not properly respond to queries, then the delegation is considered lame and is represented by a dashed, yellow line. .IP incomplete If the delegation is incomplete, as indicated by the presence of NS records in the zone itself but not in its parent zone, then the delegation is represented by a dashed, yellow line. .IP secure If the delegation is secure by DNSSEC standards, then the delegation is represented by a solid, blue line. .IP bogus If the delegation is bogus by DNSSEC standards, then the delegation is represented by a dashed, red line. 
.SS RRsets Resource record sets (RRsets) returned in the response (usually in the answer section) are represented as rectangular nodes with rounded corners. Among the most common record types are SOA (start of authority), A (IPv4 address), AAAA (IPv6 address), MX (mail exchange), and CNAME (canonical name). RRsets that are specific to DNSSEC, such as the DNSKEY, DS, RRSIG, NSEC and NSEC3 RR types, are represented as other node types, as specified elsewhere in this guide. .SS Aliases Aliases resulting from CNAME RRs are represented by a black edge from one RRset (with the alias name) to another (with the canonical name). .SS DNAME A DNAME RR is used to alias an entire namespace into another. DNAME responses include synthesized CNAME RRs for the aliasing directed by the DNAME RR. DNAME records are shown in DNSViz with their respective CNAME records. The status of the CNAME synthesis is reflected color of the edge. .IP valid A solid, blue line between DNAME node and CNAME node indicates that the DNAME expansion was valid. .IP invalid A solid, red line between DNAME node and CNAME node indicates that the DNAME expansion was invalid. .SS Negative Responses If the response to a query is a name error (NXDOMAIN), this negative response is represented by a rectangular node with diagonals drawn at each corner, and with a dashed border, lighter in color. A node representing the SOA RR returned in the negative response (if any) is also included. If the response to a query has a NOERROR status but contains no answer data (NO DATA) for the type, this negative response is represented by a rectangular node with rounded corners, and with a dashed border, lighter in color. A node representing the SOA RR returned in the negative response (if any) is also included. .SS DNSKEY RRs DNSKEY RRs include public key and meta information to enable resolvers to validate signatures made by the corresponding private keys. 
In DNSViz, each DNSKEY RR is represented as an elliptical node in the zone to which it belongs. Each DNSKEY node is decorated based on the attributes of the corresponding DNSKEY RR. .IP "SEP bit" A gray fill indicates that the Secure Entry Point (SEP) bit is set in the flags field of the DNSKEY RR. This bit is typically used to designate a DNSKEY for usage as a key signing key (KSK), a DNSKEY that is used to sign the DNSKEY RRset of a zone, providing a secure entry point into a zone via DS RRs or a trust anchor at the resolver. .IP "revoke bit" A thick border indicates that the revoke bit is set in the flags field of the DNSKEY RR. Resolvers which implement the trust anchor rollover procedures documented in RFC 5011 recognize the revoke bit as a signal that the DNSKEY should no longer be used as a trust anchor by the resolver. For a DNSKEY to be properly revoked, it must also be self-signing (i.e., used to sign the DNSKEY RRset), which proves that the revocation was made by a party that has access to the private key. .IP "trust anchor" A double border indicates that the DNSKEY has been designated as a trust anchor. A trust anchor must be self-signing (i.e., used to sign the DNSKEY RRset). .SS DS RRs DS (delegation signer) RRs exist in the parent of a signed zone to establish a SEP into the zone. Each DS RR specifies an algorithm and key tag corresponding to a DNSKEY RR in the signed zone and includes a cryptographic hash of that DNSKEY RR. In DNSViz DS RRs with the same DNSKEY algorithm and key tag are typically displayed as a single node since they usually correspond to the same DNSKEY RR with different digest algorithms. The status of the DS RRs is reflected in the color and style of the edge. .IP valid A blue-colored arrow pointing from DS to DNSKEY indicates that the digest contained in each of the DS RRs is valid, and corresponds to an existing DNSKEY. 
.IP "invalid digest" A solid red line from DS to DNSKEY indicates that a DNSKEY exists matching the algorithm and key tag of the DS RR, but the digest of the DNSKEY in the DS RR does not match. .IP "indeterminate - no DNSKEY" A dashed gray line from DS to a DNSKEY with a dashed gray border indicates that no DNSKEY matching the algorithm and key tag of the DS RR exists in the child zone. Extraneous DS RRs in a parent zone do not, in and of themselves, constitute an error. For example, sometimes they are deliberately pre-published before their corresponding DNSKEYs, as part of a key rollover. However, for every DNSSEC algorithm in the DS RRset for the child zone, a matching DNSKEY must be used to sign the DNSKEY RRset in the child zone, as per RFC 4035. .IP "indeterminate - match pre-revoke" A special case of a DS with no matching DNSKEY is when the DS matched a DNSKEY prior to its revocation, but the ramifications are the same as if it didn't match any DNSKEY. The line is simply drawn to help identify the cause of the otherwise non-existent DNSKEY. .IP "indeterminate - unknown algorithm" When the algorithm and key tag of a DS RR match those of a DNSKEY RR, but the digest algorithm is unknown or unsupported, then the line between DS and DNSKEY is yellow. .IP "invalid" When the use of a DS corresponding to a DNSKEY is invalid, independent of the correctness of its digest, the line between DS and DNSKEY is red and dashed. An example scenario is when the DNSKEY has the revoke bit set, which is disallowed by RFC 5011. .SS NSEC/NSEC3 RRs NSEC and NSEC3 RRs are used within DNSSEC to prove the legitimacy of a negative response (i.e., NXDOMAIN or NO DATA) using authenticated denial of existence or hashed authenticated denial of existence, respectively. In DNSViz the NSEC or NSEC3 RR(s) returned by a server to authenticate a negative response are represented by a rectangular node with several compartments. 
The bottom compartment is labeled with either NSEC or NSEC3, depending on the type of record. Each compartment on the top row represents an NSEC or NSEC3 record in the set--there will be between one and three. An edge extends from the NSEC or NSEC3 node to the corresponding negative response. Its status is reflected in the color and style of the edge. .IP valid If the edge is solid blue, then the NSEC or NSEC3 RRs returned prove the validity of the negative response. .IP invalid A solid red edge from the NSEC or NSEC3 node to the negative response indicates that the NSEC or NSEC3 RRs included in in the response do not prove the validity of the negative response. .PP A special case of NSEC/NSEC3 RRs is that in which they serve to prove the non-existence of Delegation Signer (DS) records. The proof of absence of DS records constitutes an insecure delegation, in which any trust at the parent zone does not propagate to the child zone. The NSEC/NSEC3 proof involving DS records is graphically represented with an edge from the NSEC/NSEC3 node to the box representing the child zone. The opt-out flag is set in NSEC3 RRs to indicate that their presence is only sufficient to prove insecure delegations (i.e., lack of DS records) and nothing more. Thus, a name error (NXDOMAIN) response, for example, cannot be securely proven when the NSEC3 uses opt-out. NSEC3 records with the opt-out flag set are colored with a gray background. .SS RRSIGs Each RRSIG RR contains the cryptographic signature made by a DNSKEY over an RRset. Using the DNSKEY with the same algorithm and key tag as the RRSIG, the RRset which was signed, and the RRSIG itself, a resolver may determine the correctness of the signature and authenticate the RRset. In DNSViz RRSIGs are represented as directed edges from the DNSKEY that made the signature to the RRset that was signed. The status of the edge is reflected in its color and style. .IP valid A solid blue edge indicates that an RRSIG is valid. 
.IP "invalid signature" A solid red edge indicates an RRSIG in which the cryptographic signature is invalid. .IP "expired or premature" A solid purple edge indicates that an RRSIG is invalid because it is outside its validity period, as defined by the inception and expiration date fields in the RRSIG RR. .IP "indeterminate - no DNSKEY" A dashed gray line stemming from a DNSKEY with a dashed gray border indicates that no DNSKEY matching the algorithm and key tag of the RRSIG RR could be found in the DNSKEY RRset (or the DNSKEY RRset could not be retrieved). Extraneous RRSIG RRs do not, in and of themselves, constitute an error. For example, sometimes they are deliberately pre-published before their corresponding DNSKEYs, as part of an algorithm rollover. However, every RRset must be covered by RRSIGs for every algorithm in the DNSKEY RRset, as per RFC 4035 and RFC 6840. .IP "indeterminate - match pre-revoke" A special case of an RRSIG with no matching DNSKEY is when the RRSIG matched a DNSKEY prior to its revocation, but the ramifications are the same as if it didn't match any DNSKEY. The line is simply drawn to help identify the cause of the otherwise non-existent DNSKEY. .IP "indeterminate - unknown algorithm" When the algorithm and key tag of an RRSIG RR match those of a DNSKEY RR, but the cryptographic algorithm associated with the RRSIG is unknown or unsupported, then the line stemming from the DNSKEY is yellow. .IP invalid When an RRSIG is invalid, independent of the correctness of its temporal validity period and its cryptographic signature, the line stemming from the DNSKEY is red and dashed. Example scenarios might be when the DNSKEY has the revoke bit set or when the signer field in the RRSIG RR does not match the name of the zone apex. Such scenarios are disallowed by RFCs 5011 and 4035, respectively. .PP Just like other RRsets, a DNSKEY RRset is signed as an RRset, which comprises all the collective DNSKEY RRs at the zone apex. 
Because each DNSKEY RR is represented as a node in DNSViz, a single RRSIG covering the DNSKEY RRset is represented by edges drawn from the node representing the signing DNSKEY to the nodes representing every DNSKEY RR in the set. In some DNSSEC implementations, multiple DNSKEYs sign the DNSKEY RRset, even though only a subset are designated to provide secure entry into the zone (e.g., via matching DS records in the parent zone). While there is nothing inherently wrong with this configuration, graphically representing such scenarios can be visually complex because of the cycles and redundancy created in the graph. In order to represent trust propagation in a simplified fashion, eliminating graphic redundancies, DNSViz exhibits the following behavior. For every DNSKEY signing the DNSKEY RRset, a self-directed edge is added to the node, indicating that the DNSKEY is self-signing. Additionally, if the DNSKEY is designated as a (SEP) into the zone, then edges are drawn from its node to nodes representing all other DNSKEY RRs in the DNSKEY RRset. If there is no true SEP, (e.g., no DS RRs in the parent zone), then SEP(s) are inferred based on their signing role (e.g., signing DNSKEY RRset or other RRsets) and properties (e.g., SEP bit). Like the DNSKEY RRset, a single DS RRset might be represented as several different nodes. As such a single RRSIG covering the DS RRset is represented by edges drawn from the node representing the signing DNSKEY to the nodes representing every DS RR in the set. Because an NSEC or NSEC3 node represents one or more RRsets and at least one RRSIG per RRset is anticipated, multiple RRSIG edges will be drawn from DNSKEY to NSEC or NSEC3 nodes, each pointing to the respective compartment corresponding to the NSEC or NSEC3 record. .SS CDNSKEY/CDS RRsets CDNSKEY and CDS records are used by the child zone to signal to its parent the DS record(s) that it desires to be published, per RFC 7344. 
The CDNSKEY and CDS RRsets are represented as any other RRset (see \fB"RRsets"\fR); that is, there is a single node per RRset, as opposed to a single node per CDNSKEY or CDS record. CDNSKEY and CDS RRsets are mapped to the DNSKEYs they correspond to with a gray edge from the CDNSKEY or CDS RRset to the DNSKEY node(s). If the DNSKEY referenced does not exist, the DNSKEY node is represented with a dashed gray border (see \fB"DNSKEY RRs"\fR and \fB"DS RRs"\fR). .SS Wildcards When the RRSIG covering an RRset has a labels field with value greater than the number of labels in the name, it is indicative that the resulting RRset was formed by a wildcard expansion. The server must additionally include an NSEC or NSEC3 proof that the name to which the wildcard is expanded does not exist. DNSViz represents wildcards by displaying both the wildcard RRset and the NSEC or NSEC3 proof. .SS Node Status Beginning at the DNSKEYs designated as trust anchors, DNSViz traverses the nodes and edges in the graph to classify each node as having one of three DNSSEC statuses, depending on the status of the RRset which it represents: secure, bogus, or insecure. In DNSViz, node status is indicated by the color of the nodes (Note that there isn't always a one-to-one mapping between node and RRset, but the node status will be consistent among all nodes comprising an RRset. An example is the DNSKEY nodes for a zone, which all have the same status even though the DNSKEY RRset is split among different nodes). The status of a node is reflected in the color of its outline. .IP secure Nodes with blue outline indicate that they are secure, that there is an unbroken chain of trust from anchor to RRset. .IP bogus Nodes with red outline indicate that they are bogus, that the chain of trust from an anchor has been broken. 
Because the NSEC and NSEC3 nodes often represent multiple NSEC or NSEC3 RRs, it is possible that a proper subset of the RRs are secure, while others in the set are not (e.g., missing or expired RRSIG). In this case, the outline of the compartments representing secure NSEC or NSEC3 RRs will be colored blue, while the others will be red. Because the status of the collective set of NSEC and NSEC3 RRs is dependent on the status of all the individual NSEC and NSEC3 RRs, the greater node is only colored blue if all the compartments are colored blue. .IP insecure Nodes with black outline indicate that they are insecure, that no chain of trust exists; if any anchors exist then an insecure delegation is demonstrated to prove that no chain should exist from the anchors. This is equivalent to DNS without DNSSEC. .SS Warnings and Errors If one or more warnings are detected with the data represented by a node in the graph, then a warning icon is displayed in the node. Similarly, the warning icon is displayed alongside edges whose represented data has warnings. If one or more errors (more severe than warnings) are detected with the data represented by a node in the graph, then an error icon is displayed in the node. Similarly, the error icon is displayed alongside edges whose represented data has errors. A warning icon with an italicized label denotes a warning for a response that isn't represented elsewhere in the graph, such as a referral with the authoritative answer flag set. An error icon with an italicized label denotes a response error, e.g., due to timeout, malformed response, or invalid RCODE. .SH EXIT CODES The exit codes are: .IP 0 Program terminated normally. .IP 1 Incorrect usage. .IP 2 Required package dependencies were not found. .IP 3 There was an error processing the input or saving the output. .IP 4 Program execution was interrupted, or an unknown error occurred. 
.SH SEE ALSO .BR dnsviz(1), .BR dnsviz-probe(1), .BR dnsviz-grok(1), .BR dnsviz-print(1), .BR dnsviz-query(1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz-grok.10000644000175000017500000001627415001472663016036 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. .\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . .\" .TH dnsviz-grok 1 "26 September 2024" "0.11.0" .SH NAME dnsviz-grok \- assess diagnostic DNS queries .SH SYNOPSIS .B dnsviz \fBgrok\fR [ \fIoptions\fR ] [ \fIdomain_name...\fR ] .SH DESCRIPTION Process the results of diagnostic DNS queries previously performed, e.g., using \fBdnsviz-probe(1)\fR, to assess the health of the associated DNS deployments for one or more domain names specified. The results of this processing are serialized into JSON format for further programmatic diagnostics or alerts. The source of the diagnostic query input is either a file specified with \fB-r\fR or standard input. Domain names to be processed may be passed either as command-line arguments, in a file (using the \fB-f\fR option), or simply implied using the diagnostic query input. 
The latter is the preferred methodology (and the simplest) and is useful, except in cases where the input contains diagnostic queries for multiple domain names, only a subset of which are to be processed. If \fB-f\fR is not used and no domain names are supplied on the command line, then the domain names to be processed are extracted from the diagnostic query input. If the \fB-f\fR option is used, then names may not be specified on the command line. The domain names passed as input are fully-qualified domain names, such as example.com, www.example.com, _443._tcp.example.com, 1.2.0.192.in-addr.arpa, or 8.b.d.0.1.0.0.2.ip6.arpa. Because it is implied that specified domain names are fully qualified, no trailing dot is necessary. .SH OPTIONS .TP .B -f, --names-file \fIfilename\fR Read names from a file (one name per line), instead of from command line. If this option is used, then names may not be specified on the command line. .TP .B -r, --input-file \fIfilename\fR Read diagnostic query input from the specified file, instead of from standard input. .TP .B -t, --trusted-keys-file \fIfilename\fR Use trusted keys from the specified file when processing diagnostic queries. This overrides the default behavior of using the installed keys for the root zone. The format of this file is master zone file format and should contain DNSKEY records that correspond to one or more trusted keys for one or more DNS zones. This option may be used multiple times on the command line. .TP .B -a, --algorithms \fIalg\fR[,\fIalg...\fI] Support only the DNSSEC algorithms specified. If this option is used, any algorithms not specified will appear as "unsupported." The status of any RRSIG records corresponding to unsupported algorithms will be unknown. Additionally, when a zone has only DS records with unsupported algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. 
.TP .B -d, --digest-algorithms \fIdigest_alg\fR[,\fIdigest_alg...\fI] Support only the DNSSEC digest algorithms specified. If this option is used, any digest algorithms not specified will appear as "unsupported." The status of any DS records corresponding to unsupported digest algorithms will be unknown. Additionally, when a zone has only DS records with unsupported digest algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. .TP .B --ignore-rfc8624 Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements. RFC 8624 designates some DNSSEC signing algorithms and some DS digest algorithms as prohibited ("MUST NOT") or not recommended for validation and/or signing. If this option is used, then no warnings will be issued, and the code will still assess their cryptographic status, rather than ignoring them. .TP .B --ignore-rfc9276 Ignore errors associated with RFC 9276, NSEC3 parameter settings. RFC 9276 specifies that if NSEC3 is used, the iterations count must be 0 and the salt length must be 0. If this option is used, then no warnings will be issued for NSEC3 records that violate this specification. .TP .B -C, --enforce-cookies Enforce DNS cookies strictly. Require a server to return a "BADCOOKIE" response when a query contains a COOKIE option with no server cookie or with an invalid server cookie. .TP .B -P, --allow-private Allow private IP addresses for authoritative DNS servers. By default, if the IP address corresponding to an authoritative server is in IP address space designated as "private", it is flagged as an error. However, there are some cases where this is allowed. For example, if the diagnostic queries are issued to servers in an experimental environment, this might be permissible. .TP .B --trust-cdnskey-cds Trust all CDNSKEY and CDS records, even if they are not "signed with a key that is represented in both the current DNSKEY and DS RRsets" (RFC 7344). 
This is allowed if "the Parent uses the CDS or CDNSKEY RRset for initial enrollment; in that case, the Parent validates the CDS/CDNSKEY through some other means" (RFC 7344). Because there is no way for DNSViz to discover the out-of-band means with which the parent might have validated the CDNSKEY and/or CDS records, this trust is signaled with the use of the \fB--trust-cdnskey-cds\fR command-line option. .TP .B --multi-signer Don't issue errors for missing KSKs with DS RRs. Typically an error is issued if a given DNSKEY is not found in the DNSKEY RRset returned by one or more servers. If \fB--multi-signer\fR is specified, then no error is issued, in the case that 1) the DNSKEY is not signing any non-DNSKEY RRsets (i.e., is a zone-signing key or ZSK) and 2) the DNSKEY corresponds to a DS record in the parent. This corresponds to the Model 2 use case in RFC 8901. .TP .B -o, --output-file \fIfilename\fR Write the output to the specified file instead of to standard output, which is the default. .TP .B -c, --minimize-output Format JSON output minimally instead of "pretty" (i.e., with indentation and newlines). .TP .B -l, --log-level \fIlevel\fR Display only information at the specified log priority or higher. Valid values (in increasing order of priority) are: "error", "warning", "info", and "debug". The default is "debug". .TP .B -h, --help Display the usage and exit. .SH EXIT CODES The exit codes are: .IP 0 Program terminated normally. .IP 1 Incorrect usage. .IP 2 Required package dependencies were not found. .IP 3 There was an error processing the input or saving the output. .IP 4 Program execution was interrupted, or an unknown error occurred. 
.SH SEE ALSO .BR dnsviz(1), .BR dnsviz-probe(1), .BR dnsviz-graph(1), .BR dnsviz-print(1), .BR dnsviz-query(1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz-print.10000644000175000017500000003162315001472663016223 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. .\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . .\" .TH dnsviz-print 1 "21 April 2025" "0.11.1" .SH NAME dnsviz-print \- print the assessment of diagnostic DNS queries .SH SYNOPSIS .B dnsviz \fBprint\fR [ \fIoptions\fR ] [ \fIdomain_name...\fR ] .SH DESCRIPTION Process the results of diagnostic DNS queries previously performed, e.g., using \fBdnsviz-probe(1)\fR, to assess the health of the associated DNS deployments for one or more domain names specified. The results of this processing are presented in textual output. The source of the diagnostic query input is either a file specified with \fB-r\fR or standard input. Domain names to be processed may be passed either as command-line arguments, in a file (using the \fB-f\fR option), or simply implied using the diagnostic query input. 
The latter is the preferred methodology (and the simplest) and is useful, except in cases where the input contains diagnostic queries for multiple domain names, only a subset of which are to be processed. If \fB-f\fR is not used and no domain names are supplied on the command line, then the domain names to be processed are extracted from the diagnostic query input. If the \fB-f\fR option is used, then names may not be specified on the command line. The domain names passed as input are fully-qualified domain names, such as example.com, www.example.com, _443._tcp.example.com, 1.2.0.192.in-addr.arpa, or 8.b.d.0.1.0.0.2.ip6.arpa. Because it is implied that specified domain names are fully qualified, no trailing dot is necessary. The output is appropriate for terminal or text file output, using colors (where supported by the terminal) and symbols to designate status and errors in a loosely-defined textual format. .SH OPTIONS .TP .B -f, --names-file \fIfilename\fR Read names from a file (one name per line), instead of from command line. If this option is used, then names may not be specified on the command line. .TP .B -r, --input-file \fIfilename\fR Read diagnostic query input from the specified file, instead of from standard input. .TP .B -t, --trusted-keys-file \fIfilename\fR Use trusted keys from the specified file when processing diagnostic queries. This overrides the default behavior of using the installed keys for the root zone. The format of this file is master zone file format and should contain DNSKEY records that correspond to one or more trusted keys for one or more DNS zones. This option may be used multiple times on the command line. .TP .B -S, --show-colors Use terminal colors even if output is not a TTY or colors are not supported. Normally \fBdnsviz-print(1)\fR detects terminal capabilities and uses colors only where supported. This option overrides this detection and always includes terminal colors in the output. 
.TP .B -a, --algorithms \fIalg\fR[,\fIalg...\fI] Support only the DNSSEC algorithms specified. If this option is used, any algorithms not specified will appear as "unsupported." The status of any RRSIG records corresponding to unsupported algorithms will be unknown. Additionally, when a zone has only DS records with unsupported algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. .TP .B -d, --digest-algorithms \fIdigest_alg\fR[,\fIdigest_alg...\fI] Support only the DNSSEC digest algorithms specified. If this option is used, any digest algorithms not specified will appear as "unsupported." The status of any DS records corresponding to unsupported digest algorithms will be unknown. Additionally, when a zone has only DS records with unsupported digest algorithms, the zone is treated as "insecure", assuming the DS records are properly authenticated. .TP .B --ignore-rfc8624 Ignore errors associated with RFC 8624, DNSSEC algorithm implementation requirements. RFC 8624 designates some DNSSEC signing algorithms and some DS digest algorithms as prohibited ("MUST NOT") or not recommended for validation and/or signing. If this option is used, then no warnings will be issued, and the code will still assess their cryptographic status, rather than ignoring them. .TP .B --ignore-rfc9276 Ignore errors associated with RFC 9276, NSEC3 parameter settings. RFC 9276 specifies that if NSEC3 is used, the iterations count must be 0 and the salt length must be 0. If this option is used, then no warnings will be issued for NSEC3 records that violate this specification. .TP .B -C, --enforce-cookies Enforce DNS cookies strictly. Require a server to return a "BADCOOKIE" response when a query contains a COOKIE option with no server cookie or with an invalid server cookie. .TP .B -P, --allow-private Allow private IP addresses for authoritative DNS servers. 
By default, if the IP address corresponding to an authoritative server is in IP address space designated as "private", it is flagged as an error. However, there are some cases where this is allowed. For example, if the diagnostic queries are issued to servers in an experimental environment, this might be permissible. .TP .B --trust-cdnskey-cds Trust all CDNSKEY and CDS records, even if they are not "signed with a key that is represented in both the current DNSKEY and DS RRsets" (RFC 7344). This is allowed if "the Parent uses the CDS or CDNSKEY RRset for initial enrollment; in that case, the Parent validates the CDS/CDNSKEY through some other means" (RFC 7344). Because there is no way for DNSViz to discover the out-of-band means with which the parent might have validated the CDNSKEY and/or CDS records, this trust is signaled with the use of the \fB--trust-cdnskey-cds\fR command-line option. .TP .B --multi-signer Don't issue errors for missing KSKs with DS RRs. Typically an error is issued if a given DNSKEY is not found in the DNSKEY RRset returned by one or more servers. If \fB--multi-signer\fR is specified, then no error is issued, in the case that 1) the DNSKEY is not signing any non-DNSKEY RRsets (i.e., is a zone-signing key or ZSK) and 2) the DNSKEY corresponds to a DS record in the parent. This corresponds to the Model 2 use case in RFC 8901. .TP .B -R, --rr-types \fItype\fR[,\fItype...\fR] Process queries of only the specified type(s) (e.g., A, AAAA). The default is to process all types queried as part of the diagnostic input. .TP .B -O, --derive-filename Save the output to a file, whose name is derived from the domain name. If this option is used when the diagnostic queries of multiple domain names are being processed, a file will be created for each domain name processed. .TP .B -o, --output-file \fIfilename\fR Write the output to the specified file instead of to standard output, which is the default. 
If this option is used when the diagnostic queries of multiple domain name are being processed, a single file (the one specified) will be created, which will contain the collective output for all domain names processed. .TP .B -h Display the usage and exit. .SH OUTPUT The following is an example of the output: .PD 0 \fB.\fP [.] .P [.] DNSKEY: 8/1518/256 [.], 8/19036/257 [.] .P [.] RRSIG: ./8/19036 (2015-08-20 - 2015-09-03) [.] .P \fBcom\fP [.] [.] .P [.] DS: 8/30909/2 [.] .P [.] RRSIG: ./8/1518 (2015-08-26 - 2015-09-05) [.] .P [.] DNSKEY: 8/30909/257 [.], 8/35864/256 [.] .P [.] RRSIG: com/8/30909 (2015-08-24 - 2015-08-31) [.] .P \fBexample.com\fP [.] [.] .P [.] DS: 8/31406/1 [.], 8/31406/2 [.], 8/31589/1 [-], 8/31589/2 [-], 8/43547/1 [-], 8/43547/2 [-] .P [.] RRSIG: com/8/35864 (2015-08-24 - 2015-08-31) [.] .P [.] DNSKEY: 8/54108/256 [.], 8/31406/257 [.], 8/63870/256 [.] .P [.] RRSIG: example.com/8/31406 (2015-08-24 - 2015-09-14) [.] .P \fBwww.example.com\fP .P [.] A: 192.0.2.1 .P [.] RRSIG: example.com/8/31406 (2015-08-24 - 2015-09-14) [.] .P \fBnon-existent.example.com\fP .P [.] A: NXDOMAIN .P [.] SOA: sns.dns.icann.org. noc.dns.icann.org. 2015082401 7200 3600 1209600 3600 .P [.] RRSIG: example.com/8/54108 (2015-08-24 - 2015-09-14) [.] .P [.] PROOF: [.] .P [.] NSEC: example.com. www.example.com. A NS SOA TXT AAAA RRSIG NSEC DNSKEY .P [.] RRSIG: example.com/8/54108 (2015-08-21 - 2015-09-11) [.] .PD .SS Domain Names The output above is divided into several sections, each corresponding to the domain name that starts the section (e.g., example.com). Following the headers of names that correspond to zones are two sets of characters, each within brackets. The characters within the first set of brackets represent the status of the zone. The characters within the second set of brackets represent the status of the delegation (note that this second set of bracketed characters will not be present for the root zone). 
The first character within each set of brackets is one of the following: .IP . secure zone or delegation .IP - insecure zone or delegation .IP ! bogus zone or delegation .IP ? lame or incomplete delegation .P If there is a second character within the brackets, it represents the following: .IP ! errors are present .IP ? warnings are present .P For example, an insecure delegation with warnings is represented as: [-?] And a secure delegation with no errors is shown as: [.] .SS Query Responses The lines in each section, below the header, represent responses to queries for that name from one or more servers. The bracketed characters at the far left of each line represent the status of the response or response component on the rest of the line. The first character in the brackets represents the authentication status: .IP . secure .IP - insecure .IP ! bogus .P If there is a second character within the brackets, it represents the following: .IP ! errors are present .IP ? warnings are present .P For example, an insecure status with warnings is represented as: [-?] And a secure status with no errors is shown as: [.] The status of the response is followed by the type corresponding to the query or response. For example, "A" means that data following is in response to a query of type A (IPv4 address) for the name of the corresponding section. When the response is positive (i.e., there is data in the answer section), the corresponding data is shown on the right (with some exceptions) as a comma-separated set of records within the RRset. DNSKEY, DS, and RRSIG records show an abbreviated format of their records, as follows: .IP DNSKEY: // Example: 8/35864/256 .IP DS: // Example: 8/30909/2 .IP RRSIG: // ( - ) Example: com/8/35864 (2015-08-24 - 2015-08-31) .P Following each record within a DNSKEY, DS, or RRSIG response is a bracketed set of characters, the first of which represents validity: .IP . valid .IP - indeterminate .IP ! invalid/expired/premature .IP ? 
indeterminate due to unknown algorithm .P If there is a second character within the brackets, it represents the following: .IP ! errors are present .IP ? warnings are present .P For example, a DNSKEY with warnings is shown as: [.?] A DS corresponding to a non-existent DNSKEY is represented as: [-]. RRSIGs are shown below the RRset they cover, indented from the RRset. .SS Negative Responses If a response is negative, then the appropriate "NODATA" or "NXDOMAIN" text is shown adjacent the type queried, e.g., "A: NXDOMAIN". If there was an SOA record and/or NSEC(3) proof, then they are listed below, indented from the query type. The NSEC or NSEC3 records (and their RRSIGs) comprising a proof are grouped by indentation under the title "PROOF" which is itself indented under the negative response line. Following "PROOF" is a bracketed set of characters with the same meaning as those used for DS, DNSKEY, and RRSIG. .SS Errors and Warnings Textual errors and warnings are listed below the response components with which the issues are associated. Each error or warning is listed on its own line and prefaced with "E:" or "W:", signifying whether it is an error or warning, respectively. .SH EXIT CODES The exit codes are: .IP 0 Program terminated normally. .IP 1 Incorrect usage. .IP 2 Required package dependencies were not found. .IP 3 There was an error processing the input or saving the output. .IP 4 Program execution was interrupted, or an unknown error occurred. .SH SEE ALSO .BR dnsviz(1), .BR dnsviz-probe(1), .BR dnsviz-grok(1), .BR dnsviz-graph(1), .BR dnsviz-query(1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz-probe.10000644000175000017500000002677115001472663016206 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. 
.\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . .\" .TH dnsviz-probe 1 "21 April 2025" "0.11.1" .SH NAME dnsviz-probe \- issue diagnostic DNS queries .SH SYNOPSIS .B dnsviz \fBprobe\fR [ \fIoptions\fR ] [ \fIdomain_name...\fR ] .SH DESCRIPTION Perform a series of diagnostic queries of specified names to either recursive (default) or authoritative DNS servers, the results of which are serialized into JSON format. Its output is used to assess the health of DNS deployments, using, e.g., \fBdnsviz-grok(1)\fR and \fBdnsviz-graph(1)\fR. Domain names to be processed may be passed either as command-line arguments or in a file (using the \fB-f\fR option). When the \fB-r\fR option is used, then the domain names can simply be implied using the diagnostic query input. Domain names are extracted from the diagnostic query input in conjunction with \fB-r\fR only when \fB-f\fR is not used and no domain names are supplied on the command line. If the \fB-f\fR option is used, then names may not be specified on the command line. The domain names passed as input are fully-qualified domain names, such as example.com, www.example.com, _443._tcp.example.com, 1.2.0.192.in-addr.arpa, or 8.b.d.0.1.0.0.2.ip6.arpa. Because it is implied that specified domain names are fully qualified, no trailing dot is necessary. 
.SH OPTIONS .TP .B -f, --names-file \fIfilename\fR Read names from a file (one name per line), instead of from command line. If this option is used, then names may not be specified on the command line. .TP .B -d, --debug \fIlevel\fR Set debug level to a value from 0 to 3, with increasing verbosity. The default is "2" (informational-level output). .TP .B -r, --input-file \fIfilename\fR Read diagnostic query input from the specified file, instead of querying servers. Specify "-" to read from standard input. .TP .B -t, --threads \fIthreads\fR Issue diagnostic queries for different names in parallel using the specified number of threads. The default is to execute diagnostic queries of names serially. .TP .B -4, --ipv4 Use IPv4 only. .TP .B -6, --ipv6 Use IPv6 only. .TP .B -b, --source-ip \fIaddress\fR Use the specified source IPv4 or IPv6 address for queries, rather than detecting it. This option can be used more than once to supply both an IPv4 and an IPv6 address. The use of this option is sometimes necessary when using a dual-homed machine, and it is desirable to use the non-default interface for queries. .TP .B -u, --looking-glass-url \fIurl\fR Issue queries through the DNS looking glass at the specified URL (HTTP(S) or SSH). The queries will appear to come from the looking glass rather than from the local machine. .RS .RS Examples: .RS .P .PD 0 Issue DNS queries from www.example.com using the cgi script dnsviz-lg.cgi: .P http://www.example.com/cgi-bin/dnsviz-lg.cgi .PD .P .PD 0 Same, but use HTTP Basic authentication: .P http://username:password@www.example.com/cgi-bin/dnsviz-lg.cgi .PD .P .PD 0 Issue DNS queries from host.example.com on which DNSViz is also installed. .P ssh://username@host.example.com .PD .RE .P .RE Note that a looking glass that uses https is only supported when using python version 2.7.9 or greater. .RE .TP .B -k, --insecure Do not verify the server-side TLS certificate for a HTTPS-based DNS looking glass that was specified using \fB-u\fR. 
.TP .B -a, --ancestor \fIancestor\fR Issue diagnostic queries of each domain name through the specified ancestor. The default for recursive mode is "." (i.e., issue queries all the way to the root). The default for authoritative mode (i.e., with \fB-A\fR) is the domain name itself. .TP .B -R, --rr-types \fItype\fR[,\fItype...\fR] Issue diagnostic queries for only the specified type(s) (e.g., A, AAAA). The default is to pick query types based on the nature of the name (e.g., the number of labels, whether it is a subdomain of .arpa, labels indicating association to TLSA or SRV records, etc.) and whether there are NS records detected (i.e., it is a zone). .TP .B -s, --recursive-servers \fIserver\fR[,\fIserver...\fR] Query the specified recursive resolver(s), rather than using those specified in \fI/etc/resolv.conf\fR. Each server specified may either be an address (IPv4 or IPv6), a domain name (which will be resolved to an address using the standard resolution process), or both, using the syntax \fIname\fR\fB=\fR\fIaddress\fR. Note that when both a name and an address are specified (\fIname\fR\fB=\fR\fIaddress\fR), the name is only used for identification purposes, and it doesn't matter whether the name resolves to the corresponding address (or at all, for that matter). IPv6 addresses must be wrapped in square brackets, e.g., "[2001:db8::1]". Each server value may optionally be suffixed with a numeric port on which the server should be contacted. If not specified, the standard DNS port, 53, is used. The following are example server values: .RS .RS .P .PD 0 ns1.example.com .P ns1.example.com:5333 .P ns1.example.com=192.0.2.1 .P ns1.example.com=[2001:db8::1] .P ns1.example.com=[2001:db8::1]:5333 .P 192.0.2.1 .PD .RE This option cannot be used in conjunction with \fB-A\fR. .RE .TP .B -A, --authoritative-analysis Query authoritative servers, rather than (the default) recursive servers. 
.TP .B -x, --authoritative-servers \fIdomain\fR[\fB+\fR]\fB:\fR\fIserver\fR[,\fIserver...\fR] Treat the specified servers as authoritative for a domain, rather than learning authoritative servers by following delegations. This option dictates which servers will be queried for a domain, but the servers specified will not be used to check NS or glue record consistency with the child; for that behavior, see \fB-N\fR. The default behavior is to identify and query servers authoritative for ancestors of the specified domain, if other options so dictate. However, if the domain ends in "+", then queries aren't issued for servers authoritative for ancestor domains of the domain. For example, with the following command: .RS .RS dnsviz probe -A -x example.com:ns1.example.com example.com .RE the com servers will be queried for DS records for example.com. However, if the following is used: .RS dnsviz probe -A -x example.com+:ns1.example.com example.com .RE no queries are performed at com servers or above, including DS records for example.com. See \fB-s\fR for the syntax used for designating servers. However, unlike the \fB-s\fR option, a zone file may be specified in lieu of a server name and/or address, in which case an instance of \fBnamed(8)\fR is started, the zone is served from that instance, and queries for the domain are directed to the local instance of \fBnamed(8)\fR serving that zone. For example, if example.com.zone is a file containing the contents of the example.com zone, the following command could be used to specify that the zone file should be used: .RS dnsviz probe -A -x example.com:example.com.zone example.com .RE This option may be used multiple times on the command line. This option can only be used in conjunction with \fB-A\fR. 
.RE .TP .B -N, --delegation-information \fIdomain\fR\fB:\fR\fIserver\fR[,\fIserver...\fR] Use the specified delegation information for a domain, i.e., the NS and glue records for the domain, which would be served by the domain's parent. This is used for testing new delegations or testing a potential change to a delegation. This option has similar usage to that of the \fB-x\fR option. The major difference is that the server names supplied comprise the NS record set, and the addresses supplied represent glue records. Thus if there are discrepancies between the authoritative responses for the NS RRset and glue and what is supplied on the command line, an error will be reported when the output is subsequently assessed, e.g., using \fBdnsviz-grok(1)\fR. In lieu of specifying the record data itself on the command line, a file may be specified, which contains the delegation NS and glue records for the domain. .TP .B -D, --ds \fIdomain\fR\fB:\fR\fIds\fR[,\fIds...\fR] Use the specified delegation signer (DS) records for a domain. This is used in conjunction with the \fB-N\fR option for testing the introduction or change of DS records. The DS records themselves are specified using the textual representation of their record data. For example the following DS records for example.com: .RS .RS .P .PD 0 31589 8 1 3490A6806D47F17A34C29E2CE80E8A999FFBE4BE .P 31589 8 2 CDE0D742D6998AA554A92D890F8184C698CFAC8A26FA59875A990C03 E576343C .PD .RE would be specified by passing this value to \fB-D\fR: .RS .PD 0 "31589 8 1 3490A6806D47F17A34C29E2CE80E8A999FFBE4BE, .P 31589 8 2 CDE0D742D6998AA554A92D890F8184C698CFAC8A26FA59875A990C03 E576343C" .PD .RE In lieu of specifying the record data itself on the command line, a file may be specified, which contains the DS records. For example: .RS dnsviz probe -D example.com:dsset-example.com. .RE This option must be used in conjunction with the \fB-N\fR option. .RE .TP .B -n, --nsid Use the NSID EDNS option with every DNS query issued. 
.TP .B -e, --client-subnet \fIsubnet\fR[\fB:\fR\fIprefix_len\fR] Use the EDNS Client Subnet option with every DNS query issued, using the specified \fIsubnet\fR and \fIprefix_len\fR as values. If \fIprefix\fR is not specified, the prefix is the length of the entire address. .TP .B -c, --cookie \fIcookie\fR Send the specified DNS client cookie with every DNS query issued. The value specified is for a client cookie only and thus should be exactly 64 bits long. The value for the cookie is specified using hexadecimal representation, e.g., deadbeef1580f00d. If the \fB-c\fR option is not used, the default behavior is for a DNS client cookie to be generated randomly to be sent with queries. If an empty string is specified, then DNS cookies are disabled. .TP .B -E, --edns Issue queries to check EDNS compatibility of servers. If this option is used, each server probed will be queried with "future" EDNS settings, the respective responses can later be assessed for proper behavior. These settings include future EDNS versions (i.e., > 0), unknown options, and unknown flags. .TP .B -o, --output-file \fIfilename\fR Write the output to the specified file instead of to standard output, which is the default. .TP .B -p, --pretty-output Output "pretty" instead of minimal JSON output, i.e., using indentation and newlines. Note that this is the default when the output is a TTY. .TP .B -h Display the usage and exit. .SH EXIT CODES The exit codes are: .IP 0 Program terminated normally. .IP 1 Incorrect usage. .IP 2 The network was unavailable for diagnostic queries. .IP 3 There was an error processing the input or saving the output. .IP 4 Program execution was interrupted, or an unknown error occurred. .IP 5 No recursive resolvers found in resolv.conf nor given on command line. 
.SH SEE ALSO .BR dnsviz(1), .BR dnsviz-grok(1), .BR dnsviz-graph(1), .BR dnsviz-print(1), .BR dnsviz-query(1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz-query.10000644000175000017500000001021715001472663016230 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. .\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . .\" .TH dnsviz-query 1 "21 April 2025" "0.11.1" .SH NAME dnsviz-query \- assess a DNS query .SH SYNOPSIS .B dnsviz \fBquery\fR [ \fB@\fR\fIserver\fR ] [ \fB-b\fR \fIaddress\fR ] [ \fB-c\fR \fIclass\fR ] [ \fB-q\fR \fIname\fR ] [ \fB-t\fR \fItype\fR ] [ \fB-x\fR \fIaddr\fR ] [ \fB-4\fR ] [ \fB-6\fR ] [ \fIname\fR ] [ \fItype\fR ] [ \fIclass\fR ] [ \fIqueryopt...\fR ] .HP 4 \fBdnsviz query\fR [ \fB-h\fR ] .HP 4 \fBdnsviz query\fR [ \fIglobal\-queryopt...\fR ] [ \fIquery...\fR ] .SH DESCRIPTION Issue queries for the names and types specified on the command line to DNS servers, process the output, and present it to the user. \fBdnsviz-query\fR adopts a subset of the command-line options used by ISC's \fBdig(1)\fR but uses \fBdnsviz-probe(1)\fR and \fBdnsviz-print(1)\fR to issue queries and assess the results. 
It is meant to be a shortcut for quickly assessing DNS queries and diagnosing problems by invoking \fBdnsviz-probe(1)\fR and \fBdnsviz-print(1)\fR with their most commonly used options and with a usage familiar similar to (but not equal to) \fBdig(1)\fR. For a more detailed analysis, revealing the nature of errors, the user should use \fBdnsviz-probe(1)\fR, \fBdnsviz-grok(1)\fR, \fBdnsviz-print(1)\fR, and \fBdnsviz-graph(1)\fR directly. By default diagnostic queries are sent to recursive servers. If no servers are specified with "@", then the first server in /etc/resolv.conf is selected. Note that unlike \fBdig(1)\fR \fBdnsviz-query(1)\fR only tries one server. If \fB+trace\fR is used, the behavior changes, and the root authoritative servers are first queried and referrals are followed downwards to the name(s) in question. To simplify usage a default root trusted key if none is specified on the command line with \fB+trusted-key\fR. .SH OPTIONS .TP .B -x \fIaddress\fR Specify and IPv4 or IPv6 address for a reverse DNS (i.e., PTR) lookup. .TP .B -b \fIaddress\fR Specify a source IPv4 or IPv6 address for queries, rather than detecting it. This option can be used more than once to supply both an IPv4 and an IPv6 address. The use of this option is sometimes necessary when using a dual-homed machine, and it is desirable to use the non-default interface for queries. .TP .B -q \fIname\fR Specify a query name. .TP .B -t \fItype\fR Specify a query type. .TP .B -c \fIclass\fR Specify a query class (currently, only IN is supported; all other classes are ignored). .TP .B -4 Use IPv4 only. If this is specified, then queries over IPv6 are not attempted, unless \fB-6\fR is also specified. .TP .B -6 Use IPv6 only. If this is specified, then queries over IPv4 are not attempted, unless \fB-4\fR is also specified. .SH QUERY OPTIONS .TP .B +[no]trace Query authoritative servers, beginning at the DNS root, and follow referrals downwards. 
The default is to query a recursive server instead. .TP .B +trusted-key\fR=\fIfilename\fI Specify a file that contains trusted keys for processing diagnostic queries. The format of this file is master zone file format and should contain DNSKEY records that correspond to one more trusted keys for one or more DNS zones. .SH EXIT CODES The exit codes are: .IP 0 Program terminated normally. .IP 1 Program execution was interrupted, or an unknown error occurred. .SH SEE ALSO .BR dnsviz(1), .BR dnsviz-probe(1), .BR dnsviz-grok(1), .BR dnsviz-graph(1), .BR dnsviz-print(1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/man/dnsviz.10000644000175000017500000000506515001472663015072 0ustar00caseycasey.\" .\" This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, .\" analysis, and visualization. .\" Created by Casey Deccio (casey@deccio.net) .\" .\" Copyright 2015-2016 VeriSign, Inc. .\" .\" Copyright 2016-2024 Casey Deccio .\" .\" DNSViz is free software; you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation; either version 2 of the License, or .\" (at your option) any later version. .\" .\" DNSViz is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License along .\" with DNSViz. If not, see . 
.\" .TH dnsviz 1 "21 April 2025" "0.11.1" .SH NAME dnsviz \- issue and assess diagnostic DNS queries .SH SYNOPSIS .P .B dnsviz [ \fIoptions \fR ] \fIcommand\fR [ \fIargs\fR ] .TP .B dnsviz [ \fIoptions \fR ] \fBhelp\fR [ \fIcommand\fR ] .SH DESCRIPTION .P .B dnsviz is a tool for assessing the health of DNS deployments by issuing diagnostic queries, assessing the responses, and outputting the results in one of several formats. The assessment may be directed towards recursive or authoritative DNS servers, and the output may be textual, graphical, or serialized for programmatic review. .P .B dnsviz is invoked by specifying one of its commands as the first argument on the command line, followed by arguments specific to that command. If the first argument is "help", then the usage of the command specified by the second argument is displayed. If no second argument is provided, then a general usage message is given. .SH OPTIONS .TP .B -v Show version information and exit. .TP .B -p \fIpath\fR Add a path to the python path. .SH COMMANDS .TP .B probe Issue diagnostic DNS queries. See \fBdnsviz-probe(1)\fR. .TP .B grok Assess diagnostic DNS queries See \fBdnsviz-grok(1)\fR. .TP .B graph Graph the assessment of diagnostic DNS queries See \fBdnsviz-graph(1)\fR. .TP .B print Process diagnostic DNS queries to textual output See \fBdnsviz-print(1)\fR. .TP .B query Assess a DNS query See \fBdnsviz-query(1)\fR. .TP .B "\fBhelp\fR [ \fIcommand\fR ]" Show usage generally, or for a specific command. .SH EXIT CODES The exit codes are: .TP .B 0 Program terminated normally. .TP .B 1 Incorrect usage. 
.SH "SEE ALSO" .BR dnsviz-probe(1), .BR dnsviz-grok(1), .BR dnsviz-graph(1), .BR dnsviz-print(1), .BR dnsviz-query(1) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/doc/src/0000755000175000017500000000000015001473074013476 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/alias.dot0000644000175000017500000000231115001472663015277 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-10|www.example.com|CNAME" [color="#000000", fillcolor="#ffffff", label=<www.example.com/CNAME>, shape=rectangle, style="rounded,filled"]; "RRset-11|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<foobar.example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|www.example.com|CNAME" [style=invis]; "cluster_example.com_bottom" -> "RRset-11|foobar.example.com|A" [style=invis]; "RRset-11|foobar.example.com|A" -> "RRset-10|www.example.com|CNAME" [dir=back, style=solid, color="#000000"]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/cdnskey-cds-nodnskey.dot0000644000175000017500000000313415001472663020251 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#d0d0d0", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style="filled,dashed"]; "RRset-10|example.com|CDNSKEY" [color="#000000", fillcolor="#ffffff", label=<example.com/CDNSKEY>, shape=rectangle, style="rounded,filled"]; "RRset-11|example.com|CDS" [color="#000000", fillcolor="#ffffff", label=<example.com/CDS>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|CDNSKEY" [style=invis]; "cluster_example.com_bottom" -> "RRset-11|example.com|CDS" [style=invis]; "RRset-10|example.com|CDNSKEY" -> "DNSKEY-0|example.com|8|12345" [ color="lightgray", style="solid"]; "RRset-11|example.com|CDS" -> "DNSKEY-0|example.com|8|12345" [ color="lightgray", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/cdnskey-cds.dot0000644000175000017500000000312515001472663016421 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|example.com|CDNSKEY" [color="#000000", fillcolor="#ffffff", label=<example.com/CDNSKEY>, shape=rectangle, style="rounded,filled"]; "RRset-11|example.com|CDS" [color="#000000", fillcolor="#ffffff", label=<example.com/CDS>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|CDNSKEY" [style=invis]; "cluster_example.com_bottom" -> "RRset-11|example.com|CDS" [style=invis]; "RRset-10|example.com|CDNSKEY" -> "DNSKEY-0|example.com|8|54321" [ color="lightgray", style="solid"]; "RRset-11|example.com|CDS" -> "DNSKEY-0|example.com|8|54321" [ color="lightgray", style="solid"]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/delegation-bogus.dot0000644000175000017500000000203715001472663017443 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_example.com_top" -> cluster_com_bottom [color="#be1515", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=dashed]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/delegation-incomplete.dot0000644000175000017500000000203715001472663020463 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_example.com_top" -> cluster_com_bottom [color="#f4b800", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=dashed]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/delegation-lame.dot0000644000175000017500000000203715001472663017242 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_example.com_top" -> cluster_com_bottom [color="#f4b800", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=dashed]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/delegation-secure.dot0000644000175000017500000000203615001472663017611 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_example.com_top" -> cluster_com_bottom [color="#0a879a", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/delegation.dot0000644000175000017500000000203615001472663016325 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dname-invalid.dot0000644000175000017500000000374515001472663016732 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-12|example.com|DNAME" [color="#000000", fillcolor="#ffffff", label=<example.com/DNAME>, shape=rectangle, style="rounded,filled"]; "RRset-10|www.example.com|CNAME" [color="#000000", fillcolor="#ffffff", label=<www.example.com/CNAME>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|www.example.com|CNAME" [style=invis]; "cluster_example.com_bottom" -> "RRset-12|example.com|DNAME" [style=invis]; "RRset-10|www.example.com|CNAME" -> "RRset-12|example.com|DNAME" [style=solid, color="#be1515", dir=back]; subgraph "cluster_example.net" { graph [label=<
example.net
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.net_top" [shape=point, style=invis]; "cluster_example.net_bottom" [shape=point, style=invis]; "RRset-11|www.example.net|A" [color="#000000", fillcolor="#ffffff", label=<www.example.net/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.net_bottom" -> "RRset-11|www.example.net|A" [style=invis]; "RRset-11|www.example.net|A" -> "RRset-10|www.example.com|CNAME" [dir=back, style=solid, color="#000000", constraint=false]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dname.dot0000644000175000017500000000374515001472663015306 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-12|example.com|DNAME" [color="#000000", fillcolor="#ffffff", label=<example.com/DNAME>, shape=rectangle, style="rounded,filled"]; "RRset-10|www.example.com|CNAME" [color="#000000", fillcolor="#ffffff", label=<www.example.com/CNAME>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|www.example.com|CNAME" [style=invis]; "cluster_example.com_bottom" -> "RRset-12|example.com|DNAME" [style=invis]; "RRset-10|www.example.com|CNAME" -> "RRset-12|example.com|DNAME" [style=solid, color="#0a879a", dir=back]; subgraph "cluster_example.net" { graph [label=<
example.net
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.net_top" [shape=point, style=invis]; "cluster_example.net_bottom" [shape=point, style=invis]; "RRset-11|www.example.net|A" [color="#000000", fillcolor="#ffffff", label=<www.example.net/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.net_bottom" -> "RRset-11|www.example.net|A" [style=invis]; "RRset-11|www.example.net|A" -> "RRset-10|www.example.com|CNAME" [dir=back, style=solid, color="#000000", constraint=false]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dnskey-revoke.dot0000644000175000017500000000157415001472663017006 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled, penwidth=4.0]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dnskey-sep.dot0000644000175000017500000000137015001472663016274 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dnskey-trust-anchor.dot0000644000175000017500000000157515001472663020145 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#0a879a", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled, peripheries=2]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/dnskey.dot0000644000175000017500000000136515001472663015513 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds-invalid-digest.dot0000644000175000017500000000325215001472663017522 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=solid, color="#be1515", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds-invalid.dot0000644000175000017500000000345615001472663016253 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled, penwidth=4.0]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=dashed, color="#be1515", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds-nodnskey.dot0000644000175000017500000000326215001472663016452 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#d0d0d0", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style="filled,dashed"]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=dashed, color="#d0d0d0", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds-pre-revoke.dot0000644000175000017500000000327415001472663016702 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled, penwidth=4.0]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=dashed, color="#d0d0d0", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds-unknown-alg.dot0000644000175000017500000000324215001472663017056 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-19|example.com|8|54321|19" [color="#000000", fillcolor="#ffffff", label=<DS
digest alg=19>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-19|example.com|8|54321|19" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-19|example.com|8|54321|19" [style=solid, color="#f4b800", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/ds.dot0000644000175000017500000000325215001472663014621 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=solid, color="#0a879a", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/edges-errors.dot0000644000175000017500000000116215001472663016612 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; bottom_left [shape=point, style=invis]; bottom_right [shape=point, style=invis]; top_left [shape=point, style=invis]; top_right [shape=point, style=invis]; bottom_left -> top_left [style=solid, label=<
>, dir=back, color="#0a879a"]; bottom_right -> top_right [style=solid, label=<
>, dir=back, color="#0a879a", penwidth=5.0]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/edges-warnings.dot0000644000175000017500000000116615001472663017132 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; bottom_left [shape=point, style=invis]; bottom_right [shape=point, style=invis]; top_left [shape=point, style=invis]; top_right [shape=point, style=invis]; bottom_left -> top_left [style=solid, label=<
>, dir=back, color="#0a879a"]; bottom_right -> top_right [style=solid, label=<
>, dir=back, color="#0a879a", penwidth=5.0]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/error.svg0000644000175000017500000000472515001472663015363 0ustar00caseycasey ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/logo.svg0000755000175000017500000001374415001472663015176 0ustar00caseycasey ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodata.dot0000644000175000017500000000210415001472663015454 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-1|example.com|CNAME" [color="#d0d0d0", fillcolor="#ffffff", label=<example.com/CNAME>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-10|example.com|SOA" [color="#000000", fillcolor="#ffffff", label=<example.com/SOA>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-1|example.com|CNAME" [style=invis]; "cluster_example.com_bottom" -> "RRset-10|example.com|SOA" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodes-bogus.dot0000644000175000017500000000370115001472663016437 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "DS-1_2|example.com|8|54321|1_2" [color="#be1515", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|12345" [color="#be1515", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "NSEC-0|foobar.example.com|A" [color="#be1515", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "RRset-10|example.com|A" [color="#be1515", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; "RRset-1|example.com|CNAME" [color="#e5a1a1", fillcolor="#ffffff", label=<example.com/CNAME>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-0|foobar.example.com|A" [color="#e5a1a1", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "NSEC-0|foobar.example.com|A" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "RRset-1|example.com|CNAME" -> "RRset-10|example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodes-errors.dot0000644000175000017500000000530515001472663016636 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<
DS
digest algs=1,2
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<
DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<
example.com/A
>, shape=rectangle, style="rounded,filled"]; "RRset-1|example.com|CNAME" [color="#d0d0d0", fillcolor="#ffffff", label=<
example.com/CNAME
>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<
foobar.example.com
>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "NSEC-0|foobar.example.com|A" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "RRset-1|example.com|CNAME" -> "RRset-10|example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodes-insecure.dot0000644000175000017500000000370115001472663017135 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; "RRset-1|example.com|CNAME" [color="#d0d0d0", fillcolor="#ffffff", label=<example.com/CNAME>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "NSEC-0|foobar.example.com|A" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "RRset-1|example.com|CNAME" -> "RRset-10|example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodes-secure.dot0000644000175000017500000000370115001472663016606 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "DS-1_2|example.com|8|54321|1_2" [color="#0a879a", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|12345" [color="#0a879a", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "NSEC-0|foobar.example.com|A" [color="#0a879a", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "RRset-10|example.com|A" [color="#0a879a", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; "RRset-1|example.com|CNAME" [color="#9dcfd6", fillcolor="#ffffff", label=<example.com/CNAME>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-0|foobar.example.com|A" [color="#9dcfd6", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "NSEC-0|foobar.example.com|A" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "RRset-1|example.com|CNAME" -> "RRset-10|example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nodes-warnings.dot0000644000175000017500000000532115001472663017150 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<
DS
digest algs=1,2
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<
DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<
example.com/A
>, shape=rectangle, style="rounded,filled"]; "RRset-1|example.com|CNAME" [color="#d0d0d0", fillcolor="#ffffff", label=<
example.com/CNAME
>, shape=rectangle, style="rounded,filled,dashed"]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<
foobar.example.com
>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "NSEC-0|foobar.example.com|A" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "RRset-1|example.com|CNAME" -> "RRset-10|example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec-ds.dot0000644000175000017500000000320415001472663015544 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "NSEC-0|example.com|DS" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "NSEC-0|example.com|DS" [style=invis]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; "cluster_example.com_top" -> "NSEC-0|example.com|DS" [color="#0a879a", dir=back, ltail="cluster_example.com", minlen=2, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec-invalid.dot0000644000175000017500000000260115001472663016564 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [color="#be1515", dir=back, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec-partial-bogus.dot0000644000175000017500000000113715001472663017712 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; "NSEC-0|foobar.example.com|A" [color="#be1515", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec.dot0000644000175000017500000000260115001472663015140 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [color="#0a879a", dir=back, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec3-optout.dot0000644000175000017500000000260615001472663016560 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC3-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC3
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC3-0|foobar.example.com|A" [color="#0a879a", dir=back, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nsec3.dot0000644000175000017500000000270315001472663015226 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC3-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC3
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC3-0|foobar.example.com|A" [color="#0a879a", dir=back, style=solid]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/nxdomain.dot0000644000175000017500000000212515001472663016026 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "RRset-10|example.com|SOA" [color="#000000", fillcolor="#ffffff", label=<example.com/SOA>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "cluster_example.com_bottom" -> "RRset-10|example.com|SOA" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/response-error.dot0000644000175000017500000000156615001472663017206 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-2|example.com|A" [color="#000000", fillcolor="#ffffff", label=<
example.com/A
>, shape=none]; } "cluster_example.com_bottom" -> "RRset-2|example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/response-warning.dot0000644000175000017500000000157015001472663017515 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-2|example.com|A" [color="#000000", fillcolor="#ffffff", label=<
example.com/A
>, shape=none]; } "cluster_example.com_bottom" -> "RRset-2|example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrset.dot0000644000175000017500000000143115001472663015347 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-dnskey-pruned.dot0000644000175000017500000000273015001472663020127 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "cluster_example.com_bottom" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-dnskey-redundant.dot0000644000175000017500000000313615001472663020617 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid", constraint=false]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "cluster_example.com_bottom" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-dnskey.dot0000644000175000017500000000331015001472663016627 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|67890" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=67890
1024 bits
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "DNSKEY-0|example.com|8|54321" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|67890" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#0a879a", style="solid"]; "cluster_example.com_bottom" -> "DNSKEY-0|example.com|8|12345" [style=invis]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-ds.dot0000644000175000017500000000557515001472663015757 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; edge [penwidth=1.5]; node [label="\N", penwidth=1.5 ]; subgraph "cluster_com" { graph [label=<
com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "DS-1_2|example.com|8|54321|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "DS-1_2|example.com|8|09876|1_2" [color="#000000", fillcolor="#ffffff", label=<DS
digest algs=1,2>, shape=ellipse, style=filled]; "cluster_com_top" [shape=point, style=invis]; "cluster_com_bottom" [shape=point, style=invis]; } subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=54321
1024 bits
>, shape=ellipse, style=filled]; "DNSKEY-0|example.com|8|09876" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=09876
1024 bits
>, shape=ellipse, style=filled]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } "cluster_com_bottom" -> "DS-1_2|example.com|8|54321|1_2" [style=invis]; "cluster_com_bottom" -> "DS-1_2|example.com|8|09876|1_2" [style=invis]; "DNSKEY-0|example.com|8|54321"-> "DS-1_2|example.com|8|54321|1_2" [style=solid, color="#0a879a", dir=back]; "DNSKEY-0|example.com|8|09876"-> "DS-1_2|example.com|8|09876|1_2" [style=solid, color="#0a879a", dir=back]; "cluster_example.com_top" -> cluster_com_bottom [color="#000000", dir=back, lhead=cluster_com, ltail="cluster_example.com", minlen=2, penwidth=5.0, style=solid]; "DS-1_2|example.com|8|54321|1_2" -> "DNSKEY-0|com|8|12345" [dir=back, color="#0a879a", style="solid"]; "DS-1_2|example.com|8|09876|1_2" -> "DNSKEY-0|com|8|12345" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; "DNSKEY-0|com|8|12345" -> "cluster_com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-nsec.dot0000644000175000017500000000363715001472663016276 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "NSEC-0|foobar.example.com|A":"nsec1" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "NSEC-0|foobar.example.com|A":"nsec2" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "RRset-0|foobar.example.com|A" -> "NSEC-0|foobar.example.com|A" [color="#0a879a", dir=back, style=solid]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-expired.dot0000644000175000017500000000230115001472663020126 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#6131a3", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-invalid-sig.dot0000644000175000017500000000230115001472663020674 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#be1515", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-invalid.dot0000644000175000017500000000250515001472663020122 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled, penwidth=4.0]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "DNSKEY-0|example.com|8|12345" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#be1515", style="dashed"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-nodnskey.dot0000644000175000017500000000231315001472663020323 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#d0d0d0", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style="filled,dashed"]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#d0d0d0", style="dashed"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-pre-revoke.dot0000644000175000017500000000232615001472663020554 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|54321" [color="#000000", fillcolor="lightgray", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled, penwidth=4.0]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|54321" [dir=back, color="#d0d0d0", style="dashed"]; "DNSKEY-0|example.com|8|54321" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset-unknown-alg.dot0000644000175000017500000000230515001472663020732 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|22|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=22, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|22|12345" [dir=back, color="#f4b800", style="solid"]; "DNSKEY-0|example.com|22|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/rrsig-rrset.dot0000644000175000017500000000230115001472663016470 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|example.com|A" [color="#000000", fillcolor="#ffffff", label=<example.com/A>, shape=rectangle, style="rounded,filled"]; } "cluster_example.com_bottom" -> "RRset-10|example.com|A" [style=invis]; "RRset-10|example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "DNSKEY-0|example.com|8|12345" -> "cluster_example.com_top" [style=invis]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/warning.svg0000644000175000017500000000412515001472663015671 0ustar00caseycasey ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/wildcard.dot0000644000175000017500000000436615001472663016013 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; "DNSKEY-0|example.com|8|12345" [color="#000000", fillcolor="#ffffff", label=<DNSKEY
alg=8, id=12345
1024 bits
>, shape=ellipse, style=filled]; "RRset-10|*.example.com|A" [color="#000000", fillcolor="#ffffff", label=<*.example.com/A>, shape=rectangle, style="rounded,filled"]; "RRset-0|foobar.example.com|A" [color="#d0d0d0", fillcolor="#ffffff", label=<foobar.example.com>, shape=rectangle, style="rounded,filled,dashed,diagonals"]; "NSEC3-0|foobar.example.com|A" [color="#000000", fillcolor="#ffffff", label=<
NSEC3
>, shape=none, style=filled]; } "cluster_example.com_bottom" -> "RRset-10|*.example.com|A" [style=invis]; "cluster_example.com_bottom" -> "RRset-0|foobar.example.com|A" [style=invis]; "RRset-0|foobar.example.com|A" -> "NSEC3-0|foobar.example.com|A" [color="#0a879a", dir=back, style=solid]; "RRset-10|*.example.com|A" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "NSEC3-0|foobar.example.com|A":"nsec1" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; "NSEC3-0|foobar.example.com|A":"nsec2" -> "DNSKEY-0|example.com|8|12345" [dir=back, color="#0a879a", style="solid"]; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/zone-errors.dot0000644000175000017500000000107115001472663016475 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/zone-warnings.dot0000644000175000017500000000107315001472663017013 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/doc/src/zone.dot0000644000175000017500000000100515001472663015160 0ustar00caseycaseydigraph { graph [compound=true, rankdir=BT, ranksep=0.3]; node [label="\N", penwidth=1.5 ]; edge [penwidth=1.5]; subgraph "cluster_example.com" { graph [label=<
example.com
(2015-02-06 12:55:32 UTC)
>, labeljust=l, penwidth=0.5 ]; "cluster_example.com_top" [shape=point, style=invis]; "cluster_example.com_bottom" [shape=point, style=invis]; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/requirements.txt0000644000175000017500000000004215001472663015425 0ustar00caseycaseydnspython pygraphviz cryptography ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/setup.cfg0000644000175000017500000000016415001473074013764 0ustar00caseycasey[bdist_wheel] universal = 1 [metadata] license_files = LICENSE COPYRIGHT [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/setup.py0000644000175000017500000001623415001472663013665 0ustar00caseycasey#!/usr/bin/env python from __future__ import unicode_literals import codecs import glob import os import stat import subprocess import sys try: import setuptools except ImportError: from distutils.core import setup from distutils.command.install import install from distutils.command.build_py import build_py else: from setuptools import setup from setuptools.command.install import install from setuptools.command.build_py import build_py JQUERY_UI_PATH = "'https://code.jquery.com/ui/1.11.4/jquery-ui.min.js'" JQUERY_UI_CSS_PATH = "'https://code.jquery.com/ui/1.11.4/themes/redmond/jquery-ui.css'" JQUERY_PATH = "'https://code.jquery.com/jquery-1.11.3.min.js'" RAPHAEL_PATH = "'https://cdnjs.cloudflare.com/ajax/libs/raphael/2.1.4/raphael-min.js'" RESOLV_CONF = "'/etc/resolv.conf'" def apply_substitutions(filename, install_prefix): assert filename.endswith('.in'), 'Filename supplied for customization must end with \'.in\': %s' % (filename) filename_out = filename[:-3] if os.path.exists(filename_out) and os.path.getctime(filename_out) > os.path.getctime(filename): return with open(filename, 'r') as in_fh: s = in_fh.read() s = 
s.replace('__DNSVIZ_INSTALL_PREFIX__', install_prefix) s = s.replace('__JQUERY_PATH__', JQUERY_PATH) s = s.replace('__JQUERY_UI_PATH__', JQUERY_UI_PATH) s = s.replace('__JQUERY_UI_CSS_PATH__', JQUERY_UI_CSS_PATH) s = s.replace('__RAPHAEL_PATH__', RAPHAEL_PATH) s = s.replace('__RESOLV_CONF__', RESOLV_CONF) with open(filename_out, 'w') as out_fh: out_fh.write(s) def make_documentation(): os.chdir('doc') try: if os.system('make') != 0: sys.stderr.write('Warning: Some of the included documentation failed to build. Proceeding without it.\n') finally: os.chdir('..') def create_config(prefix): # Create dnsviz/config.py, so version exists for packages that don't # require calling install. Even though the install prefix is the empty # string, the use case for this is virtual environments, which won't # use it. apply_substitutions(os.path.join('dnsviz','config.py.in'), prefix) # update the timestamp of config.py.in, so if/when the install command # is called, config.py will be rewritten, i.e., with the real install # prefix. 
os.utime(os.path.join('dnsviz', 'config.py.in'), None) class MyBuildPy(build_py): def run(self): make_documentation() build_py.run(self) class MyInstall(install): def run(self): # if this an alternate root is specified, then embed the install_data # path relative to that alternate root if self.root is not None: install_data = os.path.join(os.path.sep, os.path.relpath(self.install_data, self.root)) else: install_data = self.install_data create_config(install_data) install.run(self) DOC_FILES = [('share/doc/dnsviz', ['README.md'])] DATA_FILES = [('share/dnsviz/icons', ['doc/images/error.png', 'doc/images/warning.png']), ('share/dnsviz/css', ['share/css/dnsviz.css']), ('share/dnsviz/js', ['share/js/dnsviz.js']), ('share/dnsviz/html', ['share/html/dnssec-template.html']), ('share/dnsviz/trusted-keys', ['share/trusted-keys/root.txt']), ('share/dnsviz/hints', ['share/hints/named.root'])] MAN_FILES = [('share/man/man1', ['doc/man/dnsviz.1', 'doc/man/dnsviz-probe.1', 'doc/man/dnsviz-grok.1', 'doc/man/dnsviz-graph.1', 'doc/man/dnsviz-print.1', 'doc/man/dnsviz-query.1'])] DOC_EXTRA_FILES = [('share/doc/dnsviz', ['doc/dnsviz-graph.html']), ('share/doc/dnsviz/images', glob.glob(os.path.join('doc', 'images', '*.png')))] # third-party files are only installed if they're included in the package if os.path.exists(os.path.join('external', 'jquery-ui')): JQUERY_UI_FILES = [('share/dnsviz/js', ['external/jquery-ui/jquery-ui-1.11.4.custom.min.js']), ('share/dnsviz/css', ['external/jquery-ui/jquery-ui-1.11.4.custom.min.css']), ('share/dnsviz/css/images', glob.glob(os.path.join('external', 'jquery-ui', 'images', '*.png')))] JQUERY_UI_PATH = "'file://' + os.path.join(DNSVIZ_SHARE_PATH, 'js', 'jquery-ui-1.11.4.custom.min.js')" JQUERY_UI_CSS_PATH = "'file://' + os.path.join(DNSVIZ_SHARE_PATH, 'css', 'jquery-ui-1.11.4.custom.min.css')" else: JQUERY_UI_FILES = [] if os.path.exists(os.path.join('external', 'jquery')): JQUERY_FILES = [('share/dnsviz/js', 
['external/jquery/jquery-1.11.3.min.js'])] JQUERY_PATH = "'file://' + os.path.join(DNSVIZ_SHARE_PATH, 'js', 'jquery-1.11.3.min.js')" else: JQUERY_FILES = [] if os.path.exists(os.path.join('external', 'raphael')): RAPHAEL_FILES = [('share/dnsviz/js', ['external/raphael/raphael-min.js'])] RAPHAEL_PATH = "'file://' + os.path.join(DNSVIZ_SHARE_PATH, 'js', 'raphael-min.js')" else: RAPHAEL_FILES = [] if isinstance(b'', str): map_func = lambda x: x else: map_func = lambda x: codecs.decode(x, 'latin1') create_config('') setup(name='dnsviz', version='0.11.1', author='Casey Deccio', author_email='casey@deccio.net', url='https://github.com/dnsviz/dnsviz/', description='DNS analysis and visualization tool suite', long_description='''DNSViz is a tool suite for analysis and visualization of Domain Name System (DNS) behavior, including its security extensions (DNSSEC). This tool suite powers the Web-based analysis available at http://dnsviz.net/ .''', license='LICENSE', packages=[map_func(b'dnsviz'), map_func(b'dnsviz.viz'), map_func(b'dnsviz.analysis'), map_func(b'dnsviz.commands')], scripts=['bin/dnsviz'], data_files=DOC_FILES + DATA_FILES + MAN_FILES + \ DOC_EXTRA_FILES + JQUERY_UI_FILES + JQUERY_FILES + RAPHAEL_FILES, requires=[ 'pygraphviz (>=1.3)', 'cryptography (>=36.0.0)', 'dnspython (>=1.13)', ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming 
Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Topic :: Internet :: Name Service (DNS)', 'Topic :: Scientific/Engineering :: Visualization', 'Topic :: System :: Networking :: Monitoring', ], cmdclass={ 'build_py': MyBuildPy, 'install': MyInstall }, ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3865309 dnsviz-0.11.1/share/0000755000175000017500000000000015001473074013244 5ustar00caseycasey././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/share/css/0000755000175000017500000000000015001473074014034 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/share/css/dnsviz.css0000644000175000017500000000234415001472663016071 0ustar00caseycasey.ui-tooltip { max-width: none; } .ui-tooltip-content { font-size: 10pt; padding: 0pt; margin: -8px; } .ui-tooltip-content ul { list-style:none; margin: 0; padding: 0; } .ui-tooltip-content ul li { margin: 0; padding: 0; } .dnsviz-secure { background-color: #8ffeff; } .dnsviz-insecure { background-color: #b7b7b7; } .dnsviz-bogus { background-color: #f17b7b; } .dnsviz-non-existent { background-color: #d0d0d0; } .dnsviz-incomplete { background-color: #fffa8f; } .dnsviz-lame { background-color: #fffa8f; } .dnsviz-valid { background-color: #8ffeff; } .dnsviz-indeterminate { background-color: #b7b7b7; } .dnsviz-indeterminate-no-dnskey { background-color: #b7b7b7; } .dnsviz-indeterminate-unknown-algorithm { background-color: #b7b7b7; } .dnsviz-algorithm-ignored { background-color: #b7b7b7; } .dnsviz-expired { background-color: #ad7fed; } .dnsviz-premature { background-color: #ad7fed; } .dnsviz-invalid-sig { background-color: #f17b7b; } .dnsviz-invalid-digest { background-color: #f17b7b; } .dnsviz-invalid-target { background-color: #f17b7b; } .dnsviz-invalid { background-color: #f17b7b; } .dnsviz-error { background-color: #f17b7b; } 
.dnsviz-warning { background-color: #fffa8f; } .dnsviz-match-pre-revoke { background-color: #fffa8f; } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/share/hints/0000755000175000017500000000000015001473074014371 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/share/hints/named.root0000644000175000017500000000635615001472663016377 0ustar00caseycasey; This file holds the information on root name servers needed to ; initialize cache of Internet domain name servers ; (e.g. reference this file in the "cache . " ; configuration file of BIND domain name servers). ; ; This file is made available by InterNIC ; under anonymous FTP as ; file /domain/named.cache ; on server FTP.INTERNIC.NET ; -OR- RS.INTERNIC.NET ; ; last update: June 26, 2024 ; related version of root zone: 2024062601 ; ; FORMERLY NS.INTERNIC.NET ; . 3600000 NS A.ROOT-SERVERS.NET. A.ROOT-SERVERS.NET. 3600000 A 198.41.0.4 A.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:ba3e::2:30 ; ; FORMERLY NS1.ISI.EDU ; . 3600000 NS B.ROOT-SERVERS.NET. B.ROOT-SERVERS.NET. 3600000 A 170.247.170.2 B.ROOT-SERVERS.NET. 3600000 AAAA 2801:1b8:10::b ; ; FORMERLY C.PSI.NET ; . 3600000 NS C.ROOT-SERVERS.NET. C.ROOT-SERVERS.NET. 3600000 A 192.33.4.12 C.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2::c ; ; FORMERLY TERP.UMD.EDU ; . 3600000 NS D.ROOT-SERVERS.NET. D.ROOT-SERVERS.NET. 3600000 A 199.7.91.13 D.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2d::d ; ; FORMERLY NS.NASA.GOV ; . 3600000 NS E.ROOT-SERVERS.NET. E.ROOT-SERVERS.NET. 3600000 A 192.203.230.10 E.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:a8::e ; ; FORMERLY NS.ISC.ORG ; . 3600000 NS F.ROOT-SERVERS.NET. F.ROOT-SERVERS.NET. 3600000 A 192.5.5.241 F.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:2f::f ; ; FORMERLY NS.NIC.DDN.MIL ; . 3600000 NS G.ROOT-SERVERS.NET. G.ROOT-SERVERS.NET. 3600000 A 192.112.36.4 G.ROOT-SERVERS.NET. 
3600000 AAAA 2001:500:12::d0d ; ; FORMERLY AOS.ARL.ARMY.MIL ; . 3600000 NS H.ROOT-SERVERS.NET. H.ROOT-SERVERS.NET. 3600000 A 198.97.190.53 H.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:1::53 ; ; FORMERLY NIC.NORDU.NET ; . 3600000 NS I.ROOT-SERVERS.NET. I.ROOT-SERVERS.NET. 3600000 A 192.36.148.17 I.ROOT-SERVERS.NET. 3600000 AAAA 2001:7fe::53 ; ; OPERATED BY VERISIGN, INC. ; . 3600000 NS J.ROOT-SERVERS.NET. J.ROOT-SERVERS.NET. 3600000 A 192.58.128.30 J.ROOT-SERVERS.NET. 3600000 AAAA 2001:503:c27::2:30 ; ; OPERATED BY RIPE NCC ; . 3600000 NS K.ROOT-SERVERS.NET. K.ROOT-SERVERS.NET. 3600000 A 193.0.14.129 K.ROOT-SERVERS.NET. 3600000 AAAA 2001:7fd::1 ; ; OPERATED BY ICANN ; . 3600000 NS L.ROOT-SERVERS.NET. L.ROOT-SERVERS.NET. 3600000 A 199.7.83.42 L.ROOT-SERVERS.NET. 3600000 AAAA 2001:500:9f::42 ; ; OPERATED BY WIDE ; . 3600000 NS M.ROOT-SERVERS.NET. M.ROOT-SERVERS.NET. 3600000 A 202.12.27.33 M.ROOT-SERVERS.NET. 3600000 AAAA 2001:dc3::35 ; End of file././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/share/html/0000755000175000017500000000000015001473074014210 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/share/html/dnssec-template.html0000644000175000017500000000167515001472663020202 0ustar00caseycasey
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/share/js/0000755000175000017500000000000015001473074013660 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/share/js/dnsviz.js0000644000175000017500000001263215001472663015542 0ustar00caseycasey/* # # This file is a part of DNSViz, a tool suite for DNS/DNSSEC monitoring, # analysis, and visualization. # Created by Casey Deccio (casey@deccio.net) # # Copyright 2012-2014 Sandia Corporation. Under the terms of Contract # DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains # certain rights in this software. # # Copyright 2014-2016 VeriSign, Inc. # # Copyright 2016-2021 Casey Deccio # # DNSViz is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # DNSViz is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with DNSViz. If not, see . # */ function AuthGraph(anchorElement, maxPaperWidth, imageScale) { this.anchorElement = anchorElement; this.maxPaperWidth = maxPaperWidth == undefined ? 0 : maxPaperWidth; this.imageScale = imageScale == undefined ? 
1.4 : imageScale; this._label_map = { 'rrsig': 'RRSIG', 'nsec': 'NSEC', 'nsec3': 'NSEC3', 'cname': 'CNAME', 'dname': 'DNAME', 'dnskey': 'DNSKEY', 'ds': 'DS', 'dlv': 'DLV', 'ttl': 'TTL', 'rrset': 'RRset', 'rdata': 'Record data', 'nsid': 'NSID', 'ns': 'NS', } this._dnssec_algorithms = { 1: 'RSA/MD5', 3: 'DSA/SHA1', 5: 'RSA/SHA-1', 6: 'DSA-NSEC3-SHA1', 7: 'RSASHA1-NSEC3-SHA1', 8: 'RSA/SHA-256', 10: 'RSA/SHA-512', 12: 'GOST R 34.10-2001', 13: 'ECDSA Curve P-256 with SHA-256', 14: 'ECDSA Curve P-384 with SHA-384', 15: 'Ed25519', 16: 'Ed448', } this._digest_algorithms = { 1: 'SHA-1', 2: 'SHA-256', 3: 'GOST R 34.11-94', 4: 'SHA-384', } } AuthGraph.prototype.infoToHtmlTable = function (obj) { return '' + this.infoToHtmlTableComponents(obj) + '
'; } AuthGraph.prototype.infoToHtmlTableComponents = function (obj) { s = ''; for (var key in obj) { val = obj[key]; if (val == null) { continue; } else if (key.toLowerCase() in {'digest':null,'key':null,'signature':null,'dnskey':null}) { // don't print digest or key continue; } else if (key.toLowerCase() in {'rdata':null,'meta':null} && !val.hasOwnProperty('length')) { s += this.infoToHtmlTableComponents(val); continue; } s += '' + this.labelFromSlug(key) + ':'; if (typeof val != "object") { s += val; } else if (val.hasOwnProperty("length")) { if (key.toLowerCase() in {'nsec':null,'nsec3':null} && !val.hasOwnProperty('object')) { var newval = []; var nsec_type = key.toLowerCase() == 'nsec' ? 'NSEC' : 'NSEC3'; for (var i = 0; i < val.length; i++) { newval.push(val[i]['name'] + ' IN ' + nsec_type + ' ' + val[i]['rdata'][0]); }; val = newval; } if (key.toLowerCase() in {'errors':null,'warnings':null}) { s += '
    '; for (var i = 0; i < val.length; i++) { var servers_tags = []; s += '
  • ' + val[i]['description']; if (val[i]['servers'] != undefined) { servers_tags = servers_tags.concat(val[i]['servers']); } if (val[i]['query_options'] != undefined) { servers_tags = servers_tags.concat(val[i]['query_options']); } if (servers_tags.length > 0) { s += ' (' + servers_tags.join(", ") + ')'; } s += '
  • '; } s += '
'; } else if (typeof val[0] in {'string':null,'number':null}) { if (key.toLowerCase() in {'servers':null,'nsid_values':null,'ns_names':null,'digest_type':null}) { s += val.join(", "); } else { s += val.join("
"); } } else { s += '
    '; for (var i = 0; i < val.length; i++) { s += '
  • ' + this.infoToHtmlTable(val[i]) + '
  • '; } s += '
'; } } else { s += this.infoToHtmlTable(val); } s += ''; } return s } AuthGraph.prototype.addNodeEvent = function (nodeObj, infoObj) { var statusStr; var s = ''; if (infoObj.hasOwnProperty('length') && infoObj.length > 1) { statusStr = this.slugify(infoObj[0]['status']); s += '
    '; for (var i = 0; i < infoObj.length; i++) { s += '
  • ' + this.infoToHtmlTable(infoObj[i]) + '
  • '; } s += '
' } else { if (infoObj.hasOwnProperty('length')) { infoObj = infoObj[0]; } statusStr = this.slugify(infoObj['status']); s += this.infoToHtmlTable(infoObj); } s = '
' + s + '
'; $(nodeObj[0]).tooltip({ content: s, items: '*', track: true, show: false, hide: false }); //$(nodeObj[0]).css('cursor', 'pointer'); } if (typeof String.prototype.trim !== 'function') { String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, ''); } } AuthGraph.prototype.labelFromSlug = function (str) { var labels = str.split(/_/); for (var i in labels) { var l = labels[i].toLowerCase(); if (l in this._label_map) { labels[i] = this._label_map[l]; } } labels[0] = labels[0].charAt(0).toUpperCase() + labels[0].slice(1); return labels.join(' '); } AuthGraph.prototype.slugify = function (str) { _slugify_strip_re = /[^\w_\s-]/g; _slugify_hyphenate_re = /[_\s-]+/g; str = str.replace(_slugify_strip_re, '').trim().toLowerCase(); str = str.replace(_slugify_hyphenate_re, '-'); return str; } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/share/trusted-keys/0000755000175000017500000000000015001473074015707 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/share/trusted-keys/root.txt0000644000175000017500000000056715001472663017446 0ustar00caseycasey. 
IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3 +/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kv ArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF 0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+e oZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfd RUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwN R1AkUTV74bU= ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1745253948.3985322 dnsviz-0.11.1/tests/0000755000175000017500000000000015001473074013304 5ustar00caseycasey././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_00_setup.py0000644000175000017500000001122015001472663016353 0ustar00caseycaseyimport os import subprocess import unittest from vars import * class ZoneCreator: def __init__(self, unsigned_subcat): self.unsigned_subcat = unsigned_subcat self.unsigned_zone_dir = get_zone_dir(self.unsigned_subcat) self.unsigned_zone_file = get_zone_file('unsigned', False) self.keys = None def create_dir(self, mydir): if not os.path.exists(mydir): os.mkdir(mydir) def create_keys(self): self.keys = {} key_dir = get_key_dir() self.create_dir(key_dir) self.keys['KSK_RSASHA1'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-f', 'KSK', '-b', '2048', '-a', 'RSASHA1', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_RSASHA1'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-b', '1024', '-a', 'RSASHA1', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['KSK_RSASHA256'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-f', 'KSK', '-b', '2048', '-a', 'RSASHA256', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_RSASHA256'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-b', '1024', '-a', 'RSASHA256', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['KSK_ECDSA256'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, 
'-f', 'KSK', '-a', 'ECDSAP256SHA256', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_ECDSA256'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-a', 'ECDSAP256SHA256', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['KSK_ECDSA384'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-f', 'KSK', '-a', 'ECDSAP384SHA384', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_ECDSA384'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-a', 'ECDSAP384SHA384', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['KSK_ED25519'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-f', 'KSK', '-a', 'ED25519', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_ED25519'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-a', 'ED25519', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['KSK_ED448'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-f', 'KSK', '-a', 'ED448', ZONE_ORIGIN]) \ .decode('utf-8').strip() self.keys['ZSK_ED448'] = \ subprocess.check_output(['dnssec-keygen', '-q', '-K', key_dir, '-a', 'ED448', ZONE_ORIGIN]) \ .decode('utf-8').strip() def get_key_file(self, key_type): return get_key_file(self.keys[key_type]) def create_signed_zone(self, subcat, ksk, zsk): zone_dir = get_zone_dir(subcat) self.create_dir(zone_dir) key_dir = get_key_dir() unsigned_zone_file = get_zone_file(subcat, False) unsigned_delegation_file = get_delegation_file(self.unsigned_subcat) delegation_file = get_delegation_file(subcat) ksk_file = self.get_key_file(ksk) zsk_file = self.get_key_file(zsk) tk_file = get_tk_file(subcat) with open(unsigned_zone_file, 'wb') as fh: subprocess.check_call(['cat', self.unsigned_zone_file, ksk_file, zsk_file], stdout=fh) subprocess.check_call(['dnssec-signzone', '-K', key_dir, '-x', '-k', self.keys[ksk], '-o', ZONE_ORIGIN, unsigned_zone_file, self.keys[zsk]]) os.unlink('dsset-' + ZONE_ORIGIN + '.') with open(delegation_file, 'wb') 
as fh: subprocess.check_call(['cat', unsigned_delegation_file], stdout=fh) subprocess.check_call(['dnssec-dsfromkey', '-a', 'SHA-1', ksk_file], stdout=fh) subprocess.check_call(['dnssec-dsfromkey', '-a', 'SHA-256', ksk_file], stdout=fh) subprocess.check_call(['dnssec-dsfromkey', '-a', 'SHA-384', ksk_file], stdout=fh) if os.path.exists(tk_file): os.remove(tk_file) os.symlink(ksk_file, tk_file) class TestSetup(unittest.TestCase): def test_setup(self): z = ZoneCreator('unsigned') z.create_keys() z.create_signed_zone('signed-nsec', 'KSK_RSASHA256', 'ZSK_RSASHA256') z.create_signed_zone('signed-nsec3', 'KSK_RSASHA256', 'ZSK_RSASHA256') if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_11_response.py0000644000175000017500000000144615001472663017064 0ustar00caseycasey# -*- coding: utf-8 -*- import binascii import dns.message import unittest from dnsviz.response import DNSResponse class DNSResponseTestCase(unittest.TestCase): def test_nsid_val(self): tests = [ # DNS message in wire format, expected NSID value ("26898105000100000000000106646e7376697a036e657400001c000100002904d00000000000120003000e68756d616e2d7265616461626c65", "human-readable"), ("43468105000100000000000106646e7376697a036e657400001c000100002904d000000000000800030004c01dcafe", "0xc01dcafe"), ] for wire, nsid_val in tests: msg = dns.message.from_wire(binascii.unhexlify(wire)) resp = DNSResponse(msg, 0, None, None, None, None, None, None, None, False) self.assertEqual(resp.nsid_val(), nsid_val) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_61_dnsviz_probe_options.py0000644000175000017500000013730015001472663021511 0ustar00caseycasey# -*- coding: utf-8 -*- import argparse import binascii import gzip import logging import os import subprocess import tempfile import unittest import dns.name, dns.rdatatype, dns.rrset, dns.zone from 
dnsviz.commands.probe import ZoneFileToServe, ArgHelper, DomainListArgHelper, StandardRecursiveQueryCD, WILDCARD_EXPLICIT_DELEGATION, AnalysisInputError, CustomQueryMixin from dnsviz.ipaddr import IPAddr from dnsviz import transport from vars import * EXAMPLE_COM_ZONE = get_zone_file('signed-nsec3', True) EXAMPLE_COM_DELEGATION = get_delegation_file('signed-nsec3') EXAMPLE_AUTHORITATIVE = get_probe_output_auth_file('unsigned') class DNSVizProbeOptionsTestCase(unittest.TestCase): def setUp(self): self.helper = DomainListArgHelper() self.logger = logging.getLogger() for handler in self.logger.handlers: self.logger.removeHandler(handler) self.logger.addHandler(logging.NullHandler()) try: ArgHelper.bindable_ip('::1') except argparse.ArgumentTypeError: self.use_ipv6 = False else: self.use_ipv6 = True self.first_port = ZoneFileToServe._next_free_port self.custom_query_mixin_edns_options_orig = CustomQueryMixin.edns_options[:] with tempfile.NamedTemporaryFile('w', prefix='dnsviz', delete=False) as self.example_ds_file: self.example_ds_file.write('example.com. IN DS 34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418\n' + \ 'example.com. 
IN DS 34983 10 2 608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49') def tearDown(self): os.remove(self.example_ds_file.name) CustomQueryMixin.edns_options = self.custom_query_mixin_edns_options_orig[:] def test_authoritative_option(self): arg1 = 'example.com+:ns1.example.com=192.0.2.1:1234,ns1.example.com=[2001:db8::1],' + \ 'ns1.example.com=192.0.2.2,ns2.example.com=[2001:db8::2],a.root-servers.net,192.0.2.3' arg1_with_spaces = ' example.com+ : ns1.example.com = [192.0.2.1]:1234 , ns1.example.com = [2001:db8::1], ' + \ 'ns1.example.com = [192.0.2.2] , ns2.example.com = [2001:db8::2] , a.root-servers.net , 192.0.2.3 ' arg2 = 'example.com:ns1.example.com=192.0.2.1' arg3 = 'example.com:%s' % EXAMPLE_COM_ZONE arg4 = 'example.com+:%s' % EXAMPLE_COM_ZONE delegation_mapping1 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.', 'ns2.example.com.', 'a.root-servers.net.', 'ns1._dnsviz.example.com.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.1', '192.0.2.2']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::1']), (dns.name.from_text('ns1._dnsviz.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1._dnsviz.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.3']), (dns.name.from_text('ns2.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns2.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::2']), (dns.name.from_text('a.root-servers.net'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['198.41.0.4']), 
(dns.name.from_text('a.root-servers.net'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:503:ba3e::2:30']) } stop_at1 = True odd_ports1 = { (dns.name.from_text('example.com'), IPAddr('192.0.2.1')): 1234 } zone_filename1 = None delegation_mapping2 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.1']) } stop_at2 = False odd_ports2 = {} zone_filename2 = None delegation_mapping3 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, []), } stop_at3 = False odd_ports3 = {} zone_filename3 = EXAMPLE_COM_ZONE delegation_mapping4 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, []), } stop_at4 = True odd_ports4 = {} zone_filename4 = EXAMPLE_COM_ZONE obj = self.helper.authoritative_name_server_mappings(arg1) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) self.assertEqual(obj.filename, zone_filename1) obj = self.helper.authoritative_name_server_mappings(arg1_with_spaces) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) self.assertEqual(obj.filename, zone_filename1) obj = self.helper.authoritative_name_server_mappings(arg2) self.assertEqual(obj.domain, 
dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping2) self.assertEqual(obj.stop_at, stop_at2) self.assertEqual(obj.odd_ports, odd_ports2) self.assertEqual(obj.filename, zone_filename2) obj = self.helper.authoritative_name_server_mappings(arg3) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping3) self.assertEqual(obj.stop_at, stop_at3) self.assertEqual(obj.odd_ports, odd_ports3) self.assertEqual(obj.filename, zone_filename3) obj = self.helper.authoritative_name_server_mappings(arg4) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping4) self.assertEqual(obj.stop_at, stop_at4) self.assertEqual(obj.odd_ports, odd_ports4) self.assertEqual(obj.filename, zone_filename4) def test_authoritative_errors(self): # no mapping arg = 'example.com' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # bad domain name arg = 'example.com:ns1..foo.com' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # bad IPv4 address arg = 'example.com:ns1.foo.com=192' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # Bad IPv6 address arg = 'example.com:ns1.foo.com=2001:db8' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # IPv6 address needs brackets (IP valid even with port stripped) arg = 'example.com:ns1.foo.com=2001:db8::1:3' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # IPv6 address needs brackets (IP invalid with port stripped) arg = 'example.com:ns1.foo.com=2001:db8::3' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) # Name does not resolve properly arg = 
'example.com:ns1.does-not-exist-foo-bar-baz-123-abc-dnsviz.net' with self.assertRaises(argparse.ArgumentTypeError): self.helper.authoritative_name_server_mappings(arg) def test_delegation_option(self): arg1 = 'example.com:ns1.example.com=192.0.2.1:1234,ns1.example.com=[2001:db8::1],' + \ 'ns1.example.com=192.0.2.2,ns2.example.com=[2001:db8::2]' arg1_with_spaces = ' example.com : ns1.example.com = [192.0.2.1]:1234 , ns1.example.com = [2001:db8::1], ' + \ 'ns1.example.com = [192.0.2.2] , ns2.example.com = [2001:db8::2] ' arg2 = 'example.com:%s' % EXAMPLE_COM_DELEGATION delegation_mapping1 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.', 'ns2.example.com.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.1', '192.0.2.2']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::1']), (dns.name.from_text('ns2.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns2.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::2']), } stop_at1 = False odd_ports1 = { (dns.name.from_text('example.com'), IPAddr('192.0.2.1')): 1234 } delegation_mapping2 = { (dns.name.from_text('example.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['127.0.0.1']) } stop_at2 = False odd_ports2 = {} obj = self.helper.delegation_name_server_mappings(arg1) self.assertEqual(obj.domain, dns.name.from_text('example.com')) 
self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) obj = self.helper.delegation_name_server_mappings(arg1_with_spaces) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) obj = self.helper.delegation_name_server_mappings(arg2) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping2) self.assertEqual(obj.stop_at, stop_at2) self.assertEqual(obj.odd_ports, odd_ports2) def test_delegation_errors(self): # all the authoritative error tests as well # requires name=addr mapping arg = 'example.com:ns1.example.com' with self.assertRaises(argparse.ArgumentTypeError): self.helper.delegation_name_server_mappings(arg) # requires name=addr mapping arg = 'example.com:192.0.2.1' with self.assertRaises(argparse.ArgumentTypeError): self.helper.delegation_name_server_mappings(arg) # doesn't allow + arg = 'example.com+:ns1.example.com=192.0.2.1' with self.assertRaises(argparse.ArgumentTypeError): self.helper.delegation_name_server_mappings(arg) # can't do this for root domain arg = '.:ns1.example.com=192.0.2.1' with self.assertRaises(argparse.ArgumentTypeError): self.helper.delegation_name_server_mappings(arg) def test_recursive_option(self): arg1 = 'ns1.example.com=192.0.2.1:1234,ns1.example.com=[2001:db8::1],' + \ 'ns1.example.com=192.0.2.2,ns2.example.com=[2001:db8::2],a.root-servers.net' arg1_with_spaces = ' ns1.example.com = [192.0.2.1]:1234 , ns1.example.com = [2001:db8::1], ' + \ 'ns1.example.com = [192.0.2.2] , ns2.example.com = [2001:db8::2] , a.root-servers.net ' delegation_mapping1 = { (WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS): dns.rrset.from_text_list(WILDCARD_EXPLICIT_DELEGATION, 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.', 
'ns2.example.com.', 'a.root-servers.net.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.1', '192.0.2.2']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::1']), (dns.name.from_text('ns2.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns2.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::2']), (dns.name.from_text('a.root-servers.net'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['198.41.0.4']), (dns.name.from_text('a.root-servers.net'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:503:ba3e::2:30']) } stop_at1 = False odd_ports1 = { (WILDCARD_EXPLICIT_DELEGATION, IPAddr('192.0.2.1')): 1234 } obj = self.helper.recursive_servers_for_domain(arg1) self.assertEqual(obj.domain, WILDCARD_EXPLICIT_DELEGATION) self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) obj = self.helper.recursive_servers_for_domain(arg1_with_spaces) self.assertEqual(obj.domain, WILDCARD_EXPLICIT_DELEGATION) self.assertEqual(obj.delegation_mapping, delegation_mapping1) self.assertEqual(obj.stop_at, stop_at1) self.assertEqual(obj.odd_ports, odd_ports1) def test_recursive_errors(self): # all the authoritative error tests as well # doesn't accept file arg = EXAMPLE_COM_DELEGATION with self.assertRaises(argparse.ArgumentTypeError): self.helper.recursive_servers_for_domain(arg) def test_ds_option(self): arg1 = 'example.com:34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418,' + \ '34983 10 2 
608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49' delegation_mapping1 = { (dns.name.from_text('example.com'), dns.rdatatype.DS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.DS, ['34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418', '34983 10 2 608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49']) } arg1_with_spaces = ' example.com : 34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418, ' + \ ' 34983 10 2 608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49 ' arg2 = 'example.com:%s' % self.example_ds_file.name delegation_mapping2 = { (dns.name.from_text('example.com'), dns.rdatatype.DS): dns.rrset.from_text_list(dns.name.from_text('example.com'), 0, dns.rdataclass.IN, dns.rdatatype.DS, ['34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418', '34983 10 2 608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49']) } obj = self.helper.ds_for_domain(arg1) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping1) obj = self.helper.ds_for_domain(arg1_with_spaces) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping1) obj = self.helper.ds_for_domain(arg2) self.assertEqual(obj.domain, dns.name.from_text('example.com')) self.assertEqual(obj.delegation_mapping, delegation_mapping2) def test_ds_error(self): # bad DS record arg = 'example.com:blah' with self.assertRaises(argparse.ArgumentTypeError): obj = self.helper.ds_for_domain(arg) def test_positive_int(self): self.assertEqual(ArgHelper.positive_int('1'), 1) self.assertEqual(ArgHelper.positive_int('2'), 2) # zero with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.positive_int('0') # negative with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.positive_int('-1') def test_bindable_ip(self): self.assertEqual(ArgHelper.bindable_ip('127.0.0.1'), IPAddr('127.0.0.1')) if 
self.use_ipv6: self.assertEqual(ArgHelper.bindable_ip('::1'), IPAddr('::1')) # invalid IPv4 address with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.bindable_ip('192.') # invalid IPv6 address with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.bindable_ip('2001:') # invalid IPv4 to bind to with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.bindable_ip('192.0.2.1') # invalid IPv6 to bind to with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.bindable_ip('2001:db8::1') def test_valid_url(self): url1 = 'http://www.example.com/foo' url2 = 'https://www.example.com/foo' url3 = 'ws:///path/to/file' url4 = 'ssh://user@example.com/foo' self.assertEqual(ArgHelper.valid_url(url1), url1) self.assertEqual(ArgHelper.valid_url(url2), url2) self.assertEqual(ArgHelper.valid_url(url3), url3) self.assertEqual(ArgHelper.valid_url(url4), url4) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.valid_url('ftp://www.example.com/foo') # ws with hostname with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.valid_url('ws://www.example.com/foo') def test_rrtype_list(self): arg1 = 'A,AAAA,MX,CNAME' arg1_with_spaces = ' A , AAAA , MX , CNAME ' arg2 = 'A' arg3 = 'A,BLAH' arg4_empty = '' arg4_empty_spaces = ' ' type_list1 = [dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.MX, dns.rdatatype.CNAME] type_list2 = [dns.rdatatype.A] empty_list = [] self.assertEqual(ArgHelper.comma_separated_dns_types(arg1), type_list1) self.assertEqual(ArgHelper.comma_separated_dns_types(arg1_with_spaces), type_list1) self.assertEqual(ArgHelper.comma_separated_dns_types(arg4_empty), empty_list) self.assertEqual(ArgHelper.comma_separated_dns_types(arg4_empty_spaces), empty_list) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.comma_separated_dns_types(arg3) def test_valid_domain_name(self): arg1 = '.' 
arg2 = 'www.example.com' arg3 = 'www..example.com' self.assertEqual(ArgHelper.valid_domain_name(arg1), dns.name.from_text(arg1)) self.assertEqual(ArgHelper.valid_domain_name(arg2), dns.name.from_text(arg2)) # invalid domain name with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.valid_domain_name(arg3) def test_nsid_option(self): self.assertEqual(ArgHelper.nsid_option(), dns.edns.GenericOption(3, b'')) def test_ecs_option(self): arg1 = '192.0.2.0' arg2 = '192.0.2.0/25' arg3 = '192.0.2.255/25' arg4 = '192.0.2.0/24' arg5 = '2001:db8::' arg6 = '2001:db8::/121' arg7 = '2001:db8::ff/121' arg8 = '2001:db8::/120' ecs_option1 = dns.edns.GenericOption(8, binascii.unhexlify('00012000c0000200')) ecs_option2 = dns.edns.GenericOption(8, binascii.unhexlify('00011900c0000200')) ecs_option3 = dns.edns.GenericOption(8, binascii.unhexlify('00011900c0000280')) ecs_option4 = dns.edns.GenericOption(8, binascii.unhexlify('00011800c00002')) ecs_option5 = dns.edns.GenericOption(8, binascii.unhexlify('0002800020010db8000000000000000000000000')) ecs_option6 = dns.edns.GenericOption(8, binascii.unhexlify('0002790020010db8000000000000000000000000')) ecs_option7 = dns.edns.GenericOption(8, binascii.unhexlify('0002790020010db8000000000000000000000080')) ecs_option8 = dns.edns.GenericOption(8, binascii.unhexlify('0002780020010db80000000000000000000000')) self.assertEqual(ArgHelper.ecs_option(arg1), ecs_option1) self.assertEqual(ArgHelper.ecs_option(arg2), ecs_option2) self.assertEqual(ArgHelper.ecs_option(arg3), ecs_option3) self.assertEqual(ArgHelper.ecs_option(arg4), ecs_option4) self.assertEqual(ArgHelper.ecs_option(arg5), ecs_option5) self.assertEqual(ArgHelper.ecs_option(arg6), ecs_option6) self.assertEqual(ArgHelper.ecs_option(arg7), ecs_option7) self.assertEqual(ArgHelper.ecs_option(arg8), ecs_option8) # invalid IP address with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.ecs_option('192') # invalid length with self.assertRaises(argparse.ArgumentTypeError): 
ArgHelper.ecs_option('192.0.2.0/foo') # invalid length with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.ecs_option('192.0.2.0/33') # invalid length with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.ecs_option('2001:db8::/129') def test_cookie_option(self): arg1 = '0102030405060708' arg2 = '' cookie_option1 = dns.edns.GenericOption(10, binascii.unhexlify('0102030405060708')) cookie_option2 = None self.assertEqual(ArgHelper.dns_cookie_option(arg1), cookie_option1) self.assertEqual(ArgHelper.dns_cookie_option(arg2), None) self.assertIsInstance(ArgHelper.dns_cookie_rand(), dns.edns.GenericOption) # too short with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.dns_cookie_option('01') # too long with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.dns_cookie_option('010203040506070809') # non-hexadecimal with self.assertRaises(argparse.ArgumentTypeError): ArgHelper.dns_cookie_option('010203040506070h') def test_delegation_aggregation(self): args1 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1,ns1.example.com=[2001:db8::1]', '-N', 'example.com:ns1.example.com=192.0.2.4', '-N', 'example.com:ns2.example.com=192.0.2.2', '-N', 'example.com:ns3.example.com=192.0.2.3'] args2 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1', '-D', 'example.com:34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418', '-D', 'example.com:34983 10 2 608D3B089D79D554A1947BD10BEC0A5B1BDBE67B4E60E34B1432ED00 33F24B49'] args3 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1', '-N', 'example1.com:ns1.example1.com=192.0.2.2'] args4 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1', '-N', 'example.net:ns1.example.net=192.0.2.2'] explicit_delegations1 = { (dns.name.from_text('com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), } explicit_delegations2 = { (dns.name.from_text('com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('com'), 0, 
dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), } explicit_delegations3 = { (dns.name.from_text('com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), } explicit_delegations4 = { (dns.name.from_text('com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), (dns.name.from_text('net'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('net'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), } for ex in (explicit_delegations1, explicit_delegations2, explicit_delegations3, explicit_delegations4): if self.use_ipv6: ex[(dns.name.from_text('localhost'), dns.rdatatype.AAAA)] = \ dns.rrset.from_text_list(dns.name.from_text('localhost'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['::1']) loopback_ip = IPAddr('::1') else: ex[(dns.name.from_text('localhost'), dns.rdatatype.A)] = \ dns.rrset.from_text_list(dns.name.from_text('localhost'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['127.0.0.1']) loopback_ip = IPAddr('127.0.0.1') odd_ports1 = { (dns.name.from_text('com'), loopback_ip): self.first_port } odd_ports2 = { (dns.name.from_text('com'), loopback_ip): self.first_port } odd_ports3 = { (dns.name.from_text('com'), loopback_ip): self.first_port } odd_ports4 = { (dns.name.from_text('com'), loopback_ip): self.first_port, (dns.name.from_text('net'), loopback_ip): self.first_port + 1, } if self.use_ipv6: rdata = b'AAAA ::1' else: rdata = b'A 127.0.0.1' zone_contents1 = b'''@ 600 IN SOA localhost. root.localhost. 1 1800 900 86400 600 @ 600 IN NS @ @ 600 IN ''' + rdata + \ b''' example 0 IN NS ns1.example example 0 IN NS ns2.example example 0 IN NS ns3.example ns1.example 0 IN A 192.0.2.1 ns1.example 0 IN A 192.0.2.4 ns1.example 0 IN AAAA 2001:db8::1 ns2.example 0 IN A 192.0.2.2 ns3.example 0 IN A 192.0.2.3 ''' zone_contents2 = b'''@ 600 IN SOA localhost. root.localhost. 
1 1800 900 86400 600 @ 600 IN NS @ @ 600 IN ''' + rdata + \ b''' example 0 IN DS 34983 10 1 ec358cfaaec12266ef5acfc1feaf2caff083c418 example 0 IN DS 34983 10 2 608d3b089d79d554a1947bd10bec0a5b1bdbe67b4e60e34b1432ed0033f24b49 example 0 IN NS ns1.example ns1.example 0 IN A 192.0.2.1 ''' ZoneFileToServe._next_free_port = self.first_port arghelper1 = ArgHelper(self.logger) arghelper1.build_parser('probe') arghelper1.parse_args(args1) arghelper1.aggregate_delegation_info() zone_to_serve = arghelper1._zones_to_serve[0] zone_obj = dns.zone.from_file(zone_to_serve.filename, dns.name.from_text('com')) zone_obj_other = dns.zone.from_text(zone_contents1, dns.name.from_text('com')) self.assertEqual(zone_obj, zone_obj_other) self.assertEqual(arghelper1.explicit_delegations, explicit_delegations1) self.assertEqual(arghelper1.odd_ports, odd_ports1) ZoneFileToServe._next_free_port = self.first_port arghelper2 = ArgHelper(self.logger) arghelper2.build_parser('probe') arghelper2.parse_args(args2) arghelper2.aggregate_delegation_info() zone_to_serve = arghelper2._zones_to_serve[0] zone_obj = dns.zone.from_file(zone_to_serve.filename, dns.name.from_text('com')) zone_obj_other = dns.zone.from_text(zone_contents2, dns.name.from_text('com')) self.assertEqual(zone_obj, zone_obj_other) self.assertEqual(arghelper2.explicit_delegations, explicit_delegations2) self.assertEqual(arghelper2.odd_ports, odd_ports2) ZoneFileToServe._next_free_port = self.first_port arghelper3 = ArgHelper(self.logger) arghelper3.build_parser('probe') arghelper3.parse_args(args3) arghelper3.aggregate_delegation_info() self.assertEqual(arghelper3.explicit_delegations, explicit_delegations3) self.assertEqual(arghelper3.odd_ports, odd_ports3) ZoneFileToServe._next_free_port = self.first_port arghelper4 = ArgHelper(self.logger) arghelper4.build_parser('probe') arghelper4.parse_args(args4) arghelper4.aggregate_delegation_info() self.assertEqual(arghelper4.explicit_delegations, explicit_delegations4) 
self.assertEqual(arghelper4.odd_ports, odd_ports4) def test_delegation_authoritative_aggregation(self): args1 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1,ns1.example.com=[2001:db8::1]', '-x', 'foo.com:ns1.foo.com=192.0.2.3:50503'] explicit_delegations1 = { (dns.name.from_text('com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['localhost.']), (dns.name.from_text('foo.com'), dns.rdatatype.NS): dns.rrset.from_text_list(dns.name.from_text('foo.com'), 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.foo.com.']), (dns.name.from_text('ns1.foo.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.foo.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.3']), } for ex in (explicit_delegations1,): if self.use_ipv6: ex[(dns.name.from_text('localhost'), dns.rdatatype.AAAA)] = \ dns.rrset.from_text_list(dns.name.from_text('localhost'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['::1']) loopback_ip = IPAddr('::1') else: ex[(dns.name.from_text('localhost'), dns.rdatatype.A)] = \ dns.rrset.from_text_list(dns.name.from_text('localhost'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['127.0.0.1']) loopback_ip = IPAddr('127.0.0.1') odd_ports1 = { (dns.name.from_text('com'), loopback_ip): self.first_port, (dns.name.from_text('foo.com'), IPAddr('192.0.2.3')): 50503, } ZoneFileToServe._next_free_port = self.first_port arghelper1 = ArgHelper(self.logger) arghelper1.build_parser('probe') arghelper1.parse_args(args1) arghelper1.aggregate_delegation_info() self.assertEqual(arghelper1.explicit_delegations, explicit_delegations1) self.assertEqual(arghelper1.odd_ports, odd_ports1) def test_delegation_authoritative_aggregation_errors(self): args1 = ['-A', '-N', 'example.com:ns1.example.com=192.0.2.1,ns1.example.com=[2001:db8::1]', '-x', 'com:ns1.foo.com=192.0.2.3'] arghelper1 = ArgHelper(self.logger) arghelper1.build_parser('probe') arghelper1.parse_args(args1) # com is specified with -x but example.com 
is specified with -N with self.assertRaises(argparse.ArgumentTypeError): arghelper1.aggregate_delegation_info() def test_recursive_aggregation(self): args1 = ['-s', 'ns1.example.com=192.0.2.1,ns1.example.com=[2001:db8::1]', '-s', 'ns1.example.com=192.0.2.4,a.root-servers.net'] explicit_delegations1 = { (WILDCARD_EXPLICIT_DELEGATION, dns.rdatatype.NS): dns.rrset.from_text_list(WILDCARD_EXPLICIT_DELEGATION, 0, dns.rdataclass.IN, dns.rdatatype.NS, ['ns1.example.com.', 'a.root-servers.net.']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['192.0.2.1', '192.0.2.4']), (dns.name.from_text('ns1.example.com'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('ns1.example.com'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:db8::1']), (dns.name.from_text('a.root-servers.net'), dns.rdatatype.A): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.A, ['198.41.0.4']), (dns.name.from_text('a.root-servers.net'), dns.rdatatype.AAAA): dns.rrset.from_text_list(dns.name.from_text('a.root-servers.net'), 0, dns.rdataclass.IN, dns.rdatatype.AAAA, ['2001:503:ba3e::2:30']) } odd_ports1 = {} arghelper1 = ArgHelper(self.logger) arghelper1.build_parser('probe') arghelper1.parse_args(args1) arghelper1.aggregate_delegation_info() self.assertEqual(arghelper1.explicit_delegations, explicit_delegations1) self.assertEqual(arghelper1.odd_ports, odd_ports1) def test_option_combination_errors(self): # Names, input file, or names file required args = [] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() # Names file and command-line domain names are mutually exclusive args = ['-f', '/dev/null', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) 
with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.names_file.close() # Authoritative analysis and recursive servers args = ['-A', '-s', '192.0.2.1', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() # Authoritative servers with recursive analysis args = ['-x', 'example.com:ns1.example.com=192.0.2.1', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() # Delegation information with recursive analysis args = ['-N', 'example.com:ns1.example.com=192.0.2.1', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() # Delegation information with recursive analysis args = [ '-D', 'example.com:34983 10 1 EC358CFAAEC12266EF5ACFC1FEAF2CAFF083C418', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() def test_ceiling(self): args = ['-a', 'com', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.ceiling, dns.name.from_text('com')) args = ['example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.ceiling, dns.name.root) args = ['-A', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertIsNone(arghelper.ceiling) def test_ip4_ipv6(self): args = [] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') 
arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.try_ipv4, True) self.assertEqual(arghelper.try_ipv6, True) args = ['-4', '-6'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.try_ipv4, True) self.assertEqual(arghelper.try_ipv6, True) args = ['-4'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.try_ipv4, True) self.assertEqual(arghelper.try_ipv6, False) args = ['-6'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.try_ipv4, False) self.assertEqual(arghelper.try_ipv6, True) def test_client_ip(self): args = [] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertIsNone(arghelper.client_ipv4) self.assertIsNone(arghelper.client_ipv6) args = ['-b', '127.0.0.1'] if self.use_ipv6: args.extend(['-b', '::1']) arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.client_ipv4, IPAddr('127.0.0.1')) if self.use_ipv6: self.assertEqual(arghelper.client_ipv6, IPAddr('::1')) def test_th_factories(self): args = ['example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertIsNone(arghelper.th_factories) args = ['-u', 'http://example.com/', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertIsInstance(arghelper.th_factories[0], transport.DNSQueryTransportHandlerHTTPFactory) args = ['-u', 'ws:///dev/null', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() 
self.assertIsInstance(arghelper.th_factories[0], transport.DNSQueryTransportHandlerWebSocketServerFactory) args = ['-u', 'ssh://example.com/', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertIsInstance(arghelper.th_factories[0], transport.DNSQueryTransportHandlerRemoteCmdFactory) def test_edns_options(self): CustomQueryMixin.edns_options = self.custom_query_mixin_edns_options_orig[:] # None args = ['--no-nsid', '-c', '', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(len(CustomQueryMixin.edns_options), 0) CustomQueryMixin.edns_options = self.custom_query_mixin_edns_options_orig[:] # Only DNS cookie and NSID args = ['example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(set([o.otype for o in CustomQueryMixin.edns_options]), set([3, 10])) CustomQueryMixin.edns_options = self.custom_query_mixin_edns_options_orig[:] # All EDNS options args = ['-e', '192.0.2.0/24', 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(set([o.otype for o in CustomQueryMixin.edns_options]), set([3, 8, 10])) CustomQueryMixin.edns_options = self.custom_query_mixin_edns_options_orig[:] def test_ingest_input(self): with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_bad_json: example_bad_json.write(b'{') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_no_version: example_no_version.write(b'{}') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_1: example_invalid_version_1.write(b'{ "_meta._dnsviz.": { "version": 1.11 } }') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as 
example_invalid_version_2: example_invalid_version_2.write(b'{ "_meta._dnsviz.": { "version": 5.0 } }') try: args = ['-r', EXAMPLE_AUTHORITATIVE] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_input() arghelper.args.input_file.close() # Bad json args = ['-r', example_bad_json.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # No version args = ['-r', example_no_version.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_1.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_2.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() finally: for tmpfile in (example_bad_json, example_no_version, \ example_invalid_version_1, example_invalid_version_2): os.remove(tmpfile.name) def test_ingest_names(self): args = ['example.com', 'example.net'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) unicode_name = 'テスト' args = [unicode_name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('xn--zckzah.')]) 
with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as names_file: names_file.write('example.com\nexample.net\n'.encode('utf-8')) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as names_file_unicode: try: names_file_unicode.write(('%s\n' % (unicode_name)).encode('utf-8')) # python3/python2 dual compatibility except UnicodeDecodeError: names_file_unicode.write(('%s\n' % (unicode_name))) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_names_only: example_names_only.write(b'{ "_meta._dnsviz.": { "version": 1.2, "names": [ "example.com.", "example.net.", "example.org." ] } }') try: args = ['-f', names_file.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) arghelper.args.names_file.close() args = ['-f', names_file_unicode.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('xn--zckzah.')]) arghelper.args.names_file.close() args = ['-r', example_names_only.name] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net'), dns.name.from_text('example.org')]) arghelper.args.input_file.close() args = ['-r', example_names_only.name, 'example.com'] arghelper = ArgHelper(self.logger) arghelper.build_parser('probe') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com')]) arghelper.args.input_file.close() finally: for tmpfile in (names_file, names_file_unicode, example_names_only): os.remove(tmpfile.name) 
if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_62_dnsviz_graph_options.py0000644000175000017500000003612015001472663021502 0ustar00caseycaseyimport argparse import binascii import datetime import gzip import importlib import io import logging import os import subprocess import tempfile import unittest import dns.name, dns.rdatatype, dns.rrset, dns.zone from dnsviz.format import utc from dnsviz.util import get_default_trusted_keys from vars import * mod = importlib.import_module('dnsviz.commands.graph') GraphArgHelper = getattr(mod, 'GraphArgHelper') AnalysisInputError = getattr(mod, 'AnalysisInputError') EXAMPLE_AUTHORITATIVE = get_probe_output_auth_file('unsigned') class DNSVizGraphOptionsTestCase(unittest.TestCase): def setUp(self): self.logger = logging.getLogger() for handler in self.logger.handlers: self.logger.removeHandler(handler) self.logger.addHandler(logging.NullHandler()) def tearDown(self): for filename in ('png', 'foo.xyz', 'foo.svg', 'foo.png', 'foo.html', 'foo.dot'): if os.path.exists(filename): os.remove(filename) def test_rrtype_list(self): arg1 = 'A,AAAA,MX,CNAME' arg1_with_spaces = ' A , AAAA , MX , CNAME ' arg2 = 'A' arg3 = 'A,BLAH' arg4_empty = '' arg4_empty_spaces = ' ' type_list1 = [dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.MX, dns.rdatatype.CNAME] type_list2 = [dns.rdatatype.A] empty_list = [] self.assertEqual(GraphArgHelper.comma_separated_dns_types(arg1), type_list1) self.assertEqual(GraphArgHelper.comma_separated_dns_types(arg1_with_spaces), type_list1) self.assertEqual(GraphArgHelper.comma_separated_dns_types(arg2), type_list2) self.assertEqual(GraphArgHelper.comma_separated_dns_types(arg4_empty), empty_list) self.assertEqual(GraphArgHelper.comma_separated_dns_types(arg4_empty_spaces), empty_list) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): GraphArgHelper.comma_separated_dns_types(arg3) def 
test_integer_list(self): arg1 = '1,2,3,4,5' arg1_with_spaces = ' 1 , 2 , 3 , 4 , 5 ' arg2 = '1' arg3 = '1,A' arg4_empty = '' arg4_empty_spaces = ' ' int_list1 = [1,2,3,4,5] int_list2 = [1] empty_list = [] int_set1 = set([1,2,3,4,5]) int_set2 = set([1]) empty_set = set([]) self.assertEqual(GraphArgHelper.comma_separated_ints(arg1), int_list1) self.assertEqual(GraphArgHelper.comma_separated_ints(arg1_with_spaces), int_list1) self.assertEqual(GraphArgHelper.comma_separated_ints(arg2), int_list2) self.assertEqual(GraphArgHelper.comma_separated_ints(arg4_empty), empty_list) self.assertEqual(GraphArgHelper.comma_separated_ints(arg4_empty_spaces), empty_list) self.assertEqual(GraphArgHelper.comma_separated_ints_set(arg1), int_set1) self.assertEqual(GraphArgHelper.comma_separated_ints_set(arg1_with_spaces), int_set1) self.assertEqual(GraphArgHelper.comma_separated_ints_set(arg2), int_set2) self.assertEqual(GraphArgHelper.comma_separated_ints_set(arg4_empty), empty_set) self.assertEqual(GraphArgHelper.comma_separated_ints_set(arg4_empty_spaces), empty_set) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): GraphArgHelper.comma_separated_ints(arg3) def test_valid_domain_name(self): arg1 = '.' 
arg2 = 'www.example.com' arg3 = 'www..example.com' self.assertEqual(GraphArgHelper.valid_domain_name(arg1), dns.name.from_text(arg1)) self.assertEqual(GraphArgHelper.valid_domain_name(arg2), dns.name.from_text(arg2)) # invalid domain name with self.assertRaises(argparse.ArgumentTypeError): GraphArgHelper.valid_domain_name(arg3) def test_ingest_input(self): with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_bad_json: example_bad_json.write(b'{') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_no_version: example_no_version.write(b'{}') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_1: example_invalid_version_1.write(b'{ "_meta._dnsviz.": { "version": 1.11 } }') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_2: example_invalid_version_2.write(b'{ "_meta._dnsviz.": { "version": 5.0 } }') try: args = ['-r', EXAMPLE_AUTHORITATIVE] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.ingest_input() arghelper.args.input_file.close() # Bad json args = ['-r', example_bad_json.name] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # No version args = ['-r', example_no_version.name] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_1.name] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_2.name] 
arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() finally: for tmpfile in (example_bad_json, example_no_version, \ example_invalid_version_1, example_invalid_version_2): os.remove(tmpfile.name) def test_ingest_names(self): args = ['example.com', 'example.net'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as names_file: names_file.write(b'example.com\nexample.net\n') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_names_only: example_names_only.write(b'{ "_meta._dnsviz.": { "version": 1.2, "names": [ "example.com.", "example.net.", "example.org." ] } }') try: args = ['-f', names_file.name] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) arghelper.args.names_file.close() args = ['-r', example_names_only.name] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net'), dns.name.from_text('example.org')]) arghelper.args.input_file.close() args = ['-r', example_names_only.name, 'example.com'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com')]) arghelper.args.input_file.close() 
finally: for tmpfile in (names_file, example_names_only): os.remove(tmpfile.name) def test_trusted_keys_file(self): tk1 = 'example.com. IN DNSKEY 256 3 7 AwEAAZ2YEuBl4X58v1CezDfZjT1viYn5kY3MF3lSDjvHjMZ6gJlYt4Qq oIdpChifmeJldEX9/wPc04Tg7MlEfV3m0x2j80dMyObM0FZTxzMgbTFk Zs0AWrDXELieGkFZv1FB9YoxSX2XqvpFxwvPyyszUtCy/c5hrb6vfKRB Jh+qIO+NsNrl6O8NiYjWWNjdiFw+c2BxzpArQoaA+rcoyDYwH4xGpvTw YLnE9HmkwTSQuwASkgWgX3KgTmsDEw4I0P5Tk+wvmNnaqDhmFMHJK5Oh 92wUX+ppxxSgUx4UIJmftzi7sCg0qekIYUf99Dkn7OlC8X0rjj+xO4cD hbTjGkxmsD0=' tk2 = 'example.com. IN DNSKEY 256 3 7 AwEAAaerI6CXvvG6U3UxkB0PXj+ORyGFtABYJ6JG3NL6w1KKlZl+73AS aPEEa7SXeuWmAWE1N3rsbnrMBvepBXkCbP609eoo2mJ8bsozT/NNwSSc FP1Ddw4wxpZAC/+/K736rF1HbI3ROS/rBTr7RW6rWzcyPbYFuUMVzrAM ZSJNJsTDcmyGc5Is3cFzNcrd3/Gmcjt8TKMmGq51HXWzFvxro7EH6aOl K6G4O4+mzaUKp91mg7DAVhX8yXnadXUZQ4yDfLzSleYQ2TroQqeSgI3X m/gUoACm3ELUOr84TmIKZ67X/zBTx8tHC5iBWY2tbIKqiJY7I4/aW4S4 NraCSRbDpbM=' tk1_rdata = ' '.join(tk1.split()[3:]) tk2_rdata = ' '.join(tk2.split()[3:]) tk_explicit = [(dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk1_rdata)), (dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk2_rdata))] now = datetime.datetime.now(utc) tk_default = get_default_trusted_keys(now) args = ['example.com'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() self.assertEqual(arghelper.trusted_keys, None) arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, tk_default) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk1_file: tk1_file.write(tk1.encode('utf-8')) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk2_file: tk2_file.write(tk2.encode('utf-8')) try: args = ['-t', tk1_file.name, '-t', tk2_file.name, 'example.com'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) 
arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, tk_explicit) for f in arghelper.args.trusted_keys_file: f.close() args = ['-t', '/dev/null', 'example.com'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, []) for f in arghelper.args.trusted_keys_file: f.close() finally: for tmpfile in (tk1_file, tk2_file): os.remove(tmpfile.name) def test_option_combination_errors(self): # Names file and command-line domain names are mutually exclusive args = ['-f', '/dev/null', 'example.com'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.names_file.close() # Names file and command-line domain names are mutually exclusive args = ['-O', '-o', '/dev/null'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.output_file.close() # But this is allowed args = ['-o', '/dev/null'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.check_args() arghelper.args.output_file.close() # So is this args = ['-O'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.check_args() def test_output_format(self): args = ['-T', 'png', '-o', 'foo.dot'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'png') arghelper.args.output_file.close() args = ['-o', 'foo.dot'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) 
arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'dot') arghelper.args.output_file.close() args = ['-o', 'foo.png'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'png') arghelper.args.output_file.close() args = ['-o', 'foo.html'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'html') arghelper.args.output_file.close() args = ['-o', 'foo.svg'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'svg') arghelper.args.output_file.close() args = ['-o', 'foo.xyz'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.set_kwargs() arghelper.args.output_file.close() args = ['-o', 'png'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.set_kwargs() arghelper.args.output_file.close() args = ['-o', '-'] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'dot') args = [] arghelper = GraphArgHelper(self.logger) arghelper.build_parser('graph') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.output_format, 'dot') if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_63_dnsviz_grok_options.py0000644000175000017500000002676215001472663021357 0ustar00caseycaseyimport argparse import datetime import gzip import importlib import io import logging import os import 
subprocess import tempfile import unittest import dns.name, dns.rdatatype, dns.rrset, dns.zone from dnsviz.format import utc from dnsviz.util import get_default_trusted_keys from vars import * mod = importlib.import_module('dnsviz.commands.grok') GrokArgHelper = getattr(mod, 'GrokArgHelper') AnalysisInputError = getattr(mod, 'AnalysisInputError') EXAMPLE_AUTHORITATIVE = get_probe_output_auth_file('unsigned') class DNSVizGrokOptionsTestCase(unittest.TestCase): def setUp(self): self.logger = logging.getLogger() for handler in self.logger.handlers: self.logger.removeHandler(handler) self.logger.addHandler(logging.NullHandler()) def test_integer_list(self): arg1 = '1,2,3,4,5' arg1_with_spaces = ' 1 , 2 , 3 , 4 , 5 ' arg2 = '1' arg3 = '1,A' arg4_empty = '' arg4_empty_spaces = ' ' int_list1 = [1,2,3,4,5] int_list2 = [1] empty_list = [] int_set1 = set([1,2,3,4,5]) int_set2 = set([1]) empty_set = set([]) self.assertEqual(GrokArgHelper.comma_separated_ints(arg1), int_list1) self.assertEqual(GrokArgHelper.comma_separated_ints(arg1_with_spaces), int_list1) self.assertEqual(GrokArgHelper.comma_separated_ints(arg2), int_list2) self.assertEqual(GrokArgHelper.comma_separated_ints(arg4_empty), empty_list) self.assertEqual(GrokArgHelper.comma_separated_ints(arg4_empty_spaces), empty_list) self.assertEqual(GrokArgHelper.comma_separated_ints_set(arg1), int_set1) self.assertEqual(GrokArgHelper.comma_separated_ints_set(arg1_with_spaces), int_set1) self.assertEqual(GrokArgHelper.comma_separated_ints_set(arg2), int_set2) self.assertEqual(GrokArgHelper.comma_separated_ints_set(arg4_empty), empty_set) self.assertEqual(GrokArgHelper.comma_separated_ints_set(arg4_empty_spaces), empty_set) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): GrokArgHelper.comma_separated_ints(arg3) def test_valid_domain_name(self): arg1 = '.' 
arg2 = 'www.example.com' arg3 = 'www..example.com' self.assertEqual(GrokArgHelper.valid_domain_name(arg1), dns.name.from_text(arg1)) self.assertEqual(GrokArgHelper.valid_domain_name(arg2), dns.name.from_text(arg2)) # invalid domain name with self.assertRaises(argparse.ArgumentTypeError): GrokArgHelper.valid_domain_name(arg3) def test_ingest_input(self): with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_bad_json: example_bad_json.write(b'{') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_no_version: example_no_version.write(b'{}') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_1: example_invalid_version_1.write(b'{ "_meta._dnsviz.": { "version": 1.11 } }') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_2: example_invalid_version_2.write(b'{ "_meta._dnsviz.": { "version": 5.0 } }') try: args = ['-r', EXAMPLE_AUTHORITATIVE] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.ingest_input() arghelper.args.input_file.close() # Bad json args = ['-r', example_bad_json.name] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # No version args = ['-r', example_no_version.name] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_1.name] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_2.name] arghelper = 
GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() finally: for tmpfile in (example_bad_json, example_no_version, \ example_invalid_version_1, example_invalid_version_2): os.remove(tmpfile.name) def test_ingest_names(self): args = ['example.com', 'example.net'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as names_file: names_file.write(b'example.com\nexample.net\n') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_names_only: example_names_only.write(b'{ "_meta._dnsviz.": { "version": 1.2, "names": [ "example.com.", "example.net.", "example.org." ] } }') try: args = ['-f', names_file.name] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) arghelper.args.names_file.close() args = ['-r', example_names_only.name] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net'), dns.name.from_text('example.org')]) arghelper.args.input_file.close() args = ['-r', example_names_only.name, 'example.com'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com')]) arghelper.args.input_file.close() finally: for tmpfile in 
(names_file, example_names_only): os.remove(tmpfile.name) def test_trusted_keys_file(self): tk1 = 'example.com. IN DNSKEY 256 3 7 AwEAAZ2YEuBl4X58v1CezDfZjT1viYn5kY3MF3lSDjvHjMZ6gJlYt4Qq oIdpChifmeJldEX9/wPc04Tg7MlEfV3m0x2j80dMyObM0FZTxzMgbTFk Zs0AWrDXELieGkFZv1FB9YoxSX2XqvpFxwvPyyszUtCy/c5hrb6vfKRB Jh+qIO+NsNrl6O8NiYjWWNjdiFw+c2BxzpArQoaA+rcoyDYwH4xGpvTw YLnE9HmkwTSQuwASkgWgX3KgTmsDEw4I0P5Tk+wvmNnaqDhmFMHJK5Oh 92wUX+ppxxSgUx4UIJmftzi7sCg0qekIYUf99Dkn7OlC8X0rjj+xO4cD hbTjGkxmsD0=' tk2 = 'example.com. IN DNSKEY 256 3 7 AwEAAaerI6CXvvG6U3UxkB0PXj+ORyGFtABYJ6JG3NL6w1KKlZl+73AS aPEEa7SXeuWmAWE1N3rsbnrMBvepBXkCbP609eoo2mJ8bsozT/NNwSSc FP1Ddw4wxpZAC/+/K736rF1HbI3ROS/rBTr7RW6rWzcyPbYFuUMVzrAM ZSJNJsTDcmyGc5Is3cFzNcrd3/Gmcjt8TKMmGq51HXWzFvxro7EH6aOl K6G4O4+mzaUKp91mg7DAVhX8yXnadXUZQ4yDfLzSleYQ2TroQqeSgI3X m/gUoACm3ELUOr84TmIKZ67X/zBTx8tHC5iBWY2tbIKqiJY7I4/aW4S4 NraCSRbDpbM=' tk1_rdata = ' '.join(tk1.split()[3:]) tk2_rdata = ' '.join(tk2.split()[3:]) tk_explicit = [(dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk1_rdata)), (dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk2_rdata))] args = ['example.com'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() self.assertEqual(arghelper.trusted_keys, None) arghelper.update_trusted_key_info() self.assertEqual(arghelper.trusted_keys, []) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk1_file: tk1_file.write(tk1.encode('utf-8')) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk2_file: tk2_file.write(tk2.encode('utf-8')) try: args = ['-t', tk1_file.name, '-t', tk2_file.name, 'example.com'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info() self.assertEqual(arghelper.trusted_keys, 
tk_explicit) for f in arghelper.args.trusted_keys_file: f.close() args = ['-t', '/dev/null', 'example.com'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info() self.assertEqual(arghelper.trusted_keys, []) for f in arghelper.args.trusted_keys_file: f.close() finally: for tmpfile in (tk1_file, tk2_file): os.remove(tmpfile.name) def test_option_combination_errors(self): # Names file and command-line domain names are mutually exclusive args = ['-f', '/dev/null', 'example.com'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.names_file.close() def test_log_level(self): # Names file and command-line domain names are mutually exclusive args = [] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.log_level, logging.DEBUG) args = ['-l', 'info'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.log_level, logging.INFO) args = ['-l', 'warning'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.log_level, logging.WARNING) args = ['-l', 'error'] arghelper = GrokArgHelper(self.logger) arghelper.build_parser('grok') arghelper.parse_args(args) arghelper.set_kwargs() self.assertEqual(arghelper.log_level, logging.ERROR) if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_64_dnsviz_print_options.py0000644000175000017500000003060615001472663021542 0ustar00caseycaseyimport argparse import binascii import datetime import gzip import 
importlib import io import logging import os import subprocess import tempfile import unittest import dns.name, dns.rdatatype, dns.rrset, dns.zone from dnsviz.format import utc from dnsviz.util import get_default_trusted_keys from vars import * mod = importlib.import_module('dnsviz.commands.print') PrintArgHelper = getattr(mod, 'PrintArgHelper') AnalysisInputError = getattr(mod, 'AnalysisInputError') EXAMPLE_AUTHORITATIVE = get_probe_output_auth_file('unsigned') class DNSVizPrintOptionsTestCase(unittest.TestCase): def setUp(self): self.logger = logging.getLogger() for handler in self.logger.handlers: self.logger.removeHandler(handler) self.logger.addHandler(logging.NullHandler()) def test_rrtype_list(self): arg1 = 'A,AAAA,MX,CNAME' arg1_with_spaces = ' A , AAAA , MX , CNAME ' arg2 = 'A' arg3 = 'A,BLAH' arg4_empty = '' arg4_empty_spaces = ' ' type_list1 = [dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.MX, dns.rdatatype.CNAME] type_list2 = [dns.rdatatype.A] empty_list = [] self.assertEqual(PrintArgHelper.comma_separated_dns_types(arg1), type_list1) self.assertEqual(PrintArgHelper.comma_separated_dns_types(arg1_with_spaces), type_list1) self.assertEqual(PrintArgHelper.comma_separated_dns_types(arg2), type_list2) self.assertEqual(PrintArgHelper.comma_separated_dns_types(arg4_empty), empty_list) self.assertEqual(PrintArgHelper.comma_separated_dns_types(arg4_empty_spaces), empty_list) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): PrintArgHelper.comma_separated_dns_types(arg3) def test_integer_list(self): arg1 = '1,2,3,4,5' arg1_with_spaces = ' 1 , 2 , 3 , 4 , 5 ' arg2 = '1' arg3 = '1,A' arg4_empty = '' arg4_empty_spaces = ' ' int_list1 = [1,2,3,4,5] int_list2 = [1] empty_list = [] int_set1 = set([1,2,3,4,5]) int_set2 = set([1]) empty_set = set([]) self.assertEqual(PrintArgHelper.comma_separated_ints(arg1), int_list1) self.assertEqual(PrintArgHelper.comma_separated_ints(arg1_with_spaces), int_list1) 
self.assertEqual(PrintArgHelper.comma_separated_ints(arg2), int_list2) self.assertEqual(PrintArgHelper.comma_separated_ints(arg4_empty), empty_list) self.assertEqual(PrintArgHelper.comma_separated_ints(arg4_empty_spaces), empty_list) self.assertEqual(PrintArgHelper.comma_separated_ints_set(arg1), int_set1) self.assertEqual(PrintArgHelper.comma_separated_ints_set(arg1_with_spaces), int_set1) self.assertEqual(PrintArgHelper.comma_separated_ints_set(arg2), int_set2) self.assertEqual(PrintArgHelper.comma_separated_ints_set(arg4_empty), empty_set) self.assertEqual(PrintArgHelper.comma_separated_ints_set(arg4_empty_spaces), empty_set) # invalid schema with self.assertRaises(argparse.ArgumentTypeError): PrintArgHelper.comma_separated_ints(arg3) def test_valid_domain_name(self): arg1 = '.' arg2 = 'www.example.com' arg3 = 'www..example.com' self.assertEqual(PrintArgHelper.valid_domain_name(arg1), dns.name.from_text(arg1)) self.assertEqual(PrintArgHelper.valid_domain_name(arg2), dns.name.from_text(arg2)) # invalid domain name with self.assertRaises(argparse.ArgumentTypeError): PrintArgHelper.valid_domain_name(arg3) def test_ingest_input(self): with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_bad_json: example_bad_json.write(b'{') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_no_version: example_no_version.write(b'{}') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_1: example_invalid_version_1.write(b'{ "_meta._dnsviz.": { "version": 1.11 } }') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_invalid_version_2: example_invalid_version_2.write(b'{ "_meta._dnsviz.": { "version": 5.0 } }') try: args = ['-r', EXAMPLE_AUTHORITATIVE] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.ingest_input() arghelper.args.input_file.close() # Bad json args = ['-r', example_bad_json.name] 
arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # No version args = ['-r', example_no_version.name] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_1.name] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() # Invalid version args = ['-r', example_invalid_version_2.name] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(AnalysisInputError): arghelper.ingest_input() arghelper.args.input_file.close() finally: for tmpfile in (example_bad_json, example_no_version, \ example_invalid_version_1, example_invalid_version_2): os.remove(tmpfile.name) def test_ingest_names(self): args = ['example.com', 'example.net'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as names_file: names_file.write(b'example.com\nexample.net\n') with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as example_names_only: example_names_only.write(b'{ "_meta._dnsviz.": { "version": 1.2, "names": [ "example.com.", "example.net.", "example.org." 
] } }') try: args = ['-f', names_file.name] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net')]) arghelper.args.names_file.close() args = ['-r', example_names_only.name] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com'), dns.name.from_text('example.net'), dns.name.from_text('example.org')]) arghelper.args.input_file.close() args = ['-r', example_names_only.name, 'example.com'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.ingest_input() arghelper.ingest_names() self.assertEqual(list(arghelper.names), [dns.name.from_text('example.com')]) arghelper.args.input_file.close() finally: for tmpfile in (names_file, example_names_only): os.remove(tmpfile.name) def test_trusted_keys_file(self): tk1 = 'example.com. IN DNSKEY 256 3 7 AwEAAZ2YEuBl4X58v1CezDfZjT1viYn5kY3MF3lSDjvHjMZ6gJlYt4Qq oIdpChifmeJldEX9/wPc04Tg7MlEfV3m0x2j80dMyObM0FZTxzMgbTFk Zs0AWrDXELieGkFZv1FB9YoxSX2XqvpFxwvPyyszUtCy/c5hrb6vfKRB Jh+qIO+NsNrl6O8NiYjWWNjdiFw+c2BxzpArQoaA+rcoyDYwH4xGpvTw YLnE9HmkwTSQuwASkgWgX3KgTmsDEw4I0P5Tk+wvmNnaqDhmFMHJK5Oh 92wUX+ppxxSgUx4UIJmftzi7sCg0qekIYUf99Dkn7OlC8X0rjj+xO4cD hbTjGkxmsD0=' tk2 = 'example.com. 
IN DNSKEY 256 3 7 AwEAAaerI6CXvvG6U3UxkB0PXj+ORyGFtABYJ6JG3NL6w1KKlZl+73AS aPEEa7SXeuWmAWE1N3rsbnrMBvepBXkCbP609eoo2mJ8bsozT/NNwSSc FP1Ddw4wxpZAC/+/K736rF1HbI3ROS/rBTr7RW6rWzcyPbYFuUMVzrAM ZSJNJsTDcmyGc5Is3cFzNcrd3/Gmcjt8TKMmGq51HXWzFvxro7EH6aOl K6G4O4+mzaUKp91mg7DAVhX8yXnadXUZQ4yDfLzSleYQ2TroQqeSgI3X m/gUoACm3ELUOr84TmIKZ67X/zBTx8tHC5iBWY2tbIKqiJY7I4/aW4S4 NraCSRbDpbM=' tk1_rdata = ' '.join(tk1.split()[3:]) tk2_rdata = ' '.join(tk2.split()[3:]) tk_explicit = [(dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk1_rdata)), (dns.name.from_text('example.com'), dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.DNSKEY, tk2_rdata))] now = datetime.datetime.now(utc) tk_default = get_default_trusted_keys(now) args = ['example.com'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() self.assertEqual(arghelper.trusted_keys, None) arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, tk_default) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk1_file: tk1_file.write(tk1.encode('utf-8')) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as tk2_file: tk2_file.write(tk2.encode('utf-8')) try: args = ['-t', tk1_file.name, '-t', tk2_file.name, 'example.com'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, tk_explicit) for f in arghelper.args.trusted_keys_file: f.close() args = ['-t', '/dev/null', 'example.com'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.aggregate_trusted_key_info() arghelper.update_trusted_key_info(now) self.assertEqual(arghelper.trusted_keys, []) for f in arghelper.args.trusted_keys_file: f.close() finally: for tmpfile in 
(tk1_file, tk2_file): os.remove(tmpfile.name) def test_option_combination_errors(self): # Names file and command-line domain names are mutually exclusive args = ['-f', '/dev/null', 'example.com'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.names_file.close() # Names file and command-line domain names are mutually exclusive args = ['-O', '-o', '/dev/null'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) with self.assertRaises(argparse.ArgumentTypeError): arghelper.check_args() arghelper.args.output_file.close() # But this is allowed args = ['-o', '/dev/null'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.check_args() arghelper.args.output_file.close() # So is this args = ['-O'] arghelper = PrintArgHelper(self.logger) arghelper.build_parser('print') arghelper.parse_args(args) arghelper.check_args() if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_71_dnsviz_probe_run_offline.py0000644000175000017500000000775615001472663022340 0ustar00caseycaseyimport io import os import subprocess import tempfile import unittest from vars import * class DNSProbeRunOfflineTestCase(unittest.TestCase): def setUp(self): #TODO #with gzip.open(EXAMPLE_RECURSIVE, 'rb') as example_rec_in: # with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as self.example_rec_out: # self.example_rec_out.write(example_rec_in.read()) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as self.names_file: self.names_file.write((ZONE_ORIGIN + '\n').encode('utf-8')) self.output = tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) self.output.close() self.run_cwd = tempfile.mkdtemp(prefix='dnsviz') 
def tearDown(self): os.remove(self.names_file.name) os.remove(self.output.name) subprocess.check_call(['rm', '-rf', self.run_cwd]) def assertReturnCode(self, retcode): if retcode == 5: self.skipTest("No resolvers available") else: self.assertEqual(retcode, 0) def _probe_zone(self, subcat, signed): zone_file = get_zone_file(subcat, signed) delegation_file = get_delegation_file(subcat) output_file = get_probe_output_auth_file(subcat) cmd = ['dnsviz', 'probe', '-d', '0', '-A', '-x', '%s:%s' % (ZONE_ORIGIN, zone_file), '-N', '%s:%s' % (ZONE_ORIGIN, delegation_file)] if signed: cmd.extend(('-D', '%s:%s' % (ZONE_ORIGIN, delegation_file))) cmd.append(ZONE_ORIGIN) with io.open(output_file, 'wb') as fh: self.assertReturnCode(subprocess.call(cmd, stdout=fh)) def test_dnsviz_probe_auth_local_signed_nsec(self): self._probe_zone('signed-nsec', True) def test_dnsviz_probe_auth_local_signed_nsec3(self): self._probe_zone('signed-nsec3', True) def test_dnsviz_probe_input(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as fh_out: with io.open(input_file, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'probe', '-d', '0', '-r', '-', ZONE_ORIGIN], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) self.assertReturnCode(p.returncode) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'probe', '-d', '0', '-r', input_file, ZONE_ORIGIN], stdout=fh), 0) def test_dnsviz_probe_names_input(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as fh: ret = subprocess.call(['dnsviz', 'probe', '-d', '0', '-r', input_file, '-f', self.names_file.name], stdout=fh) self.assertReturnCode(ret) with io.open(self.output.name, 'wb') as fh_out: with io.open(self.names_file.name, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'probe', '-d', '0', '-r', input_file, '-f', '-'], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) 
self.assertEqual(p.returncode, 0) def test_dnsviz_probe_output(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as fh: ret = subprocess.call(['dnsviz', 'probe', '-d', '0', '-r', input_file, ZONE_ORIGIN], cwd=self.run_cwd, stdout=fh) self.assertReturnCode(ret) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'probe', '-d', '0', '-r', input_file, '-o', '-', ZONE_ORIGIN], cwd=self.run_cwd, stdout=fh), 0) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'probe', '-d', '0', '-r', input_file, '-o', 'all.json', ZONE_ORIGIN], cwd=self.run_cwd, stdout=fh), 0) self.assertTrue(os.path.exists(os.path.join(self.run_cwd, 'all.json'))) if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1745253811.0 dnsviz-0.11.1/tests/test_72_dnsviz_graph_run.py0000644000175000017500000001302515001472663020613 0ustar00caseycaseyimport io import os import subprocess import tempfile import unittest from vars import * class DNSGraphRunTestCase(unittest.TestCase): def setUp(self): self.devnull = io.open('/dev/null', 'wb') #TODO #with gzip.open(EXAMPLE_RECURSIVE, 'rb') as example_rec_in: # with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as self.example_rec_out: # self.example_rec_out.write(example_rec_in.read()) with tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) as self.names_file: self.names_file.write((ZONE_ORIGIN + '\n').encode('utf-8')) self.output = tempfile.NamedTemporaryFile('wb', prefix='dnsviz', delete=False) self.output.close() self.run_cwd = tempfile.mkdtemp(prefix='dnsviz') def tearDown(self): self.devnull.close() os.remove(self.names_file.name) os.remove(self.output.name) subprocess.check_call(['rm', '-rf', self.run_cwd]) def test_dnsviz_graph_input(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as 
fh_out: with io.open(input_file, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'graph'], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) self.assertEqual(p.returncode, 0) with io.open(self.output.name, 'wb') as fh_out: with io.open(input_file, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'graph', '-r', '-'], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) self.assertEqual(p.returncode, 0) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file], stdout=fh), 0) def test_dnsviz_graph_names_input(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file, '-f', self.names_file.name], stdout=fh), 0) with io.open(self.output.name, 'wb') as fh_out: with io.open(self.names_file.name, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'graph', '-r', input_file, '-f', '-'], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) self.assertEqual(p.returncode, 0) def test_dnsviz_graph_tk_input(self): input_file = get_probe_output_auth_file('signed-nsec') tk_file = get_tk_file('signed-nsec') with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file, '-t', tk_file], stdout=fh), 0) with io.open(self.output.name, 'wb') as fh_out: with io.open(tk_file, 'rb') as fh_in: p = subprocess.Popen(['dnsviz', 'graph', '-r', input_file, '-t', '-'], stdin=subprocess.PIPE, stdout=fh_out) p.communicate(fh_in.read()) self.assertEqual(p.returncode, 0) def test_dnsviz_graph_output(self): input_file = get_probe_output_auth_file('signed-nsec') with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file], cwd=self.run_cwd, stdout=fh), 0) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file, '-Tdot', '-o', '-'], 
cwd=self.run_cwd, stdout=fh), 0) with io.open(self.output.name, 'wb') as fh: self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file, '-o', 'all.dot'], cwd=self.run_cwd, stdout=fh), 0) self.assertTrue(os.path.exists(os.path.join(self.run_cwd, 'all.dot'))) self.assertFalse(os.path.exists(os.path.join(self.run_cwd, ZONE_ORIGIN + '.dot'))) self.assertEqual(subprocess.call(['dnsviz', 'graph', '-r', input_file, '-O'], cwd=self.run_cwd), 0) self.assertTrue(os.path.exists(os.path.join(self.run_cwd, ZONE_ORIGIN + '.dot'))) def test_dnsviz_graph_output_format(self): input_file = get_probe_output_auth_file('signed-nsec') magic_codes_mapping = { 'dot': b'digraph', 'png': b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A', 'svg': b'