asn-0.78.0/.editorconfig:

root = true

[*]
indent_style = tab
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

asn-0.78.0/.github/workflows/docker.yaml:

name: docker-build-and-push

on:
  workflow_dispatch:
  push:
    tags:
      - 'v*'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: nitefood/asn
      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

asn-0.78.0/Dockerfile:

FROM alpine:3.20.2

ENV IQS_TOKEN ""
ENV IPINFO_TOKEN ""
ENV CLOUDFLARE_TOKEN ""

# - Prepare the config directory
# - Create the entrypoint script that writes the API tokens to the config files
# - Install prerequisite packages
RUN mkdir -p /etc/asn && \
    chown nobody:nobody /etc/asn/ && \
    printf '%s\n' '#!/usr/bin/env bash' \
        '[[ -n "$IQS_TOKEN" ]] && echo "$IQS_TOKEN" > /etc/asn/iqs_token' \
        '[[ -n "$IPINFO_TOKEN" ]] && echo "$IPINFO_TOKEN" > /etc/asn/ipinfo_token' \
        '[[ -n "$CLOUDFLARE_TOKEN" ]] && echo "$CLOUDFLARE_TOKEN" > /etc/asn/cloudflare_token' \
        'exec "$@"' > /entrypoint.sh && \
    chmod +x /entrypoint.sh && \
    apk update && \
    apk add --no-cache aha bash bind-tools coreutils curl grepcidr3 ipcalc jq mtr ncurses nmap nmap-ncat whois

COPY asn /bin/asn
RUN chmod 0755 /bin/asn

# Start the service by default
USER nobody
EXPOSE 49200/tcp
ENTRYPOINT ["/entrypoint.sh", "/bin/asn"]
CMD ["-l", "0.0.0.0"]

asn-0.78.0/LICENSE:

MIT License

Copyright (c) 2020 Adriano Provvisiero

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

asn-0.78.0/README.md:

# ASN Lookup Tool and Traceroute Server

[![Packaging status](https://repology.org/badge/vertical-allrepos/asn.svg)](https://repology.org/project/asn/versions)

#### Container support:

[![Docker](https://img.shields.io/badge/docker-%230db7ed.svg?style=for-the-badge&logo=docker&logoColor=white)](#running-the-script-from-a-container) [![Google Cloud](https://img.shields.io/badge/GoogleCloud-%234285F4.svg?style=for-the-badge&logo=google-cloud&logoColor=white)](#running-the-script-from-a-container)

#### OS support:

[![Debian](https://img.shields.io/badge/Debian-D70A53?style=for-the-badge&logo=debian&logoColor=white)](#installation) [![Ubuntu](https://img.shields.io/badge/Ubuntu-E95420?style=for-the-badge&logo=ubuntu&logoColor=white)](#installation) [![Kali](https://img.shields.io/badge/Kali-268BEE?style=for-the-badge&logo=kalilinux&logoColor=white)](#installation) [![Cent OS](https://img.shields.io/badge/cent%20os-002260?style=for-the-badge&logo=centos&logoColor=F0F0F0)](#installation) [![Red Hat](https://img.shields.io/badge/Red%20Hat-EE0000?style=for-the-badge&logo=redhat&logoColor=white)](#installation) [![Rocky Linux](https://img.shields.io/badge/-Rocky%20Linux-%2310B981?style=for-the-badge&logo=rockylinux&logoColor=white)](#installation) [![Fedora](https://img.shields.io/badge/Fedora-294172?style=for-the-badge&logo=fedora&logoColor=white)](#installation) [![Arch](https://img.shields.io/badge/Arch%20Linux-1793D1?logo=arch-linux&logoColor=fff&style=for-the-badge)](#installation) [![Manjaro](https://img.shields.io/badge/Manjaro-35BF5C?style=for-the-badge&logo=Manjaro&logoColor=white)](#installation) [![Alpine Linux](https://img.shields.io/badge/Alpine_Linux-%230D597F.svg?style=for-the-badge&logo=alpine-linux&logoColor=white)](#installation) [![openSUSE](https://img.shields.io/badge/openSUSE-73BA25?logo=opensuse&logoColor=fff&style=for-the-badge)](#installation) [![FreeBSD](https://img.shields.io/badge/-FreeBSD-%23870000?style=for-the-badge&logo=freebsd&logoColor=white)](#installation) [![Nix](https://img.shields.io/badge/NIX-5277C3.svg?style=for-the-badge&logo=NixOS&logoColor=white)](#installation) [![macOS](https://img.shields.io/badge/mac%20os-000000?style=for-the-badge&logo=macos&logoColor=F0F0F0)](#installation) [![Windows](https://img.shields.io/badge/Windows-0078D6?style=for-the-badge&logo=windows&logoColor=white)](#installation) [![Raspberry Pi](https://img.shields.io/badge/-RaspberryPi-C51A4A?style=for-the-badge&logo=Raspberry-Pi)](#installation)

### Table of contents:

* [Description](#description)
* [Screenshots](#screenshots)
* [Running the script from a container](#running-the-script-from-a-container)
* [Installation](#installation)
  * _Optional: adding your [API tokens](#api-tokens) to improve functionalities_
* [Usage (as a command line tool)](#usage)
* [Usage (as a lookup & traceroute server)](#running-lookups-from-the-browser)
* [Usage (as a lookup API with JSON output)](#json-output-and-api-mode)

---

## Description

ASN / RPKI validity / BGP stats / IPv4v6 / Prefix / ASPath / Organization / IP reputation / IP geolocation / IP fingerprinting / Network recon / lookup tool / Web traceroute server.
This script serves the purpose of having a quick OSINT **command line tool** at one's disposal when investigating network data, which can come in handy in incident response scenarios as well (with features such as [bulk geolocation](#bulk-geolocation-mode) and threat scoring).

It can be used as a **recon tool** by querying Shodan for data about any type of target (CIDR blocks/URLs/single IPs/hostnames). This will quickly give the user a complete breakdown of open ports, known vulnerabilities, known software and hardware running on the target, and more - without ever sending a single packet to the target. JSON output of the results, multiple simultaneous targets and IP list file inputs are also supported. Click [here](#shodan-scanning-recon-mode) for more information about Shodan scanning mode.

It can also be used as a **web-based traceroute server**, by running it in listening mode and launching lookups and traces from a local or remote browser (via a bookmarklet or custom search engine) or terminal (via `curl`, `elinks` or similar tools). Click [here](#running-lookups-from-the-browser) for more information about server mode functionality.

Furthermore, it can serve as a self-hosted lookup **API endpoint** and output JSON-formatted data while running in both interactive and server mode. Click [here](#json-output-and-api-mode) for more information about API mode functionality.

#### Features:

* It will look up relevant Autonomous System information for any given AS number, including:
  * **Organization name and RIR region**
  * **IXP Presence** (*Internet Exchange facilities where the AS is present*)
  * **Global AS rank** (*derived from the size of its customer cone, number of peering relationships and more*)
  * **BGP statistics** (*neighbours count, originated v4/v6 prefix count*)
  * **BGP incident history** (number of *BGP hijacks* and *route leaks* involving the target AS in the past 12 months, as a **victim** or a **hijacker**)
  * **Peering relationships** separated by type (*upstream/downstream/uncertain*), and sorted by observed *path count*, to give more reliable results (so for instance, the first few upstream peers are most likely to be transits). Furthermore, a recap of *transits/peers/customers* amounts (per latest CAIDA data) is displayed.
  * **Announced prefixes** aggregated to the most relevant less-specific `INET(6)NUM` object (actual [LIR allocation](https://www.ripe.net/manage-ips-and-asns/db/support/documentation/ripe-database-documentation/rpsl-object-types/4-2-descriptions-of-primary-objects/4-2-4-description-of-the-inetnum-object)).
* It will perform an **AS path trace** (using [mtr](https://github.com/traviscross/mtr) and retrieving AS data from the results) for single IPs or DNS results, optionally reporting detailed data for each hop, such as RPKI ROA validity, organization/network name, geographic location, etc.
* It will detect **IXPs** (Internet Exchange Points) traversed during the trace, and highlight them for clarity.
* It will attempt to look up all relevant **abuse contacts** for any given IP or prefix.
* It will perform **RPKI validity** lookups for every possible IP. Data is validated using the [RIPEStat RPKI validation API](https://stat.ripe.net/docs/data_api#rpki-validation). For path traces, the tool will match each hop's ASN/Prefix pair (retrieved from the Prefix Whois public server) with relevant published RPKI ROAs.
In case of origin AS mismatch or unallowed more-specific prefixes, it will warn the user of a potential **route leak / BGP hijack** along with the offending AS in the path (requires `-d` option, see below for usage info).
  * *Read more about BGP hijacking [here](https://en.wikipedia.org/wiki/BGP_hijacking).*
  * *Read more about RPKI [here](https://en.wikipedia.org/wiki/Resource_Public_Key_Infrastructure), [here](https://blog.cloudflare.com/rpki/), or [here](https://www.ripe.net/manage-ips-and-asns/resource-management/certification).*
* It will perform **IP geolocation** lookups according to the logic described [below](#geolocation).
  * geolocation can be performed in **bulk mode**. See [here](#bulk-geolocation-mode) for more info.
  * the script can also **map all IPv4/IPv6 CIDR blocks** allocated to any given country, by querying data from Marcel Bischoff's [country-ip-blocks](https://github.com/herrbischoff/country-ip-blocks) repo. See [below](#mapping-the-ipv4v6-address-space-of-specific-countries) for more info.
* It will perform **IP reputation, noise classification** and in-depth **threat analysis** reporting (especially useful when investigating foreign IPs from log files).
* It will perform **IP fingerprinting** using Shodan's [InternetDB API](https://internetdb.shodan.io/) and report any known **vulnerabilities**, **open ports** and **services/operating system/hardware** pertaining to target IPs and individual trace hops (detailed traces only).
  * Directly querying Shodan for any type of target (including CIDR blocks) is also possible. More information [here](#shodan-scanning-recon-mode) about how to use the script as a recon tool.
* It will perform **IP type identification** (*Anycast IP/Mobile network/Proxy host/Datacenter or hosting provider/IXP prefix*) for target IPs and individual trace hops. Broad type classification comes from [ip-api](https://ip-api.com), while detailed DC+region identification comes from [incolumitas.com](https://incolumitas.com/pages/Datacenter-IP-API/)
  * It will also identify **bogon** addresses being traversed and classify them according to the relevant RFC (Private address space/CGN space/Test address/link-local/reserved/etc.)
* It is possible to search by **organization name** in order to retrieve a list of IPv4/6 network ranges related to a given company. A multiple choice menu will be presented if more than one organization matches the search query.
* It is possible to search for **ASNs matching a given name**, in order to map the ASNs for a given organization. The list will be enriched by each result's AS rank and useful tags highlighting the highest-ranking ASNs found.
* It is possible to quickly identify the **transit/upstream AS network(s)** for a given prefix, through analysis of observed BGP updates and ASPATHs.
  * the tool will also inform the user when a prefix is likely coming from a large tier-1 or multihomed network.
* Lookup data can be integrated by third party tools by choosing **JSON output** and parsing the results externally, turning the script into a lookup API endpoint.

Screenshots for every lookup option are below.
The script uses the following services for data retrieval: * [Team Cymru](https://team-cymru.com/community-services/ip-asn-mapping/) * [The Prefix WhoIs Project](https://pwhois.org/) * [PeeringDB](https://www.peeringdb.com/) * [CAIDA ASRank](https://asrank.caida.org/) * [ifconfig.co](https://ifconfig.co/) * [ipify](https://www.ipify.org/) * [ipinfo.io](https://ipinfo.io) * [RIPEStat](https://stat.ripe.net/) * [RIPE IPmap](https://ipmap.ripe.net/) * [ip-api](https://ip-api.com/) * [StopForumSpam](https://www.stopforumspam.com/) * [IP Quality Score](https://www.ipqualityscore.com) * [Cloudflare Radar](https://radar.cloudflare.com/) * [ISC DSHIELD](https://isc.sans.edu/) * [GreyNoise](https://greynoise.io) * [Shodan](https://www.shodan.io/) * [NIST National Vulnerability Database](https://nvd.nist.gov/) * [Incolumitas.com](https://incolumitas.com/pages/Datacenter-IP-API/) * [RestCountries](https://restcountries.com/) * Marcel Bischoff's [country-ip-blocks](https://github.com/herrbischoff/country-ip-blocks) repo It also provides hyperlinks (in [server](#running-lookups-from-the-browser) mode) to the following external services when appropriate: * [HE.net](https://bgp.he.net) * [BGPView](https://bgpview.io) * [BGPTools](https://bgp.tools) * [ipinfo.io](https://ipinfo.io) * [Host.io](https://host.io) * [Cloudflare Radar](https://radar.cloudflare.com/) Requires Bash v4.2+. Tested on: * Linux * FreeBSD * Windows (WSL2, Cygwin) * MacOS *(thanks [Antonio Prado](https://github.com/Antonio-Prado) and Alessandro Barisone)* - - - ## Screenshots ### Generic usage * *IPv4 lookup with IP type detection (Anycast, Hosting/DC) and classification as good* ![ipv4lookup](https://github.com/nitefood/asn/assets/24555810/81def31a-e080-4b01-9aa2-25b979062963) * *IPv4 lookup (bad reputation IP) with threat analysis/scoring, CPE/CVE identification and open ports reporting* ![ipv4badlookup](https://github.com/nitefood/asn/assets/24555810/302dc69f-7026-4f41-afe6-e24c4d0a514a) * *IP fingerprinting with advanced datacenter+region identification, known vulnerabilities affecting the target and honeypot identification according to Shodan data* ![](https://user-images.githubusercontent.com/24555810/159185618-fa20f45c-91b4-45b4-ad82-02becc648fa5.png) * *IPv6 lookup* ![ipv6lookup](https://user-images.githubusercontent.com/24555810/159185780-44a1af6e-7aa9-4f52-b04c-55a314b2a5e3.png) * *Autonomous system number lookup with AS ranking, operational region, BGP stats and incident history, peering and prefix informations* ![asnlookup](https://github.com/user-attachments/assets/6afdd2cf-a454-4607-ac17-b62fe78ba816) * *Hostname/URL lookup* ![hostnamelookup](https://github.com/nitefood/asn/assets/24555810/f6c71594-d38a-4c7c-9142-5aa1e203f3fa) ### AS Path tracing * *ASPath trace to www.github.com* ![pathtrace](https://github.com/nitefood/asn/assets/24555810/8dfa68ba-de39-47f4-96d3-618210197e70) * *ASPath trace traversing both an unannounced PNI prefix (FASTWEB->SWISSCOM at hop 11) and an IXP (SWISSCOM -> RCN through Equinix Ashburn at hop 16)* ![pathtrace_pni_ixp](https://user-images.githubusercontent.com/24555810/100301579-b4d00c00-2f98-11eb-82c5-047c190ffcd6.png) * *Detailed ASPath trace to 8.8.8.8 traversing the Milan Internet Exchange (MIX) IXP peering LAN at hop 6* ![detailed_pathtrace](https://user-images.githubusercontent.com/24555810/117335188-28a50780-ae9b-11eb-98d9-cfd3bc2f1295.png) ### Network search by organization * *Organization search for "github"* 
![search_by_org](https://user-images.githubusercontent.com/24555810/99845076-5b20a980-2b74-11eb-9312-986867034cc9.png) ### Shodan scanning * *Scanning for Shodan informations for a list of IPs* ![shodanscan](https://github.com/nitefood/asn/assets/24555810/550d3004-9cbc-404e-b74c-9248a2d0bb0f) ### Country IPv4/IPv6 CIDR mapping * *Displaying a list of CIDR blocks allocated to Jamaica* ![country_cidr](https://user-images.githubusercontent.com/24555810/163061676-bae440c6-ff0d-478e-8799-98a927600964.png) ### Bulk Geolocation / country stats * *Performing bulk extraction, geolocation and stats for IPs from a logfile* ![bulk_geolocation](https://user-images.githubusercontent.com/24555810/162656545-11db3759-6741-44e0-bcfb-f3542482415f.png) ### Suggested ASNs search * *Suggested ASNs (and respective AS rankings) for "google"* ![asnsuggest](https://github.com/nitefood/asn/assets/24555810/c8bd8cab-9894-4886-94b5-bfd6bb0b9d8e) ### Transit/Upstream lookup * *A large tier-1 network (**COMCAST**, AS7922) prefix is reachable through multiple other tier-1 networks like **COGENT** (AS174), **LEVEL3** (AS3356) etc. - likely through settlement-free peering rather than BGP transit:* ![upstreamfinder_tier1_network](https://github.com/nitefood/asn/assets/24555810/4a6a00d9-7a8c-4765-a4d6-ea99aa516200) * *Transit identification for a multihomed AS (**AS30036** announces this prefix to **Hurricane** and **GTT** in a balanced way):* ![upstreamfinder_multihoming](https://github.com/nitefood/asn/assets/24555810/6c036a97-83f8-4c37-9ebd-2717c2d53507) * *Preferred transit identification - **Chinese UNICOM AS** is a large network, but likely prefers Russian **TRANSTELECOM** (AS20485) transit over **TELIA** (AS1299), **HURRICANE** (AS6939), **GTT** (AS3257) and others:* ![upstreamfinder_preferred_transit](https://github.com/nitefood/asn/assets/24555810/dbb64ecc-394a-4fbd-8607-5b7f6955b340) - - - ## Running the script from a container To run the script without installing it locally, you have the following options: * **Docker** _(thanks [Gianni Stubbe](https://github.com/33Fraise33), [anarcat](https://github.com/anarcat), [Francesco Colista](https://github.com/fcolista), [arbal](https://github.com/arbal))_ _Note: the Docker image runs by default in server mode, if no parameters are given. This is equivalent to running the tool as `asn -l 0.0.0.0` (run server, bind to all IPv4 interfaces - this is necessary to expose the server port to the host machine). You can run the server with different [options](#syntax) by explicitly passing `-l [options]`. 
It's also possible to pass an [IpQualityScore](#ip-reputation-api-token-ipqualityscore), [ipinfo.io](#geolocation-api-token-ipinfoio) and/or [Cloudflare](#bgp-hijack-and-route-leak-incidents-cloudflare-radar) API token (both client and server runs) by setting, respectively, the `IQS_TOKEN`, `IPINFO_TOKEN` and `CLOUDFLARE_TOKEN` environment variables (examples below) in the container._ Usage examples: - Start server: `docker run -it -p 49200:49200 nitefood/asn` - Client mode: `docker run -it nitefood/asn 1.1.1.1` - Supply an IQS token: `docker run -it -e IQS_TOKEN="xxx" nitefood/asn [...]` - Supply multiple tokens: `docker run -it -e IQS_TOKEN="xxx" -e IPINFO_TOKEN="yyy" -e CLOUDFLARE_TOKEN="zzz" nitefood/asn [...]` * **Google Cloud Shell** _Note: server mode **is supported** out of the box in Cloud Shell, just follow the **bookmarklet** link that will be shown at server launch to access the VM for remote lookups._ **1.** Clone the repository in Cloud Shell by clicking the following button: [![Open in Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/nitefood/asn&ephemeral=true&show=terminal) **2.** Prepare the GCP environment by launching `./cloudshell_bootstrap.sh` **3.** _(OPTIONAL)_ Input your [API tokens](#api-tokens) when requested to enable full script features - - - ## Installation This script requires **BASH v4.2** or later. You can check your version by running from your shell: ``` bash -c 'echo $BASH_VERSION' ``` After installation, you can use the script by running the `asn` command. ### Method 1: Install prerequisites + manual download > *Note: this method is **recommended** as it will always get you the **latest version** of the script.*
STEP 1. Install prerequisite packages

Some packages are required for full functionality: * **Debian 10 / Ubuntu 20.04 (or newer):** ``` apt -y install curl whois bind9-host mtr-tiny jq ipcalc grepcidr nmap ncat aha ``` * **Debian 9 / Ubuntu 18.04 (or older):** ``` apt -y install curl whois bind9-host mtr-tiny jq ipcalc grepcidr nmap git gcc make && \ git clone https://github.com/theZiz/aha.git && \ make install -C aha/ ``` * **CentOS / RHEL / Rocky Linux 9:** ``` dnf -y install epel-release && \ dnf -y install curl whois bind-utils mtr jq nmap nmap-ncat ipcalc aha grepcidr ``` * **CentOS / RHEL / Rocky Linux 8:** *(thanks [Robert Scheck](https://github.com/robert-scheck))* ``` dnf -y install epel-release 'dnf-command(copr)' && \ dnf -y copr enable robert/ipcalc && \ dnf -y install curl whois bind-utils mtr jq nmap nmap-ncat ipcalc aha grepcidr ``` * **CentOS / RHEL 7:** *(thanks [Robert Scheck](https://github.com/robert-scheck))* ``` yum -y install epel-release yum-plugin-copr && \ yum -y copr enable robert/ipcalc && \ yum -y install curl whois bind-utils mtr jq nmap nmap-ncat ipcalc aha grepcidr && \ hash -d ipcalc ``` * **Fedora:** ``` dnf -y install curl whois bind-utils mtr jq nmap nmap-ncat ipcalc aha grepcidr ``` * **openSUSE Leap 15.5 (or newer), openSUSE Tumbleweed** ``` zypper in -y curl whois bind-utils mtr jq nmap ncat ipcalc aha grepcidr ``` * **FreeBSD**: ``` env ASSUME_ALWAYS_YES=YES pkg install bash coreutils curl whois mtr jq ipcalc grepcidr nmap aha ``` * **Windows**: * **using [WSL2](https://docs.microsoft.com/en-us/windows/wsl/about) (recommended):** Install Windows Subsystem for Linux (v2) by following Microsoft's [guide](https://docs.microsoft.com/en-us/windows/wsl/install-win10#manual-installation-steps). On step 6, choose one of the Linux distributions listed above (Ubuntu 20.04 LTS is recommended). Once your WSL2 system is up and running, open a Linux terminal and follow the prerequisite installation instructions above for your distribution of choice. > *Note for WSL2 users: Check [this](https://devblogs.microsoft.com/commandline/systemd-support-is-now-available-in-wsl/) page for details on how to activate **systemd** if you plan to install the [asn service](#optional-installing-the-asn-server-as-a-system-service).* * **using [Cygwin](https://cygwin.com/index.html):** Most of the prerequisite packages listed above for *Debian 10 / Ubuntu 20.04 (or newer)* are obtainable directly with Cygwin's own Setup wizard (or through scripts like *apt-cyg*). You will still have to manually compile (or find a suitable third-party precompiled binary) the *mtr*, *grepcidr* and *aha* tools. Instructions on how to do so can be found directly on the respective projects homepages.
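Before moving on, a quick way to check that everything needed is in place is to probe for the main binaries. This is only a sketch: the list below mirrors the Debian/Ubuntu package set, and a couple of command names (e.g. `host`) may differ slightly on other systems.

```shell
# report any missing prerequisite (adjust the list for your distribution)
for cmd in curl whois host mtr jq ipcalc grepcidr nmap ncat aha; do
  command -v "$cmd" >/dev/null 2>&1 || echo "missing: $cmd"
done
```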

STEP 2. Script download and installation

Afterwards, to install the **asn** script from your shell to **/usr/bin**: `curl "https://raw.githubusercontent.com/nitefood/asn/master/asn" > /usr/bin/asn && chmod 0755 /usr/bin/asn`
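A minimal sanity check after installation could look like the following (the `-n` flag keeps it to a lookup only, skipping the path trace):

```shell
command -v asn          # confirm the script is in your PATH
asn -n 9.9.9.9          # lookup-only query to verify connectivity to the external APIs
```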

### Method 2: Installing a packaged version of the script > *Note: packages may not reflect the latest version, check [Repology](https://repology.org/project/asn/versions) first.* Packaged versions of the tool are available for the following distributions:
Distribution list

* **Debian-based:** *(thanks [Marcos Rodrigues de Carvalho](https://github.com/odaydebian))* > *Debian 13 / Sid*\ > *Ubuntu 24.04 (or newer)*\ > *Kali (rolling)*\ > *Raspbian (testing)* ``` sudo apt update && sudo apt install asn ``` * **Manjaro / Arch Linux:** *(thanks [Worty](https://github.com/worty))* ``` yay -S asn-git ``` * **Alpine Linux 3.18 (or newer)** *(thanks [Francesco Colista](https://github.com/fcolista))* ``` apk add -X https://dl-cdn.alpinelinux.org/alpine/v3.19/community asn ``` * **NixOS** *(thanks [devhell](https://github.com/devhell))* * Package [here](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/networking/asn) * **MacOS** *(using [Homebrew](https://formulae.brew.sh/formula/asn), thanks [filippovitale](https://github.com/filippovitale))* ``` brew install asn ``` >*Note for MacOS users:* > > *Homebrew has a [policy](https://github.com/Homebrew/homebrew-core/issues/35085#issuecomment-447184214) not to install any binary with the **setuid** bit, and mtr (or actually, the mtr-packet helper binary that comes with it) requires to elevate to root to perform traces (good explanations for this can be found [here](https://github.com/traviscross/mtr/issues/204#issuecomment-723961118) and [here](https://github.com/traviscross/mtr/blob/master/SECURITY)). If mtr (and therefore `asn`) traces are not working on your system, you should either run `asn` as root using **sudo**, or set the proper SUID permission bit on the mtr (or better, on the mtr-packet) binary.*
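As a sketch of that last step, assuming a Homebrew-based install (the mtr-packet path is an assumption and may differ on your system, so double-check it first):

```shell
# give mtr-packet the setuid-root bit so unprivileged users can run traces
MTR_PACKET="$(brew --prefix)/sbin/mtr-packet"
sudo chown root "$MTR_PACKET" && sudo chmod u+s "$MTR_PACKET"
```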

### *(Optional)* Installing the *asn server* as a system service >*Note: this step is optional, and these instructions are only for **systemd**-based Linux systems (most current major distributions).* To control the **asn server** with utilities like *systemctl* and *service*, and to enable it to automatically start at boot, follow these steps: 1. create a new file called `/etc/systemd/system/asn.service` with the following content (make sure you edit the *ExecStart* line to match your installation path and desired startup options): ``` [Unit] Description=ASN lookup and traceroute server After=network.target StartLimitIntervalSec=0 [Service] Type=simple Restart=always RestartSec=1 User=nobody ExecStart=/usr/bin/asn -l 0.0.0.0 [Install] WantedBy=multi-user.target ``` 2. Enable the *CAP\_NET\_RAW* capability for the mtr-packet binary: `setcap cap_net_raw+ep $(which mtr-packet)` *Explanation: this will allow mtr-packet to create raw sockets (and thus perform traces) when launched as an unprivileged user (we're setting up the service to run as user nobody for added security), without the requirement of the setuid-root bit and without having to invoke mtr as root. A thorough explanation for this can be found [here](https://github.com/traviscross/mtr/blob/master/SECURITY).* 3. Now you can refer to standard systemd utilities to perform service operations: * To start the service: `systemctl start asn` * To stop the service: `systemctl stop asn` * To check its status and latest logs: `systemctl status asn` * To follow its logging in real time: `journalctl -f -u asn` * To start the service automatically on boot: `systemctl enable asn` * To disable automatic start on boot: `systemctl disable asn` ## API tokens The script can be configured to make use of your API tokens to enhance its functionalities. The currently supported API tokens are: ### Geolocation API token (ipinfo.io)
Geolocation API token details

The geolocation provider of choice for single lookups (i.e. when not running bulk geolocation queries with the `-g` option) is **ipinfo.io**. By default, the script uses the free (no API key) tier that supports up to **1,000** geolocation requests per day. In order to boost this limit (for free) to **50,000** requests per month, an API key token is required. In order to obtain an API token, after [signing up](https://ipinfo.io/signup), the API token can be found in the [token section](https://ipinfo.io/account/token) of your reserved area. Once copied, the token should be written to one of the following files (parsed in that order): `$HOME/.asn/ipinfo_token` or `/etc/asn/ipinfo_token` The `/etc`-based file should be used when running asn in **server mode**. The `$HOME`-based file takes precedence if both files exist, and is ideal for **user mode** (that is, running `asn` interactively from the command line). In order to do so, you can use the following command: ***User mode:*** `TOKEN=""; mkdir "$HOME/.asn/" && echo "$TOKEN" > "$HOME/.asn/ipinfo_token" && chmod -R 600 "$HOME/.asn/"` ***Server mode:*** `TOKEN=""; mkdir "/etc/asn/" && echo "$TOKEN" > "/etc/asn/ipinfo_token" && chmod -R 700 "/etc/asn/" && chown -R nobody /etc/asn/` Either way, `asn` will pick up your token on the next run (no need to restart the service if running in server mode), and use it to query the ipinfo.io API.
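One informal way to confirm the token is being picked up is to run a lookup with debug logging enabled and check that ipinfo.io shows up among the queried URLs (log path shown is the `-v` default; exact log contents may vary by version):

```shell
asn -v -n 1.1.1.1 >/dev/null
grep -c "ipinfo.io" "$HOME/asndebug.log"
```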

### IP reputation API token (IPQualityScore)
IP reputation API token details

The script will perform first-level IPv4/v6 reputation lookups using [StopForumSpam](https://www.stopforumspam.com/), and in case of a match it will perform a second-level, in-depth threat analysis for targets and trace hops using the [IPQualityScore](https://www.ipqualityscore.com/) API. The StopForumSpam API is free and requires no sign-up, and the service aggregates a [huge](https://www.stopforumspam.com/contributors) amount of blacklist feeds. Still, in order to use the IPQualityScore API for in-depth threat reporting, it's necessary to [sign up](https://www.ipqualityscore.com/create-account) for their service (it's free) and get an API token (it will be emailed to you on sign-up), which will entitle you to 5000 free lookups per month. Once obtained, the api token should be written to one of the following files (parsed in that order): `$HOME/.asn/iqs_token` or `/etc/asn/iqs_token` The `/etc`-based file should be used when running asn in **server mode**. The `$HOME`-based file takes precedence if both files exist, and is ideal for **user mode** (that is, running `asn` interactively from the command line). In order to do so, you can use the following command: ***User mode:*** `TOKEN=""; mkdir "$HOME/.asn/" && echo "$TOKEN" > "$HOME/.asn/iqs_token" && chmod -R 600 "$HOME/.asn/"` ***Server mode:*** `TOKEN=""; mkdir "/etc/asn/" && echo "$TOKEN" > "/etc/asn/iqs_token" && chmod -R 700 "/etc/asn/" && chown -R nobody /etc/asn/` Either way, `asn` will pick up your token on the next run (no need to restart the service if running in server mode), and use it to query the IPQualityScore API. > ***Note:*** > *IPQualityScore is not queried by default for every target, but only for targets that get flagged as BAD by StopForumSpam. It's possible to override this behavior (and force IQS lookup for every target) by setting the `IQS_ALWAYS_QUERY` parameter to `true` in the [preferences file](#preferences-file-homeasnrc). It is also possible to specify [custom query settings](https://www.ipqualityscore.com/documentation/proxy-detection/overview) through the `IQS_CUSTOM_SETTINGS` parameter.*
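For reference, a hypothetical `$HOME/.asnrc` enabling both behaviors might look like the sketch below; the `IQS_CUSTOM_SETTINGS` value is purely illustrative, check the IPQualityScore documentation for the parameters it actually accepts:

```shell
IQS_ALWAYS_QUERY=true
IQS_CUSTOM_SETTINGS="strictness=1&allow_public_access_points=true"
```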

### BGP hijack and route leak incidents (Cloudflare Radar)
Cloudflare token details

When this token is available, an additional lookup will be enabled for **autonomous system** targets, in order to enumerate the BGP incidents (both **BGP hijacks** and **BGP route leaks**) involving the target ASN.

The script will use the [Cloudflare Radar](https://radar.cloudflare.com/) API to retrieve the number of incidents involving the target ASN in the past 12 months. Additionally, it will report how many incidents saw the target ASN as a **hijacker** or as a **victim**.

The Cloudflare Radar API is **free** to use, but requires registration. The steps are:

1. [Sign up](https://dash.cloudflare.com/sign-up) for a free Cloudflare account and **validate your email**
2. From the [Cloudflare dashboard](https://dash.cloudflare.com/profile/api-tokens/), go to **My Profile > API Tokens**.
3. Select **Create Token**
4. Choose the "*Read Cloudflare Radar data*" template
5. Click **Continue to summary** (the default values are fine)
6. Click **Create token**

Once obtained, the API token should be written to one of the following files (parsed in that order):

`$HOME/.asn/cloudflare_token` or `/etc/asn/cloudflare_token`

The `/etc`-based file should be used when running asn in **server mode**. The `$HOME`-based file takes precedence if both files exist, and is ideal for **user mode** (that is, running `asn` interactively from the command line). In order to do so, you can use the following command:

***User mode:*** `TOKEN=""; mkdir "$HOME/.asn/" && echo "$TOKEN" > "$HOME/.asn/cloudflare_token" && chmod -R 600 "$HOME/.asn/"`

***Server mode:*** `TOKEN=""; mkdir "/etc/asn/" && echo "$TOKEN" > "/etc/asn/cloudflare_token" && chmod -R 700 "/etc/asn/" && chown -R nobody /etc/asn/`

Either way, `asn` will pick up your token on the next run (no need to restart the service if running in server mode), and use it to query the Cloudflare Radar API.
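Independently of `asn`, the saved token can also be checked against Cloudflare's standard token verification endpoint (user-mode file path shown):

```shell
curl -s -H "Authorization: Bearer $(cat "$HOME/.asn/cloudflare_token")" \
     "https://api.cloudflare.com/client/v4/user/tokens/verify" | jq '.success'
```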

- - - ## Usage ##### *Syntax* `asn [OPTIONS] [TARGET]` `asn [-v] -l [SERVER OPTIONS]` where `TARGET` can be one of the following: * **AS number** \-\- lookup matching ASN and BGP announcements/neighbours data\. Supports "as123" and "123" formats \(case insensitive\) * **IPv4/IPv6/Prefix** \-\- lookup matching route\(4/6), IP reputation and ASN data * **Hostname** \-\- resolve the host and lookup data \(same as IPv4/IPv6 lookup\. Supports multiple IPs \- e\.g\. DNS RR\) * **URL** \-\- extract hostname/IP from the URL and lookup relative data\. Supports any protocol prefix\, non\-standard ports and [prepended credentials](https://en.wikipedia.org/wiki/Basic_access_authentication#URL_encoding) * **Organization name** \-\- search by company name and lookup network ranges exported by \(or related to\) the company Options: * `-t` * enables lookup and path tracing for targets **(this is the default behavior)** >*.asnrc option equivalent: `MTR_TRACING=true` (default: `true`)* * `-d` * enables detailed trace mode (more info below) >*.asnrc option equivalent: `DETAILED_TRACE=true` (default: `false`)* * `-n` * disables path tracing and only outputs lookup info _(for IP targets)_ * disables additional INETNUM/origin lookups _(for AS targets)_ >*.asnrc option equivalent: `MTR_TRACING=false` (default: `true`), `ADDITIONAL_INETNUM_LOOKUP=false` (default: `true`)* * `-s` * Launch a Shodan InternetDB scan for the target(s). Supports multiple targets, mixed target types (IP/hostname/CIDR/URL) and piping from stdin. * `-o` * forces a Search-By-Organization lookup and skip all target identification checks * `-a` * enable *ASN suggestion mode*. This will search for all ASNs matching a given name. * `-u` * enable *Transit/Upstream lookup mode*. This will inspect BGP updates and ASPATHs for the TARGET address/prefix and identify possible transit/upstream autonomous systems. * `-c` * enable *Country CIDR mode*. This will output all IPv4/v6 CIDR blocks allocated to the specified country. * `-g` * enable *Bulk Geolocation mode*. This will extract all IPv4/v6 addresses from the input, geolocate them and draw some stats. * `-l` * Launch the script in *server mode*. See **Server Options** below * `-j` * enables compact JSON output. Useful for feeding the output into other tools (like `jq` or other parsers), or storing the lookup results. >*.asnrc option equivalent: `JSON_OUTPUT=true` (default: `false`)* * `-J` * enables pretty-printed JSON output. >*.asnrc option equivalent: `JSON_PRETTY=true` (default: `false`)* * `-m` * enables monochrome mode (disables all colors). >*.asnrc option equivalent: `MONOCHROME_MODE=true` (default: `false`)* * `-v` * Enable debug messages (will display all URLs being queried to help identify external API slowdowns). In client mode, `asn` will log all output (external calls and their response data) to the location defined by `$ASN_LOGFILE` _(by default the logfile can be found at `$HOME/asndebug.log`)_. >*.asnrc option equivalent: `ASN_DEBUG=true` (default: `false`)* * `-h` * Show usage information. Server Options: * `BIND_ADDRESS` * IP address (v4/v6) to bind the listening server to (e.g. `asn -l 0.0.0.0`) >*.asnrc option equivalent: `DEFAULT_SERVER_BINDADDR_v4=""` (default: `"127.0.0.1"`) and `DEFAULT_SERVER_BINDADDR_v6=""` (default: `"::1"`)* * `BIND_PORT` * TCP Port to bind the listening server to (e.g. `asn -l 12345`) >*.asnrc option equivalent: `DEFAULT_SERVER_BINDPORT=""` (default: `"49200"`)* * `BIND_ADDRESS BIND_PORT` * IP address and port to bind the listening server to (e.g. 
`asn -l ::1 12345`) * `-v` * Enable verbose output and debug messages in server mode >*.asnrc option equivalent: `ASN_DEBUG=true` (default: `false`)* * `--allow host[,host,...]` * Allow only given hosts to connect to the server * `--allowfile file` * A file of hosts allowed to connect to the server * `--deny host[,host,...]` * Deny given hosts from connecting to the server * `--denyfile file` * A file of hosts denied from connecting to the server * `--max-conns ` * The maximum number of simultaneous connections accepted by the server. 100 is the default. >*Note: Every option in server mode (after* `-l`*) is passed directly to the ncat listener.* *Refer to* `man ncat` *for more details on the available commands.* *Unless specified, the default IP:PORT values of **127.0.0.1:49200** (for IPv4) or **[::1]:49200** (for IPv6) will be used (e.g.* `asn -l`*)* ##### *Default behavior:* * The script will attempt to automatically identify the `TARGET` type, if invoked with `-t`, `-n`, `-d` or without options, * AS path tracing is **enabled by default** for all lookups involving an IP or hostname. In case of multiple IP results, the script will trace the first IP, with a preference for IPv6 if possible on the user's host. ##### *Preferences file (`$HOME/.asnrc`)* Options defaults can be overridden by creating a file called `.asnrc` in the user's **home directory**. The following values are the defaults. Any (or all) of them can be specified in the settings file and adjusted to the user's preference: ```shell ASN_LOGFILE="$HOME/asndebug.log" MTR_TRACING=true ADDITIONAL_INETNUM_LOOKUP=true DETAILED_TRACE=false MTR_ROUNDS=5 MAX_CONCURRENT_SHODAN_REQUESTS=10 SHODAN_SHOW_TOP_N=5 MONOCHROME_MODE=false ASN_DEBUG=false JSON_OUTPUT=false JSON_PRETTY=false DEFAULT_SERVER_BINDADDR_v4="127.0.0.1" DEFAULT_SERVER_BINDADDR_v6="::1" DEFAULT_SERVER_BINDPORT="49200" IQS_ALWAYS_QUERY=false IQS_CUSTOM_SETTINGS="" ``` ##### *Detailed mode (`-d` | `DETAILED_TRACE=true`)* * Detailed hop info reporting and RPKI validation can be turned on by passing the `[-d|--detailed]` command line switch. This will enable querying the public [pWhois server](https://pwhois.org/server.who) and the [RIPEStat RPKI validation API](https://stat.ripe.net/docs/data_api#rpki-validation) for every hop in the mtr trace. Relevant info will be displayed as a "tree" below the hop data, in addition to Team Cymru's server output (which only reports the AS name that the organization originating the prefix gave to its autonomous system number). This can be useful to figure out more details regarding the organization's name, the prefix' intended designation, and even (to a certain extent) its geographical scope. Furthermore, this will enable a warning whenever RPKI validation fails for one of the hops in the trace, indicating which AS in the path is wrongly announcing (as per current pWhois data) the hop prefix, indicating a potential route leak or BGP hijacking incident. ##### *Organization search (`-o`)* * The script will try to figure out if the input is an Organization name (i.e. if it doesn't look like an IP address, an AS number or a hostname). In order to force an organization search (for example for Orgs containing `.` in their name), pass the `[-o|--organization]` command line switch. ##### *ASN suggest (`-a`)* * The script will try to find ASNs matching the given search string, using the RIPEStat API. This mode can be used to map all the autonomous systems related to a given company. 
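For example, a typical workflow is to list matching ASNs first and then look one of them up in full (AS13335 below is simply a pick from a sample run):

```shell
asn -a cloudflare      # list ASNs whose name matches "cloudflare", with their AS rank
asn AS13335            # full lookup of one of the suggested ASNs
```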
##### *Upstream/transit identification (`-u`)*

* The script will inspect BGP updates for the given IP (v4/v6) and identify the likelihood of the upstream autonomous system(s) being transit(s) for the origin AS. A probability of the upstreams being transits will be inferred from the number of times the upstream AS appears in the observed ASPATHs towards the target IP, in comparison to other BGP peers. The script will also inform the user if the prefix is being simultaneously announced to multiple upstreams (e.g. in case of BGP multihoming, tier-1 prefixes, anycast addresses, etc.). JSON mode is supported.

##### *Server mode (`-l`)*

* The script will start up a webserver allowing the user to run remote lookups and traceroutes directly from the browser. The web server is actually an [ncat](https://nmap.org/ncat/) listener waiting for requests, responding to browsers querying through the HTTP protocol. This interface makes for a straightforward integration into the user's workflow, with no need to download any client-side tools. By simply using a Javascript [bookmarklet](https://en.wikipedia.org/wiki/Bookmarklet) or custom [search engine](https://www.howtogeek.com/114176/how-to-easily-create-search-plugins-add-any-search-engine-to-your-browser/), it will be possible to launch remote traces and lookups without ever leaving the browser. Refer to [this section](#running-lookups-from-the-browser) for more information.
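As a minimal sketch of a locked-down server start (the addresses below are documentation examples, substitute your own trusted hosts):

```shell
# listen on all IPv4 interfaces, default port, accepting only two trusted clients
asn -l 0.0.0.0 49200 --allow 192.0.2.10,198.51.100.7
```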
* The script will also attempt a best-effort, fallback generic `whois` lookup when Team Cymru, pWhois and PeeringDB have no info about the IP address or prefix. This is usually the case with some [PNI](https://en.wikipedia.org/wiki/Peering#Private_peering) prefixes, and will give better insight into the path taken by packets. ## Running lookups from the browser ##### *Prerequisite tools for server mode* Server mode requires two tools for its functionality: `ncat` and `aha`. Specifically, [aha](https://github.com/theZiz/aha) (the ANSI->HTML converter) v0.5+ is required. The ncat tool is contained inside the *nmap* package on older distributions (e.g. Ubuntu 18.04, Debian 9), while it is packaged as a standalone tool on newer ones. Please refer to the [installation](#installation) section and run the appropriate commands to install the required packages for your operating system, and optionally to install the asn server as a systemd service. ##### *Advantages of server mode* The main advantage of running lookups from the browser, is that every IP address and AS number gets converted into a hyperlink, allowing to perform subsequent lookups by simply clicking on them. When looking up an URL/hostname/domain, quick WHOIS info and links to relevant external resources will be available in the results. When looking up an AS number, all peering ASNs will be clickable. Also, if an AS peers at a public facility, PeeringDB info for that facility will be linked directly. Furthermore, additional external BGP information sources will be linked, directly for the target ASN. Here are some examples: ![srvmode_hostname_lookup](https://user-images.githubusercontent.com/24555810/117340152-e1217a00-aea0-11eb-9d8f-abaea0d3389e.png) ![srvmode_whois](https://user-images.githubusercontent.com/24555810/117340278-01e9cf80-aea1-11eb-8cad-457f60e80a86.png) ![srvmode_asn_lookup](https://user-images.githubusercontent.com/24555810/117340969-d7e4dd00-aea1-11eb-8e94-88d166f67360.png) #### Server side Once started in **server mode**, `asn` will spin up a custom webserver waiting for browser requests. This is what the server-side console looks like: ![server_console](https://user-images.githubusercontent.com/24555810/102154363-80ee5500-3e79-11eb-840f-53e3619be2e4.png) The server is now ready to accept browser requests (only from the local machine, in this case - since I've launched it with no command line switches, which defaults to listening on **127.0.0.1:49200**. Refer to the [usage](#usage) section for more information about the available server options). #### Client side Visit [this page](http://127.0.0.1:49200/asn_bookmarklet) in your browser and follow the instructions to copy the bookmarklet to your bookmarks toolbar: ![bookmarklet_install](https://user-images.githubusercontent.com/24555810/102159720-640b4f00-3e84-11eb-8360-afa79b6f0f5f.png) ##### *How it works* The bookmarklet is actually a small piece of Javascript code which will grab the hostname of the website you're currently visiting in the browser, and pass it to the server through a simple *HTTP GET* request. The server then proceeds to perform the lookup and traceroute (from its own viewpoint, just like it does when ran interactively from the command line), and feed the results to your browser through an HTML page, mimicking the effect of a scrolling terminal. 
>*Note: The link you drag to the bookmarks bar is actually a *minified* (i.e.: compacted) version of the source javascript code, but for reference, here's the full source:*
>
>```javascript
>javascript:(function () {
>    var asnserver = "localhost:49200";
>    var target = window.location.hostname;
>    var width = screen.width - screen.width / 7;
>    var height= screen.height - screen.height / 4;
>    var left = window.innerWidth / 2 - width / 2;
>    var top = window.innerHeight / 2 - height / 2;
>    window.open("http://" + asnserver + "/asn_lookup&" + target, "newWindow", "width=" + width + ",height=" + height + ",top=" + top + ",left=" + left);
>})();
>```
>
>*If you want to "un-minify" the actual bookmarklet code, you can refer to [this site](https://unminify.com/)*

Once the trace is finished, an option to share the output on [termbin](https://termbin.com/) is given to the user. This makes for quick sharing of the traceroute or lookup output with other people:

![termbin](https://user-images.githubusercontent.com/24555810/102160506-dc264480-3e85-11eb-9e14-8c851f261172.png)

![termbin_2](https://user-images.githubusercontent.com/24555810/102168101-f49b5c80-3e8f-11eb-8bc6-9f0592fa9624.png)

#### Search engine setup

In order to take full advantage of having `asn` inside the browser, it is possible to configure it as a custom search engine for the browser search bar. This allows leveraging the server to search for **ASNs**, **URLs**, **IPs**, **Hostnames**, and so on, depending on the search string.

Generally speaking, this implies instructing the browser that when a certain **keyword** is prepended to a search, the following characters (the actual **search string**, identified by `%s`) have to be passed to a certain URL. The URL is then composed according to this logic, and opened just like a normal webpage.

I've used `@asn` for my keyword, but anything would do. In order to speed things up, one could very well use a shorter tag (e.g. `#`) that, when used in the address bar, automatically switches your search engine to the ASN Lookup server. Note that the leading `@` sign is not mandatory, just handy since it doesn't get in the way of normal searches, but there's much freedom with that.

For quick reference, the location URL string to enter (for both Firefox and Chrome) is: `http://127.0.0.1:49200/asn_lookup&%s`. Of course that sends lookup requests to the *locally* running ASN server.

Here's how to add a search engine in Firefox and Chrome:

***Firefox:***

* Simply create a new bookmark and fill its details like this:

![searchsetup_firefox](https://user-images.githubusercontent.com/24555810/102160982-c6fde580-3e86-11eb-9885-c23eb60d622b.png)

Afterwards, you will be able to run queries and traceroutes by simply entering, for example, `@asn 8.8.8.8` in the browser's location bar.

***Chrome:***

1. Right click the location bar and select **Manage search engines...**

![searchsetup_chrome_1](https://user-images.githubusercontent.com/24555810/102161929-87d09400-3e88-11eb-9e42-70087e3fab87.png)

2. Click **Add**:

![searchsetup_chrome_2](https://user-images.githubusercontent.com/24555810/102162100-dc740f00-3e88-11eb-8037-528fbcc636e9.png)

3. Fill in the details as shown below:

![searchsetup_chrome_3](https://user-images.githubusercontent.com/24555810/102162218-16451580-3e89-11eb-85d1-a4d24c980d7d.png)

As usual, the keyword is entirely customizable to your preference.
***Other browsers:***

* You may want to follow [this post](https://www.howtogeek.com/114176/how-to-easily-create-search-plugins-add-any-search-engine-to-your-browser/) for instructions on how to add a custom search engine for your browser of choice.

#### Running the server on an external host

##### *Port forwarding*

In order to access the server remotely, besides binding to `0.0.0.0` (or any other relevant IP address for your scenario), if the host is behind a NAT router, you'll need to forward the listening port (`BIND_PORT`) from the host/router outside IP to the actual machine the ASN server is running on. It is a single TCP port (by default `TCP/49200`), and you can change it via the command line parameters (see [Usage](#usage)).

##### *Textual browser client*

It is possible to launch remote traces from another command line, and view the results directly in the terminal. All it takes is a compatible text browser, for example `elinks` (but you can download results for later review even using `curl` or really anything else). The script makes use of 8-bit ANSI colors for its output, so the command to launch a remote trace using elinks would be something like this:

`elinks -dump -dump-color-mode 3 http://<asn server address>:49200/asn_lookup&8.8.8.8`

##### *Security considerations*

The server logic in itself is very simple: the script implements a basic web server entirely in BASH, leveraging the fact that it can talk to a browser using the HTTP protocol and the HTML language, in a reasonably simple way. The core behind it revolves around [ncat](https://nmap.org/ncat/), a very robust and stable netcat-like network tool. This is the actual "server" listening for incoming connections, and spawning connection handlers (that is, 'single-purpose' instances of the `asn` script itself) as clients connect.

If you decide to open it to the outside (i.e.: binding it to something that is not localhost, and launching traces from outside your local machine), please bear in mind that there is no authentication mechanism (yet) integrated into the code, so theoretically anybody with the right URL could *spawn traceroutes from your server* and view the results (bear in mind however that the server sanitizes user input by stripping any dangerous characters).

To counter that, fortunately `ncat` implements a robust allow/deny logic (based both on command line parameters and files, a la `/etc/hosts.allow` and `hosts.deny`). The script supports passing parameters directly to `ncat`, therefore it's possible to make full use of its filtering capabilities and lock the server to a restricted range of trusted IPs. The available options, and some usage examples, can be viewed by running `asn -h`.

>*Note: if you plan to run the server somewhere other than your local machine, remember to change the bookmarklet code and the custom search engine URL values to reflect the actual IP of the asn server. It is naturally possible to have multiple bookmarklets and search engine keywords to map to different ASN server instances.*
>
>*For the bookmarklet, you'll need to change this value at the very beginning:* `var asnserver="localhost:49200"` *and make it point to the new address:port pair. No further change is required in the remaining JS code.*

## Shodan scanning (Recon Mode)

The tool can query Shodan's InternetDB API to look up information regarding any type of target when launched with the `-s` command line switch.
If the scan identifies any vulnerabilities, the NIST NVD API is queried in order to provide descriptions, any well known names and a link to learn more about the top ones. Currently supported targets are: - **IP addresses** - **CIDR blocks** *(will scan all of the IPs in the range)* - **URLs** - **Hostnames** *(will resolve to an IP (or list of IPs) and query all of them)* Target types can be mixed and queried in a single run. Targets can be piped to the tool via standard input as well. *Usage Examples:* `asn -s 1.1.1.1 8.8.8.8 9.9.9.9` `asn -s https://www.google.com 8.8.8.0/24` `asn -s < iplist` `curl -s https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/blocklist_de_bots.ipset | asn -s` Shodan scan results can be output in JSON mode by passing the `-j` or `-J` options. >*Note: the Nmap tool is needed to use this feature, but note that **no packets whatsoever** are sent to the targets. Nmap is only required to break down CIDR blocks into single IPs (as a calculator tool).* ## Mapping the IP(v4/v6) address space of specific countries The tool will search and display all IPv4 and IPv6 CIDR blocks allocated to a specific country when launched with the `-c` command line switch, plus some statistics. * Searching for a specific country code with a leading dot (e.g. `.fr`) will yield direct results for France, while full text search will display country codes matching the search string, or proceed to display the results if only one match is found. * Statistics such as v4 prefix length distribution, total IPv4 addresses available to the country, IPv4 addresses per capita, etc. are included. * JSON output is supported. *Usage Examples:* `asn -c germany` `asn -c .de` ```shell # scan a random norwegian subnet for CVE/CPE/open ports/hostnames: asn -jc .no | jq -r ".results[] | .ipv4[$RANDOM % .ipv4_blocks]" | asn -s ``` ## Bulk geolocation mode In this mode the tool will extract all IPv4 and IPv6 addresses from the input data and geolocate them. Anycast detection and general stats (top IPv4/IPv6 addresses with number of occurrences, number of IPs per country etc.) are included in the output. Bulk geolocation is quicker than normal `asn` lookups (300 IP addresses can be parsed in ~5s), and its main use case is to extract, geolocate and calculate country/occurrence stats for any number of IPs from arbitrarily formatted data streams (e.g. server logs). JSON output and stdin input are supported. *Usage Examples:* `asn -g 1.1.1.1 8.8.8.8` ```shell # geolocate webserver clients asn -g < /var/log/apache2/access.log ``` ```shell # geolocate IPs that have logged in to the system last | asn -g ``` ## JSON output and API mode #### Locally (shell mode) The tool can be instructed to output lookup results in JSON mode by using the `-j` (compact JSON) or `-J` (pretty-printed JSON) command line options:
Example 1 - IPv4 lookup

##### Command: `asn -J 8.8.8.8` ##### Output: ```json { "target": "8.8.8.8", "target_type": "ipv4", "result": "ok", "reason": "success", "version": "0.78.0", "request_time": "2024-08-20T02:50:28", "request_duration": 5, "api_tokens": { "ipqualityscore": true, "ipinfo": true, "cloudflare": true }, "result_count": 1, "results": [ { "ip": "8.8.8.8", "ip_version": "4", "reverse": "dns.google", "org_name": "Google LLC", "net_range": "8.8.8.0/24", "net_name": "GOGL", "abuse_contacts": [ "network-abuse@google.com" ], "routing": { "is_announced": true, "as_number": "15169", "as_name": "GOOGLE, US", "as_rank": "1788", "route": "8.8.8.0/24", "route_name": "", "roa_count": "1", "roa_validity": "valid" }, "type": { "is_bogon": false, "is_anycast": true, "is_mobile": false, "is_proxy": false, "is_dc": true, "dc_details": { "dc_name": "Google LLC" }, "is_ixp": false }, "geolocation": { "city": "Mountain View", "region": "California", "country": "United States", "cc": "US" }, "reputation": { "status": "good", "is_known_good": true, "known_as": "Google Public DNS" }, "fingerprinting": { "ports": [ 53, 443 ] } } ] } ```
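
The individual fields shown above pipe cleanly into `jq` when using compact output. A quick sketch reusing only fields from the example output (adapt the filter to whatever you need):

```shell
# print the target's ROA validity and country code, e.g. "valid US"
asn -j 8.8.8.8 | jq -r '.results[] | "\(.routing.roa_validity) \(.geolocation.cc)"'
```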

Example 2 - ASN lookup

##### Command: `asn -J 5505` ##### Output: ```json { "target": "5505", "target_type": "asn", "result": "ok", "reason": "success", "version": "0.78.0", "request_time": "2024-08-20T02:50:46", "request_duration": 17, "api_tokens": { "ipqualityscore": true, "ipinfo": true, "cloudflare": true }, "result_count": 1, "results": [ { "asn": "5505", "asname": "VADAVO, ES", "asrank": 4448, "org": "VDV-VLC-RED06 VDV-VLC-RED06 - CLIENTES TELECOM", "holder": "VADAVO SOLUCIONES SL", "abuse_contacts": [ "abuse@vadavo.com" ], "registration_date": "2016-12-13T08:28:07", "ixp_presence": [ "DE-CIX Madrid: DE-CIX Madrid Peering LAN", "ESpanix Madrid Lower LAN" ], "prefix_count_v4": 8, "prefix_count_v6": 1, "bgp_peer_count": 36, "bgp_hijack_incidents": { "total": 0, "as_hijacker": 0, "as_victim": 0 }, "bgp_leak_incidents": { "total": 0 }, "bgp_peers": { "upstream": [ "1299", "6939", "59432", "174", "34549", "25091", "35625", "33891", "48348", "13030", "8218", "41327", "3303", "4455", "6424", "6057", "34927", "9498", "35280", "1239" ], "downstream": [ "48952", "208248", "205086", "202054" ], "uncertain": [ "24482", "51185", "41047", "29680", "212483", "198150", "14840", "49544", "39384", "37721", "36236", "25160" ] }, "announced_prefixes": { "v4": [ "185.210.225.0/24", "188.130.247.0/24", "185.210.227.0/24", "185.123.205.0/24", "185.123.207.0/24", "185.210.226.0/24", "185.123.206.0/24", "185.123.204.0/24" ], "v6": [ "2a03:9320::/32" ] }, "inetnums": { "v4": [ "185.123.204.0/22", "185.210.225.0/24", "185.210.226.0/24", "185.210.227.0/24", "188.130.247.0/24" ], "v6": [ "2a03:9320::/32" ] }, "inetnums_announced_by_other_as": { "v4": [ { "prefix": "188.130.254.0/24", "origin_asn": "", "origin_org": "", "is_announced": false } ], "v6": [] } } ] } ```
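
The ASN lookup JSON can be post-processed the same way; for instance, a minimal sketch that lists the announced IPv4 prefixes from the output above, one per line:

```shell
# enumerate AS5505's announced IPv4 prefixes
asn -j AS5505 | jq -r '.results[].announced_prefixes.v4[]'
```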

Example 3 - enumerating abuse contacts for every IP to which a hostname resolves

##### Command: `asn -j www.google.com | jq '[.results[].abuse_contacts[]] | unique[]'` ##### Output: ``` "network-abuse@google.com" "ripe-contact@google.com" ```
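
The same idea extends to several hostnames with a plain shell loop (a hedged sketch: the second hostname is only a placeholder, and the `[]?` filter simply skips results that have no abuse contacts):

```shell
# collect unique abuse contacts across multiple lookups
for host in www.google.com www.example.org; do
    asn -j "$host" | jq -r '.results[].abuse_contacts[]?'
done | sort -u
```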

Example 4 - enumerating known vulnerabilities for a target

##### Command: `asn -j 45.67.34.100 | jq '.results[].fingerprinting.vulns[]'` ##### Output: ``` "CVE-2017-15906" "CVE-2018-15919" ```

Example 5 - upstream/transit AS lookup for a given IP

##### Command: `asn -Ju 72.17.119.201` ##### Output: ```json { "target": "72.17.119.201", "target_type": "ipv4", "result": "ok", "reason": "success", "version": "0.78.0", "request_time": "2024-08-20T02:54:03", "request_duration": 4, "api_tokens": { "ipqualityscore": true, "ipinfo": true, "cloudflare": true }, "result_count": 1, "results": [ { "prefix": "72.17.0.0/17", "origin_as": "33363", "origin_as_name": "BHN-33363, US", "origin_as_rank": 441, "upstreams_count": 1, "upstreams": [ { "asn": "7843", "asname": "TWC-7843-BB, US", "probability": 100, "is_tier1": false } ], "multiple_upstreams": false } ] } ```
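
Combining compact JSON output with the upstream lookup (the short options combine just like `-Ju` above) makes it easy to extract only the candidate upstreams; a small sketch based on the fields shown in the output:

```shell
# print each possible upstream AS with its probability, e.g. "7843 100"
asn -ju 72.17.119.201 | jq -r '.results[].upstreams[] | "\(.asn) \(.probability)"'
```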

Example 6 - enumerating unannounced address blocks for a given AS

##### Command: `asn -j AS5505 | jq -r '.results[].inetnums_announced_by_other_as.v4[] | select(.is_announced==false) | .prefix'` ##### Output: ``` 188.130.254.0/24 ```

Example 7 - enumerating the amount of BGP hijacking incidents involving a given AS

##### Command: `asn -j AS8860 | jq '.results[].bgp_hijack_incidents'` ##### Output: ``` { "total": 18, "as_hijacker": 11, "as_victim": 7 } ```
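
This lends itself to simple monitoring; a minimal sketch (using AS8860 only because it is the AS from the example above) that prints a warning whenever the incident counter is non-zero:

```shell
# warn if the AS has any recorded BGP hijack incidents
total=$(asn -j AS8860 | jq -r '.results[0].bgp_hijack_incidents.total // 0')
if [ "$total" -gt 0 ]; then
    echo "WARNING: AS8860 has $total recorded BGP hijack incident(s)"
fi
```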

#### Remotely (API endpoint) By running the script in [server mode](#running-lookups-from-the-browser), it is possible to use it as a self-hosted lookup API service by running HTTP queries against it and retrieving the results in compact or pretty-printed JSON format. The server exposes the `asn_lookup_json` and `asn_lookup_jsonp` endpoints for this purpose. The syntax is the same as with normal browser-based remote queries. *Example 1: querying the server remotely using `curl` (compact output):* ```shell root@KRUSTY:~# curl -s "http://localhost:49200/asn_lookup_json&1.1.1.1" {"target":"1.1.1.1","target_type":"ipv4","result":"ok","reason":"success","version":"0.72.1","request_time":"2022-03-29T00:13:11","request_duration":5,"result_count":1,"results":[{"ip":"1.1.1.1","ip_version":"4","reverse":"one.one.one.one","org_name":"APNIC and Cloudflare DNS Resolver project","abuse_contacts":["helpdesk@apnic.net"],"routing":{"is_announced":true,"as_number":"13335","as_name":"CLOUDFLARENET, US","net_range":"1.1.1.0/24","net_name":"APNIC-LABS","roa_count":"1","roa_validity":"valid"},"type":{"is_bogon":false,"is_anycast":true,"is_mobile":false,"is_proxy":false,"is_dc":true,"dc_details":{"dc_name":"Cloudflare"},"is_ixp":false},"geolocation":{"city":"Magomeni","region":"Dar es Salaam","country":"Tanzania","cc":"TZ"},"reputation":{"status":"good","is_known_good":true,"known_as":"Cloudflare Public DNS"},"fingerprinting":{"ports":[53,80,443]}}]} ``` *Example 2: querying the server remotely using `curl` (pretty printed output):* ```shell root@KRUSTY:~# curl -s "http://localhost:49200/asn_lookup_jsonp&10.0.0.1" { "target": "10.0.0.1", "target_type": "ipv4", "result": "ok", "reason": "success", "version": "0.72.1", "request_time": "2022-03-29T00:14:57", "request_duration": 0, "result_count": 1, "results": [ { "ip": "10.0.0.1", "ip_version": "4", "org_name": "IANA", "routing": { "is_announced": false, "net_name": "PRIVATE-ADDRESS-ABLK-RFC1918-IANA-RESERVED" }, "type": { "is_bogon": true, "bogon_type": "rfc1918 (Private Space)" }, "reputation": {}, "fingerprinting": {} } ] } ``` - - - ## Thanks An initial version of this script was featured in the **Security Trails** blog post "[*ASN Lookup Tools, Strategies and Techniques*](https://securitytrails.com/blog/asn-lookup#autonomous-system-lookup-script)". Thank you [Esteban](https://www.estebanborges.com/)! Thanks [Massimo Candela](https://github.com/massimocandela/) for your support and excellent work on [IPmap](https://ipmap.ripe.net/), [BGPlay](https://github.com/massimocandela/BGPlay) and [TraceMON](https://github.com/RIPE-NCC/tracemon)! Thanks to all the awesome contributors for their code, ideas, suggestions, packages and bug reports! ## Feedback and contributing Any feedback or pull request to improve the code is welcome. Feel free to contribute! 
asn-0.78.0/asn000077500000000000000000006271311466111623400131140ustar00rootroot00000000000000#!/usr/bin/env bash # ╭──────────────────────────────────────────────────────────────────────────────────────╮ # │ ASN / IPv4 / IPv6 / Prefix / AS Path / Organization lookup and server tool │ # │ │ # │Project homepage: │ # │ │ # │ https://github.com/nitefood/asn │ # │ │ # │Usage: │ # │ │ # │ (Launch the script without parameters or visit the project's homepage for usage info)│ # ╰──────────────────────────────────────────────────────────────────────────────────────╯ ASN_VERSION="0.78.0" ASN_LOGFILE="$HOME/asndebug.log" # ╭──────────────────╮ # │ Helper functions │ # ╰──────────────────╯ docurl(){ # shellcheck disable=SC2124 if [ "$ASN_DEBUG" = true ]; then parm="$@" DebugPrint "${yellow}curl $parm${default}" curloutput=$(curl "$@") echo "$curloutput" curloutput_jsonp=$(jq -C '.' <<<"$curloutput" 2>/dev/null) [[ -n "$curloutput_jsonp" ]] && echo -e "$curloutput_jsonp" >> "$ASN_LOGFILE" || echo "$curloutput" >> "$ASN_LOGFILE" echo "" >> "$ASN_LOGFILE" else curl "$@" fi } WhoisASN(){ found_asname=$(host -t TXT "AS${1}.asn.cymru.com" | grep -v "NXDOMAIN" | awk -F'|' 'NR==1{print substr($NF,2,length($NF)-2)}') if [ -n "$found_asname" ]; then ((json_resultcount++)) pwhois_full_asn_info=$(whois -h whois.pwhois.org "registry source-as=$1") # fetch last org only (in case there are multiple orgs listed for this AS) pwhois_asn_info=$(tac <<<"$pwhois_full_asn_info" | grep -m1 -E "^Org-Name") pwhois_asn_info+="\n" pwhois_asn_info+=$(tac <<<"$pwhois_full_asn_info" | grep -m1 -E "^Create-Date") found_holder=$(docurl -m5 -s "https://stat.ripe.net/data/as-overview/data.json?resource=AS$1&sourceapp=nitefood-asn" | jq -r 'select (.data.holder != null) | .data.holder') # RIPE usually outputs holder as "ASNAME - actual company name", trim it to just the company name in such cases found_holder=$(awk -F' - ' '{ if ( $2 ) {print $2} else {print} }' <<<"$found_holder") found_org=$(echo -e "$pwhois_asn_info" | grep -E "^Org-Name:" | cut -d ':' -f 2 | sed 's/^[ \t]*//') [[ -z "$found_org" ]] && found_org="N/A" found_abuse_contact=$(docurl -m5 -s "https://stat.ripe.net/data/abuse-contact-finder/data.json?resource=AS$asn&sourceapp=nitefood-asn" | jq -r 'select (.data.abuse_contacts[0] != null) | .data.abuse_contacts[0]') [[ -z "$found_abuse_contact" ]] && found_abuse_contact="-" pwhois_createdate=$(echo -e "$pwhois_asn_info" | grep -E "^Create-Date:" | cut -d ':' -f 2- | sed 's/^[ \t]*//') if [ "$JSON_OUTPUT" = true ]; then [[ -z "$pwhois_createdate" ]] && found_createdate="" || found_createdate=$(date -d "$pwhois_createdate" "+%Y-%m-%dT%H:%M:%S") else [[ -z "$pwhois_createdate" ]] && found_createdate="N/A" || found_createdate=$(date -d "$pwhois_createdate" "+%Y-%m-%d %H:%M:%S") fi fi } QueryRipestat(){ StatusbarMessage "Retrieving BGP data for AS$1 ($found_asname)" # BGP routing stats ripestat_routing_data=$(docurl -m5 -s "https://stat.ripe.net/data/routing-status/data.json?resource=AS$1&sourceapp=nitefood-asn") if [ -n "$ripestat_routing_data" ]; then ripestat_ipv4=$(jq -r '.data.announced_space.v4.prefixes' <<<"$ripestat_routing_data") ripestat_ipv6=$(jq -r '.data.announced_space.v6.prefixes' <<<"$ripestat_routing_data") ripestat_bgp=$(jq -r '.data.observed_neighbours' <<<"$ripestat_routing_data") fi # BGP neighbours list StatusbarMessage "Retrieving peering data for AS$1 ($found_asname)" ripestat_neighbours_data=$(docurl -m10 -s "https://stat.ripe.net/data/asn-neighbours/data.json?resource=AS$1&sourceapp=nitefood-asn") 
upstream_peers=$(jq -r '.data.neighbours | sort_by(.power) | reverse[] | select (.type=="left") | .asn' <<<"$ripestat_neighbours_data") downstream_peers=$(jq -r '.data.neighbours | sort_by(.power) | reverse[] | select (.type=="right") | .asn' <<<"$ripestat_neighbours_data") uncertain_peers=$(jq -r '.data.neighbours | sort_by(.power) | reverse[] | select (.type=="uncertain") | .asn' <<<"$ripestat_neighbours_data") if [ "$JSON_OUTPUT" = true ]; then json_abuse_contacts=$(docurl -m5 -s "https://stat.ripe.net/data/abuse-contact-finder/data.json?resource=AS$1&sourceapp=nitefood-asn" | jq -cM 'select (.data.abuse_contacts != null) | .data.abuse_contacts') [[ -z "$json_abuse_contacts" ]] && json_abuse_contacts="[]" json_upstream_peers=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$upstream_peers") json_downstream_peers=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$downstream_peers") json_uncertain_peers=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$uncertain_peers") else RESOLVE_COUNT=8 OUTPUT_PEERS_PER_LINE=4 # resolve AS names of the first n upstreams upstream_peercount=$(echo "$upstream_peers" | wc -l) resolved_upstream_peers="" count=0 for peer in $(echo -e "$upstream_peers" | head -n $RESOLVE_COUNT); do (( count++ )) peername=$(docurl -s "https://stat.ripe.net/data/as-overview/data.json?resource=AS$peer&sourceapp=nitefood-asn" | jq -r '.data.holder' | sed 's/ - .*//' ) if [ "$IS_ASN_CHILD" = true ]; then resolved_upstream_peers+="${greenbg} $peername ($peer) ${default} " else resolved_upstream_peers+="${greenbg} $peername ($peer) ${default} " fi [[ $(( count % OUTPUT_PEERS_PER_LINE )) -eq 0 ]] && resolved_upstream_peers+="\n" done # and add the remaining ones as AS numbers only unresolved_peercount=$(( upstream_peercount - RESOLVE_COUNT )) if [ "$unresolved_peercount" -ge 1 ]; then resolved_upstream_peers+="and more: " for peer in $(echo -e "$upstream_peers" | tail -n $unresolved_peercount ); do if [ "$IS_ASN_CHILD" = true ]; then resolved_upstream_peers+="$peer${default} " else resolved_upstream_peers+="${green}${peer}${default} " fi done fi upstream_peers="$resolved_upstream_peers" # resolve AS names of the first n downstreams downstream_peercount=$(echo "$downstream_peers" | wc -l) resolved_downstream_peers="" count=0 for peer in $(echo -e "$downstream_peers" | head -n $RESOLVE_COUNT); do (( count++ )) peername=$(docurl -s "https://stat.ripe.net/data/as-overview/data.json?resource=AS$peer&sourceapp=nitefood-asn" | jq -r '.data.holder' | sed 's/ - .*//' ) if [ "$IS_ASN_CHILD" = true ]; then resolved_downstream_peers+="${yellowbg} $peername ($peer) ${default} " else resolved_downstream_peers+="${yellowbg} $peername ($peer) ${default} " fi [[ $(( count % OUTPUT_PEERS_PER_LINE )) -eq 0 ]] && resolved_downstream_peers+="\n" done # and add the remaining ones as AS numbers only unresolved_peercount=$(( downstream_peercount - RESOLVE_COUNT )) if [ "$unresolved_peercount" -ge 1 ]; then resolved_downstream_peers+="and more: " for peer in $(echo -e "$downstream_peers" | tail -n $unresolved_peercount ); do if [ "$IS_ASN_CHILD" = true ]; then resolved_downstream_peers+="$peer${default} " else resolved_downstream_peers+="${yellow}${peer}${default} " fi done fi downstream_peers="$resolved_downstream_peers" # resolve AS names of the first n uncertains uncertain_peercount=$(echo "$uncertain_peers" | wc -l) resolved_uncertain_peers="" count=0 for peer in $(echo -e "$uncertain_peers" | head -n $RESOLVE_COUNT); do (( count++ )) 
peername=$(docurl -s "https://stat.ripe.net/data/as-overview/data.json?resource=AS$peer&sourceapp=nitefood-asn" | jq -r '.data.holder' | sed 's/ - .*//' ) if [ "$IS_ASN_CHILD" = true ]; then resolved_uncertain_peers+="${lightgreybg} $peername ($peer) ${default} " else resolved_uncertain_peers+="${lightgreybg} $peername ($peer) ${default} " fi [[ $(( count % OUTPUT_PEERS_PER_LINE )) -eq 0 ]] && resolved_uncertain_peers+="\n" done # and add the remaining ones as AS numbers only unresolved_peercount=$(( uncertain_peercount - RESOLVE_COUNT )) if [ "$unresolved_peercount" -ge 1 ]; then resolved_uncertain_peers+="and more: " for peer in $(echo -e "$uncertain_peers" | tail -n $unresolved_peercount ); do if [ "$IS_ASN_CHILD" = true ]; then resolved_uncertain_peers+="$peer${default} " else resolved_uncertain_peers+="${white}${peer}${default} " fi done fi uncertain_peers="$resolved_uncertain_peers" fi StatusbarMessage "Retrieving prefix allocations and announcements for AS$1 ($found_asname)" ipv4_inetnums="" ipv6_inetnums="" json_ipv4_other_inetnums="" json_ipv6_other_inetnums="" ripe_prefixes=$(docurl -m10 -s "https://stat.ripe.net/data/announced-prefixes/data.json?resource=$1&sourceapp=nitefood-asn" | jq -r '.data.prefixes[].prefix') json_ripe_prefixes=$(jq -cM --slurp --raw-input 'split("\n") | map(select(length > 0)) | {v4:map(select(contains(":")|not)), v6:map(select(contains(":")))}' <<<"$ripe_prefixes") ipv4_ripe_prefixes=$(grep -v ":" <<<"$ripe_prefixes" | grep -Ev "^$" | sort) ipv4_ripe_prefixes_count=$(wc -l <<<"$ipv4_ripe_prefixes") ipv6_ripe_prefixes=$(grep ":" <<<"$ripe_prefixes" | grep -Ev "^$" | sort) ipv6_ripe_prefixes_count=$(wc -l <<<"$ipv6_ripe_prefixes") # open persistent tcp connection to RIPE whois server exec 6<>/dev/tcp/whois.ripe.net/43 prefixcounter=0 for prefix in $ipv6_ripe_prefixes; do ((prefixcounter++)) StatusbarMessage "Retrieving information for IPv6 prefix $prefixcounter/$ipv6_ripe_prefixes_count" # old way (one whois lookup per prefix) # inet6nums=$(whois -h whois.ripe.net -- "-T inet6num -K -L --resource $prefix" | \ # grep -m2 inet6num | cut -d ':' -f 2- | sed 's/^[ \t]*//') # new way (direct tcp connection to whois server with persistent whois connection) echo -e "-k -T inet6num -K -L --resource $prefix" >&6 whoisoutput="" # read whois output from the tcp stream line by line while IFS= read -r -u 6 whoisoutputline; do if [ -n "$whoisoutputline" ]; then whoisoutput+="$whoisoutputline\n" continue fi # last line was empty, check if next line is empty too # if we get two empty lines in a row, the whois output is finished IFS= read -r -u 6 whoisoutputline [[ -z "$whoisoutputline" ]] && break || whoisoutput+="$whoisoutputline\n" done inet6nums=$(echo -e "$whoisoutput" | grep -m2 inet6num | cut -d ':' -f 2- | sed 's/^[ \t]*//') for inet6num in $inet6nums; do # exclude RIR supernets prefix_size=$(echo "$inet6num" | cut -d '/' -f 2) [[ "$prefix_size" -le 12 ]] && continue || ipv6_inetnums+="${inet6num}\n" done done prefixcounter=0 lookedup_parents_cache="" for prefix in $ipv4_ripe_prefixes; do ((prefixcounter++)) StatusbarMessage "Retrieving information for IPv4 prefix $prefixcounter/$ipv4_ripe_prefixes_count" # old way (one whois lookup per prefix) # parent_inetnum=$(whois -h whois.ripe.net -- "-T inetnum -K -L --resource $prefix" | \ # grep -E -m1 "^inetnum" | awk '{print $2"-"$4}' | xargs ipcalc -r 2>/dev/null | grep -v "deaggregate") # new way (direct tcp connection to whois server with persistent whois connection) echo -e "-k -T inetnum -K -L --resource $prefix" >&6 
whoisoutput="" # read whois output from the tcp stream line by line while IFS= read -r -u 6 whoisoutputline; do if [ -n "$whoisoutputline" ]; then whoisoutput+="$whoisoutputline\n" continue fi # last line was empty, check if next line is empty too # if we get two empty lines in a row, the whois output is finished IFS= read -r -u 6 whoisoutputline [[ -z "$whoisoutputline" ]] && break || whoisoutput+="$whoisoutputline\n" done parent_inetnum=$(echo -e "$whoisoutput" | grep -E -m1 "^inetnum") if [ -n "$parent_inetnum" ]; then parent_inetnum=$(awk '{print $2"-"$4}' <<<"$parent_inetnum") parent_inetnum=$(IpcalcDeaggregate "$parent_inetnum") # check if the inetnum containing this prefix is being announced by the same AS as the prefix itself, otherwise # it means it's part of a larger supernet by some other AS (e.g. larger carrier allocating own prefix to smaller customer) if ! grep -q "$parent_inetnum" <<<"$lookedup_parents_cache"; then # this parent inetnum hasn't been looked up yet lookedup_parents_cache+="$parent_inetnum\n" LookupASNAndRouteFromIP "$parent_inetnum" if [ -z "$found_asname" ] || [ "$1" = "$found_asn" ]; then # the target AS is also announcing the larger inetnum, or nobody is announcing it. Either way consider it part of the target's resources ipv4_inetnums+="$parent_inetnum\n" else # the larger inetnum is being announced by another AS, only add the announced (smaller) prefix to the list ipv4_inetnums+="$prefix\n" fi else # this parent inetnum has already been looked up # if it's not present in the list of ipv4_inetnums, it means it's part of a larger supernet by some other AS. # therefore we only add the announced (smaller) prefix to the list if ! grep -q "$parent_inetnum" <<<"$ipv4_inetnums"; then ipv4_inetnums+="$prefix\n" fi fi else ipv4_inetnums+="$prefix\n" fi done # close persistent tcp connection to RIPE whois server if { true >&6; } 2<> /dev/null; then echo -e "-k" >&6 fi if [ "$ADDITIONAL_INETNUM_LOOKUP" = true ]; then # fetch further inetnums allocated to this AS from pWhois StatusbarMessage "Identifying additional INETNUMs (not announced or announced by other AS) allocated to AS$1" pwhois_prefixes=$(PwhoisListPrefixesForOrg "$found_org") pwhois_unique_prefixes=$(comm -13 <(echo -e "$ipv4_ripe_prefixes" | sort) <(echo -e "$pwhois_prefixes" | sort)) pwhois_unique_prefixes=$(comm -13 <(echo -e "$lookedup_parents_cache" | sort) <(echo -e "$pwhois_unique_prefixes" | sort)) if [ -n "$pwhois_unique_prefixes" ]; then pwhois_unique_prefixes_count=$(wc -l <<<"$pwhois_unique_prefixes") StatusbarMessage "Identifying origin AS for $pwhois_unique_prefixes_count additional IPv4 prefix(es)" # NEW WAY (bulk query to Team Cymru whois server) # map the unique prefixes pWhois reported to an array mapfile -t pwhois_unique_prefixes_array < <(echo -e "$pwhois_unique_prefixes") # assemble a bulk Team Cymru whois lookup query for the new prefixes found in pWhois. # we'll check if they're announced by the target AS, by different one, or by no one at all # (e.g. 
target AS has delegated announcements for this prefix to another AS, or is not announcing it) # and compile a list to integrate into the allocated IP resources for this AS teamcymru_bulk_query="begin\n" for prefix in $pwhois_unique_prefixes; do teamcymru_bulk_query+="$prefix\n" done teamcymru_bulk_query+="end" prefixcounter=0 for single_prefix_data in $(echo -e "$teamcymru_bulk_query" | ncat --no-shutdown whois.cymru.com 43 | grep "|" | sed 's/\ *|\ */|/g'); do prefix="${pwhois_unique_prefixes_array[$prefixcounter]}" prefix_originator_asn=$(echo "$single_prefix_data" | cut -d '|' -f 1) if [ "$prefix_originator_asn" = "$1" ]; then # prefix originator is same as target AS, add this prefix to the allocated IP resources for this AS ipv4_inetnums+="$prefix\n" elif [ "$prefix_originator_asn" = "NA" ]; then # prefix not announced, add this prefix to the allocated IP resources for this AS with a "not announced" remark if [ "$JSON_OUTPUT" = true ]; then [[ -n "$json_ipv4_other_inetnums" ]] && json_ipv4_other_inetnums+="," json_ipv4_other_inetnums+="{\"prefix\":\"$prefix\",\"origin_asn\":\"\",\"origin_org\":\"\", \"is_announced\":false}" elif [ "$IS_ASN_CHILD" = true ]; then # skip colors, they will be added along with hyperlinks later ipv4_inetnums+=$(printf "%-18s → not announced" "$prefix") ipv4_inetnums+="\n" else ipv4_inetnums+=$(printf "${dim}%-18s → ${red}not announced${default}${green}" "$prefix") ipv4_inetnums+="\n" fi else # prefix is announced by a different AS, add this prefix to the allocated IP resources for this AS prefix_originator_org=$(echo "$single_prefix_data" | cut -d '|' -f 3) if [ "$JSON_OUTPUT" = true ]; then [[ -n "$json_ipv4_other_inetnums" ]] && json_ipv4_other_inetnums+="," json_ipv4_other_inetnums+="{\"prefix\":\"$prefix\",\"origin_asn\":\"$prefix_originator_asn\",\"origin_org\":\"$prefix_originator_org\", \"is_announced\":true}" elif [ "$IS_ASN_CHILD" = true ]; then # skip colors, they will be added along with hyperlinks later ipv4_inetnums+=$(printf "%-18s → announced by AS%s %s" "$prefix" "${prefix_originator_asn}" "${prefix_originator_org}") ipv4_inetnums+="\n" else ipv4_inetnums+=$(printf "%-18s ${dim}→ announced by ${default}${red}AS%s ${default}${green}%s" "$prefix" "${prefix_originator_asn}" "${prefix_originator_org}") ipv4_inetnums+="\n" fi fi ((prefixcounter++)) done # OLD WAY (one lookup per prefix) # for prefix in $pwhois_unique_prefixes; do # # found a new prefix in pWhois, check if it's announced by a different AS # # (e.g. 
target AS has delegated announcements for this prefix to another AS) # ((prefixcounter++)) # StatusbarMessage "Identifying origin AS for additional IPv4 prefix $prefixcounter/$pwhois_unique_prefixes_count" # LookupASNAndRouteFromIP "$prefix" # prefix_originator_asn="$found_asn" # if [ -z "$prefix_originator_asn" ] || [ "$prefix_originator_asn" = "$1" ]; then # # prefix originator is same as target AS, or prefix not announced # # add this prefix to the allocated IP resources for this AS # ipv4_inetnums+="$prefix\n" # else # # this prefix is allocated to target AS, but is being announced by a different AS # prefix_originator_org=$(docurl -m5 -s "https://stat.ripe.net/data/as-overview/data.json?resource=AS${found_asn}&sourceapp=nitefood-asn" | \ # jq -r 'select (.data.holder != null) | .data.holder' | \ # awk -F' - ' '{ if ( $2 ) {print $2} else {print} }' \ # ) # if [ "$JSON_OUTPUT" = true ]; then # [[ -n "$json_ipv4_other_inetnums" ]] && json_ipv4_other_inetnums+="," # json_ipv4_other_inetnums+="{\"prefix\":\"$prefix\",\"origin_asn\":\"$prefix_originator_asn\",\"origin_org\":\"$prefix_originator_org\"}" # else # ipv4_inetnums+="$prefix (announced by AS${prefix_originator_asn} - ${prefix_originator_org})\n" # fi # fi # done fi fi if [ -n "$ipv4_inetnums" ]; then ipv4_inetnums=$(echo -e "$ipv4_inetnums" | sort -iu) if [ "$IS_ASN_CHILD" = true ] && [ "$JSON_OUTPUT" = false ]; then # HTML output html="" for inetnum in $ipv4_inetnums; do if grep -q "announced by" <<<"$inetnum"; then # handle special case " → announced by AS" actual_inetnum=${inetnum:0:18} originator=$(cut -d ' ' -f 7 <<<"$inetnum") rest_of_line=$(cut -d ' ' -f 8- <<<"$inetnum") html+="$actual_inetnum" html+=" → announced by " html+="$originator ${rest_of_line}\n" elif grep -q "not announced" <<<"$inetnum"; then # handle special case " → not announced" actual_inetnum=${inetnum:0:18} html+="$actual_inetnum" html+=" → not announced\n" else html+="$inetnum\n" fi done ipv4_inetnums="$html" fi fi if [ -n "$ipv6_inetnums" ]; then ipv6_inetnums=$(echo -e "$ipv6_inetnums" | sort -u) if [ "$IS_ASN_CHILD" = true ] && [ "$JSON_OUTPUT" = false ]; then html="" for inet6num in $ipv6_inetnums; do html+="$inet6num\n" done ipv6_inetnums="$html" fi fi if [ "$JSON_OUTPUT" = true ]; then json_ipv4_aggregated_inetnums=$(jq -cM --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$ipv4_inetnums") json_ipv6_aggregated_inetnums=$(jq -cM --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$ipv6_inetnums") fi StatusbarMessage } RIPESuggestASN(){ TRIM_WHITESPACES=false input=$(tr '[:lower:]' '[:upper:]' <<<"$1") ripe_suggest_output="" while true; do for input_variation in "${input}" "AS_${input}" "AS-${input}" "${input}_AS" "${input}-AS"; do StatusbarMessage "Retrieving suggested ASNs for ${bluebg}${input_variation}${lightgreybg}" # lookup input variation (AS_, AS-, _AS, -AS) ripe_suggest_output+=$(docurl -s "https://stat.ripe.net/data/searchcomplete/data.json?resource=${input_variation}&sourceapp=nitefood-asn" | \ jq -r '.data.categories[] | select ( .category == "ASNs" ) | .suggestions[]') done StatusbarMessage if [ -n "$ripe_suggest_output" ]; then found_suggestions=$(jq -r '.description' <<<"$ripe_suggest_output" | sort -u) for suggestion in $found_suggestions; do echo -e "\n${green}$suggestion${default}" for suggestion_asn in $(jq -r 'select (.description=="'"$suggestion"'") | .value' <<<"$ripe_suggest_output" | awk 'NR==1{print}'); do echo -en "\t${yellow}$suggestion_asn${default} (Rank: " GetCAIDARank "${suggestion_asn:2}" 
echo -en "${caida_asrank_recap}" echo "${default})" done done echo "" return elif [ "$TRIM_WHITESPACES" = false ]; then TRIM_WHITESPACES=true oldinput="$input" # shellcheck disable=SC2001 input=$(echo "$oldinput" | sed 's/[ \t]*//g') if [ "$input" = "$oldinput" ]; then echo -e "\n${redbg}No suggestions found${default}\n" return else continue fi else echo -e "\n${redbg}No suggestions found${default}\n" return fi done } WhoisIP(){ # $1: (mandatory) IP to lookup # $2: (optional) if set to anything, only perform a generic whois lookup (skip pWhois/RPKI/IXP lookups) [[ "$JSON_OUTPUT" = true ]] && ((json_resultcount++)) GENERIC_WHOIS_LOOKUP_ONLY=false [[ "$#" -gt 1 ]] && GENERIC_WHOIS_LOOKUP_ONLY=true full_whois_data=$(timeout 5 whois "$1" 2>/dev/null) network_whois_data=$(echo -e "$full_whois_data" | grep -i -E "^netname:|^orgname:|^org-name:|^owner:|^descr:|^country:") # fetch whois inetnum and later compare to cymru prefix, in order to find smallest match (sometimes whois and cymru/pwhois prefix data diverge) whois_inetnum=$(IpcalcDeaggregate "$(grep -E -m1 "inet[6]?num|NetRange"<<<"$full_whois_data" | awk '{print $2 $3 $4}')") # handle problematic IPs where whois gives out wrong info [[ "$whois_inetnum" = "192.168.1.1/32" ]] && whois_inetnum="$found_route" ixp_data="" ixp_geo="" ip_type_json_output="" # Check if input is a bogon address if [ "$IS_BOGON" = false ]; then ip_type_json_output+="\"is_bogon\":false" if [ "$GENERIC_WHOIS_LOOKUP_ONLY" = false ]; then hostname=$(RdnsLookup "$1") [[ -z "$hostname" ]] && hostname="-" abuse_whois_data=$(echo -e "$full_whois_data" | grep -E "^OrgAbuseEmail:|^abuse-c:|^% Abuse|^abuse-mailbox:") abusecontacts=$(AbuseLookupForPrefix "$abuse_whois_data" "$1") fi if [ "$UNANNOUNCED_PREFIX" = false ] && [ "$GENERIC_WHOIS_LOOKUP_ONLY" = false ]; then # Prefix found in the Team Cymru DB, perform pWhois lookup and CAIDA rank lookup PwhoisLookup "$1" GetCAIDARank "$found_asn" else # No data in the Team Cymru DB for this IP (unannounced prefix), or pWhois being skipped if [ "$GENERIC_WHOIS_LOOKUP_ONLY" = false ]; then [[ -z "$network_whois_data" ]] && PrintErrorAndExit "Error: no data found for $input" found_asn="N/A (address not announced)" found_asname="" fi IPGeoRepLookup "$1" IPShodanLookup "$1" # check if it's an IXP, otherwise fall back to generic whois [[ "$GENERIC_WHOIS_LOOKUP_ONLY" = false ]] && IsIXP "$1" if [ -n "$ixp_data" ]; then if [ "$JSON_OUTPUT" = true ]; then pwhois_org="$ixp_data" else pwhois_org="${bluebg} IXP ${default} ${blue}${ixp_data}${default}" ip_type_data=" ${yellowbg} Internet Exchange ${default}" fi else pwhois_org=$(echo -e "$network_whois_data" | grep -i -E "^orgname:|^org-name:|^owner:" | cut -d ':' -f 2 | sed 's/^[ \t]*//' | while read -r line; do echo -n "$line / "; done | sed 's/ \/ $//') fi [[ -z "$pwhois_org" ]] && pwhois_org="N/A" found_route=$(echo -e "$network_whois_data" | grep -i -m2 -E "^descr:" | cut -d ':' -f 2 | sed 's/^[ \t]*//' | while read -r line; do if [ -n "$line" ]; then echo -n "$line / "; fi; done | sed 's/ \/ $//') [[ -z "$found_route" ]] && found_route="N/A" pwhois_net=$(echo -e "$network_whois_data" | grep -i -E "^netname:" | cut -d ':' -f 2 | sed 's/^[ \t]*//' | while read -r line; do echo -n "$line / "; done | sed 's/ \/ $//') [[ -z "$pwhois_net" ]] && pwhois_net="N/A" if [ -n "$ixp_geo" ]; then pwhois_geo="$ixp_geo" elif [ -n "$ip_geo_data" ]; then pwhois_geo="$ip_geo_data" else pwhois_geo=$(echo -e "$network_whois_data" | grep -m1 -i -E "^country:" | cut -d ':' -f 2 | sed 's/^[ \t]*//') 
geo_cc_json_output="$pwhois_geo" fi [[ -z "$pwhois_geo" ]] && pwhois_geo="N/A" fi [[ -n "$ixp_data" ]] && ip_type_json_output+=",\"is_ixp\":true" || ip_type_json_output+=",\"is_ixp\":false" else # bogon address, skip lookups ip_type_json_output+="\"is_bogon\":true" ip_type_json_output+=",\"bogon_type\":\"$json_bogon_type\"" hostname="-" found_asn="-" pwhois_org="IANA" found_route="N/A" abusecontacts="-" pwhois_net=$(echo -e "$network_whois_data" | grep -i -E "^netname:" | cut -d ':' -f 2 | sed 's/^[ \t]*//' | while read -r line; do echo -n "$line / "; done | sed 's/ \/ $//') found_asname="" ip_type_data=" $bogon_tag" pwhois_geo="-" ip_rep_data="-" fi indent=$(( longest+4 )) if [ -n "$found_asname" ]; then output_asname="${green}($found_asname)" else output_asname="" fi rpki_output="" if [ "$UNANNOUNCED_PREFIX" = false ] && [ "$GENERIC_WHOIS_LOOKUP_ONLY" = false ]; then # we skip RPKI lookup in both cases because if SKIP_WHOIS=true then we're being # called from TraceASPath, and RPKI lookup will be performed there subsequently StatusbarMessage "Checking RPKI validity for ${bluebg}AS${found_asn}${lightgreybg} and prefix ${bluebg}${found_route}${lightgreybg}" RPKILookup "$found_asn" "$found_route" StatusbarMessage [[ "$JSON_OUTPUT" = false ]] && echo "" elif [ "$IS_BOGON" = false ]; then rpki_output="${red}N/A (address not announced)${default}" else rpki_output="-" fi found_subprefix="$found_route" whois_routename="" # compare cymru net with whois net and pick longer (smaller), while retaining larger for route information. # we want to identify subnets to which target IPs belong, even when they aren't # announced directly but within a larger route. if [ "$found_route" != "N/A" ] && [ "$found_route" != "$whois_inetnum" ]; then foundroute_prefixlen=$(cut -d '/' -f 2 <<<"$found_route") whois_prefixlen=$(cut -d '/' -f 2 <<<"$whois_inetnum") if (( whois_prefixlen > foundroute_prefixlen )) 2>/dev/null; then found_subprefix="$whois_inetnum" # lookup route name (RIPE) whois_routename=$(timeout 5 whois "$found_route" | grep -m1 -E "^descr:" | cut -d ':' -f 2 | sed 's/^ *//g') fi fi if [ "$JSON_OUTPUT" = true ]; then # JSON output final_json_output+="{" final_json_output+="\"ip\":\"$1\"," grep -q ":" <<<"$1" && ipversion="6" || ipversion="4" final_json_output+="\"ip_version\":\"$ipversion\"," [[ "$hostname" != "-" ]] && final_json_output+="\"reverse\":\"$hostname\"," final_json_output+="\"org_name\":\"$pwhois_org\"," # next field can be == $found_route or can be different (smaller) in case the IP belongs to a subnet (of $found_route) that's not routed directly final_json_output+="\"net_range\":\"$found_subprefix\"," final_json_output+="\"net_name\":\"$pwhois_net\"," if [ -n "$abusecontacts" ] && [ "$abusecontacts" != "-" ]; then final_json_output+="\"abuse_contacts\":$abusecontacts," fi final_json_output+="\"routing\":{" if [ -n "$found_asname" ]; then final_json_output+="\"is_announced\":true," final_json_output+="\"as_number\":\"$found_asn\"," final_json_output+="\"as_name\":\"${found_asname//\"/\\\"}\"," final_json_output+="\"as_rank\":\"${caida_asrank//\"/\\\"}\"," final_json_output+="\"route\":\"$found_route\"," final_json_output+="\"route_name\":\"$whois_routename\"," final_json_output+="\"roa_count\":\"$roacount_json_output\"," final_json_output+="\"roa_validity\":\"$roavalidity_json_output\"" else final_json_output+="\"is_announced\":false," if [ "$found_route" != "N/A" ]; then final_json_output+="\"net_name\":\"$found_route ($pwhois_net)\"" else 
final_json_output+="\"net_name\":\"$pwhois_net\"" fi fi final_json_output+="},\"type\":{$ip_type_json_output}" if [ "$IS_BOGON" != true ]; then final_json_output+=",\"geolocation\":{" final_json_output+="\"city\":\"$geo_city_json_output\"," final_json_output+="\"region\":\"$geo_region_json_output\"," final_json_output+="\"country\":\"$geo_country_json_output\"," final_json_output+="\"cc\":\"$geo_cc_json_output\"" final_json_output+="}" fi else # Normal output printf "${white}%${longest}s${default} ┌${bluebg}PTR${default} %s\n" "$1" "$hostname" if [ "$IS_ASN_CHILD" = true ] && [ -n "$found_asname" ]; then summary_asn="$found_asn" ipinfolink="ipinfo.io" summary_ipinfo=" ($ipinfolink🔗)" else summary_asn="$found_asn" summary_ipinfo="" fi printf "${white}%${indent}s${bluebg}ASN${default} ${red}%s %s${default}\n" "├" "$summary_asn" "$output_asname" [[ "$UNANNOUNCED_PREFIX" = false ]] && printf "${white}%${indent}s${bluebg}RNK${default} ${default}%s${default}\n" "├" "$caida_asrank_recap" printf "${white}%${indent}s${bluebg}ORG${default} ${green}%s${default}\n" "├" "$pwhois_org" if [ "$found_subprefix" != "$found_route" ]; then # target IP belongs to a subnet announced within a larger route printf "${white}%${indent}s${bluebg}NET${default} ${yellow}%s (%s)${default}\n" "├" "$found_subprefix" "$pwhois_net" [[ -n "$whois_routename" ]] && whois_routename=" ($whois_routename)" printf "${white}%${indent}s${bluebg}ROU${default} ${yellow}%s%s%s${default}\n" "├" "$found_route" "$whois_routename" "$summary_ipinfo" else # target IP belongs to a subnet announced directly printf "${white}%${indent}s${bluebg}NET${default} ${yellow}%s (%s)%s${default}\n" "├" "$found_subprefix" "$pwhois_net" "$summary_ipinfo" fi printf "${white}%${indent}s${bluebg}ABU${default} ${blue}%s${default}\n" "├" "$abusecontacts" printf "${white}%${indent}s${bluebg}ROA${default} %s\n" "├" "$rpki_output" [[ -n "$ip_type_data" ]] && printf "${white}%${indent}s${bluebg}TYP${default}%s\n" "├" "${ip_type_data}" printf "${white}%${indent}s${bluebg}GEO${default} ${magenta}%s${default}" "├" "$pwhois_geo" if [ "$IS_ASN_CHILD" = true ] && [ -n "$flag_icon_cc" ]; then # signal to the parent connhandler the correct country flag to display for this IP echo -n " #COUNTRYCODE $flag_icon_cc" fi printf "\n" fi } IsBogon(){ bogon_tag="" IS_BOGON=false # Bogon regex patterns localhostregex='(^127\.)' # RFC 1122 localhost thisnetregex='(^0\.)' # RFC 1122 'this' network privateregex='(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)|(^[fF][cCdD])' # RFC 1918 private space - cheers https://stackoverflow.com/a/11327345/5377165 cgnregex='(^100\.6[4-9]\.)|(^100\.[7-9][0-9]\.)|(^100\.1[0-1][0-9]\.)|(^100\.12[0-7]\.)' # RFC 6598 Carrier grade nat space llregex='(^169\.254\.)' # RFC 3927 link local ietfprotoregex='(^192\.0\.0\.)' # IETF protocol assignments testnetregex='(^192\.0\.2\.)|(^198\.51\.100\.)|(^203\.0\.113\.)' # RFC 5737 TEST-NET benchmarkregex='(^192\.1[8-9]\.)' # RFC 2544 Network interconnect device benchmark testing sixtofouranycast='(^192\.88\.99\.)' # RFC 7526 6to4 anycast relay multicastregex='(^22[4-9]\.)|(^23[0-9]\.)' # Multicast reservedregex='(^24[0-9]\.)|(^25[0-5]\.)' # Reserved for future use/limited broadcast (255.255.255.255) if [[ "$1" =~ $localhostregex ]]; then bogon_tag="rfc1122 (Localhost)" MTR_TRACING=false elif [[ "$1" =~ $thisnetregex ]]; then bogon_tag="rfc1122 ('this' network)" MTR_TRACING=false elif [[ "$1" =~ $privateregex ]]; then bogon_tag="rfc1918 (Private Space)" elif [[ "$1" =~ $cgnregex ]]; 
then bogon_tag="rfc6598 (CGN Space)" elif [[ "$1" =~ $llregex ]]; then bogon_tag="rfc3927 (Link-Local)" elif [[ "$1" =~ $ietfprotoregex ]]; then bogon_tag="(Reserved for IETF protocol assignments)" elif [[ "$1" =~ $testnetregex ]]; then bogon_tag="rfc5737 (Reserved for Test Networks)" elif [[ "$1" =~ $benchmarkregex ]]; then bogon_tag="rfc2544 (Reserved for Network device benchmark testing)" elif [[ "$1" =~ $sixtofouranycast ]]; then bogon_tag="rfc7526 (6to4 anycast relay)" elif [[ "$1" =~ $multicastregex ]]; then bogon_tag="(Multicast Address)" MTR_TRACING=false elif [[ "$1" =~ $reservedregex ]]; then bogon_tag="(Reserved Address)" MTR_TRACING=false fi if [ -n "$bogon_tag" ]; then IS_BOGON=true json_bogon_type="$bogon_tag" bogon_tag="${yellowbg} BOGON ${default} ${bogon_tag}" fi } LookupASNAndRouteFromIP(){ found_asn="" found_route="" found_asname="" IsBogon "$1" if [ "$IS_BOGON" = false ]; then if echo "$1" | grep -q ':'; then # whois query for IPv6 addresses output=$(whois -h whois.cymru.com " -f -p -u $1" | sed 's/\ *|\ */|/g') found_asn=$(echo "$output" | awk -F'[|]' 'NR==1{print $1}') if [ "$found_asn" = "NA" ]; then # Team Cymru has no data for this IPv6. Inform WhoisIP() that we will have to fall back to a generic whois lookup. found_asn="" UNANNOUNCED_PREFIX=true else found_asname=$(echo "$output" | awk -F'[|]' 'NR==1{print $4}') found_route=$(echo "$output" | awk -F'[|]' 'NR==1{print $3}') UNANNOUNCED_PREFIX=false # lookup CAIDA rank info for the origin AS GetCAIDARank "$found_asn" fi else # Query RIPEStat for IPv4 addresses output=$(docurl -m5 -s "https://stat.ripe.net/data/prefix-overview/data.json?resource=$1&sourceapp=nitefood-asn") if jq -r '.data.announced' <<<"$output" | grep -q "true"; then found_asn=$(jq -r '.data.asns[0].asn' <<<"$output") found_asname=$(jq -r '.data.asns[0].holder' <<<"$output") # look up the country this ASN is located in country=$(docurl -m5 -s "https://stat.ripe.net/data/rir-stats-country/data.json?resource=AS${found_asn}" | jq -r '.data.located_resources[0].location') [[ "$country" != "null" ]] && found_asname="${found_asname}, ${country}" found_route=$(jq -r '.data.resource' <<<"$output") UNANNOUNCED_PREFIX=false else # RIPEStat has no data for this IPv4. Fallback to Team Cymru DNS query (faster than whois) rev=$(echo "$1" | cut -d '/' -f 1 | awk -F'.' '{printf $4 "." $3 "." $2 "." $1}') output=$(host -t TXT "$rev.origin.asn.cymru.com" | awk -F'"' 'NR==1{print $2}' | sed 's/\ *|\ */|/g') found_asn=$(echo "$output" | awk -F'[|]' 'NR==1{print $1}' | cut -d ' ' -f 1) # final cut gets first origin AS only if cymru has multiple if [ -n "$found_asn" ]; then found_asname=$(host -t TXT "AS$found_asn.asn.cymru.com" | grep -v "NXDOMAIN" | awk -F'|' 'NR==1{print substr($NF,2,length($NF)-2)}') found_route=$(echo "$output" | awk -F'[|]' 'NR==1{print $2}') UNANNOUNCED_PREFIX=false else # Team Cymru has no data for this IPv4 either. Inform WhoisIP() that we will have to fall back to a generic whois lookup. 
UNANNOUNCED_PREFIX=true fi fi fi else # bogon address, consider it unannounced UNANNOUNCED_PREFIX=true fi } ResolveHostnameToIPList(){ raw_host_output=$(host "$1" 2>/dev/null) if echo -e "$raw_host_output" | grep -q "mail is handled"; then host_output=$(echo "$raw_host_output" | grep -B100 -A0 -m1 "mail is handled" | sed '$d') else host_output="$raw_host_output" fi ip=$(echo "$host_output" | grep -Eo "$ipv4v6regex") echo -e "$ip\n" } PrintErrorAndExit(){ if [ "$JSON_OUTPUT" = true ]; then # json output status_json_output="fail" reason_json_output="${1//\"/\\\"}" json_resultcount=0 PrintJsonOutput elif [ "$IS_ASN_CHILD" = true ]; then echo -e "\n${redbg}${1}${default}\n" # get the error in the html report tput sgr0 else # normal output echo -e "\n${redbg}${1}${default}" >&2 tput sgr0 fi exit 1 } PrintUsage(){ # if an argument is passed, it will be displayed on stderr and the script will exit with error script_name=$(basename "$0") JSON_OUTPUT=false BoxHeader "ASN / RPKI validity / BGP stats / IPv4v6 / Prefix / ASPath / Organization / IP reputation lookup tool" >&2 echo -e "\nVERSION:\n\n ${ASN_VERSION}" \ "\n\nUSAGE:\n\n $script_name [${green}OPTIONS${default}] [${blue}TARGET${default}]" \ "\n $script_name [${red}-v${default}] ${red}-l${default} [${red}SERVER OPTIONS${default}]" \ "\n\nOPTIONS:" \ "\n\n ${green}-t (enable trace)\n\t${default}Enable AS path trace to the ${blue}TARGET${default} (this is the default behavior)" \ "\n\n ${green}-n (no trace|no additional INETNUM lookups)\n\t${default}Disable tracing the AS path to the ${blue}TARGET${default} (for IP targets) or" \ "\n\tDisable additional (unannounced / announced by other AS) INETNUM lookups for the ${blue}TARGET${default} (for AS targets)" \ "\n\n ${green}-d (detailed)\n\t${default}Output detailed hop info during the AS path trace to the ${blue}TARGET${default}" \ "\n\tThis option also enables RPKI validation/BGP hijacking detection for every hop" \ "\n\n ${green}-a (ASN Suggest)\n\t${default}Lookup AS names and numbers matching ${blue}TARGET${default}" \ "\n\n ${green}-u (Transit/Upstream lookup)\n\t${default}Inspect BGP updates and ASPATHs for the ${blue}TARGET${default} address/prefix and identify possible transit/upstream autonomous systems" \ "\n\n ${green}-c (Country CIDR)\n\t${default}Lookup all IPv4/v6 CIDR blocks allocated to the ${blue}TARGET${default} country" \ "\n\n ${green}-g (Bulk Geolocate)\n\t${default}Geolocate all IPv4/v6 addresses passed as ${blue}TARGET${default}" \ "\n\tThis mode supports multiple targets, stdin input and IP extraction from input, e.g." \ "\n\t'asn -g < /var/log/apache2/error.log' or 'echo 1.1.1.1 2.2.2.2 | asn -g'" \ "\n\n ${green}-s (Shodan scan)\n\t${default}Query Shodan's InternetDB for CVE/CPE/Tags/Ports/Hostnames data about ${blue}TARGET${default}" \ "\n\tThis mode supports multiple targets and stdin input, e.g." \ "\n\t'asn -s < iplist' or 'echo 1.1.1.0/24 google.com | asn -s'" \ "\n\n ${green}-o (organization search)\n\t${default}Force ${blue}TARGET${default} to be treated as an Organization Name" \ "\n\n ${green}-m (monochrome output)\n\t${default}Disable colored output" \ "\n\n ${green}-v (verbose)\n\t${default}Enable (and log to \$HOME/asndebug.log) debug messages (URLs being queried and variable names being assigned)." \ "\n\tAPI call response data (i.e. 
the JSON output) is logged to the logfile.${default}" \ "\n\n ${green}-j (compact JSON output)\n\t${default}Set output to compact JSON mode (ideal for machine parsing)" \ "\n\n ${green}-J (pretty-printed JSON output)\n\t${default}Set output to pretty-printed JSON mode" \ "\n\n ${green}-h (help)\n\t${default}Show this help screen" \ "\n\n ${red}-l (lookup server)\n\t${default}Launch the script in server mode. See ${red}SERVER OPTIONS${default} below" \ "\n\nTARGET:" \ "\n\n ${blue}${default}\n\tLookup matching ASN and BGP announcements/neighbours data." \ "\n\t(Supports \"as123\" and \"123\" formats - case insensitive)" \ "\n\n ${blue}${default}\n\tLookup matching route(4/6), IP reputation and ASN data" \ "\n\n ${blue}${default}\n\tLookup matching ASN data" \ "\n\n ${blue}${default}\n\tLookup matching IP, route and ASN data. Supports multiple IPs - e.g. DNS RR" \ "\n\n ${blue}${default}\n\tExtract hostname/IP from the URL and lookup relative data. Supports any protocol prefix, non-standard ports and prepended credentials" \ "\n\n ${blue}${default}\n\tSearch by company name and lookup network ranges exported by (or related to) the company" \ "\n\nSERVER OPTIONS:" \ "\n\n ${red}BIND_ADDRESS${default}\n\tIP address (v4/v6) to bind the listening server to (e.g. '$script_name -l 0.0.0.0')\n\tDefault value: ${red}${DEFAULT_SERVER_BINDADDR_v4} (IPv4) or ${DEFAULT_SERVER_BINDADDR_v6} (IPv6)${default}" \ "\n\n ${red}BIND_PORT${default}\n\tTCP Port to bind the listening server to (e.g. '$script_name -l 12345')\n\tDefault value: ${red}${DEFAULT_SERVER_BINDPORT}${default}" \ "\n\n ${red}BIND_ADDRESS${default} ${red}BIND_PORT${default}\n\tIP address and port to bind the listening server to (e.g. '$script_name -l ::1 12345')" \ "\n\n ${red}-v (verbose)\n\t${default}Enable verbose output and debug messages in server mode${default}" \ "\n\n ${red}--allow host[,host,...]\n\t${default}Allow only given hosts to connect to the server${default}" \ "\n\n ${red}--allowfile file\n\t${default}A file of hosts allowed to connect to the server${default}" \ "\n\n ${red}--deny host[,host,...]\n\t${default}Deny given hosts from connecting to the server${default}" \ "\n\n ${red}--denyfile file\n\t${default}A file of hosts denied from connecting to the server${default}" \ "\n\n ${red}-m, --max-conns \n\t${default}The maximum number of simultaneous connections accepted by the server. 100 is the default.${default}" \ "\n\n\n Note: Every option in server mode (after -l) is passed directly to the ncat listener." \ "\n Refer to ${blue}man ncat${default} for more details on the available commands." \ "\n Unless specified, the default IP:PORT values of ${DEFAULT_SERVER_BINDADDR_v4}:${DEFAULT_SERVER_BINDPORT} (for IPv4) or [${DEFAULT_SERVER_BINDADDR_v6}]:${DEFAULT_SERVER_BINDPORT} (for IPv6) will be used (e.g. 
'asn -l')" \ "\n\n Example server usage:" \ "\n\t${blue}asn -l${default}" \ "\n\t (starts server on default IP(v4/v6):PORT)\n" \ "\n\t${blue}asn -l 0.0.0.0 --allow 192.168.0.0/24,192.168.1.0/24,192.168.2.245${default}" \ "\n\t (binds to all availables IPv4 interfaces on the default port, allowing only connections from the three specified subnets)\n" \ "\n\t${blue}asn -l :: 2222 --allow 2001:DB8::/32${default}" \ "\n\t (binds to all availables IPv6 interfaces on port 2222, allowing only connections from the specified prefix)\n" \ "\n\t${blue}asn -v -l 0.0.0.0 --allowfile \"~/goodips.txt\" -m 5${default}" \ "\n\t (verbose mode, bind to all IPv4 interfaces, use an allowfile with allowed addresses, accept a maximum of 5 concurrent connections)\n" \ "\n Bookmarklet configuration page:" \ "\n\tplease visit ${blue}http://127.0.0.1:49200/asn_bookmarklet${default} and follow the instructions. More documentation is available on github (link below)." \ "\n\n\nProject homepage: ${yellow}https://github.com/nitefood/asn${default}\n" >&2 [[ -n "$1" ]] && PrintErrorAndExit "$1" } PwhoisLookup(){ StatusbarMessage "Collecting pWhois data" pwhois_output=$(whois -h whois.pwhois.org "$1") StatusbarMessage if echo "$pwhois_output" | grep -vq "That IP address doesn't appear"; then # pwhois_asn=$(echo "$pwhois_output" | grep -E "^Origin-AS" | cut -d ':' -f 2 | sed 's/^ //') # pwhois_prefix=$(echo "$pwhois_output" | grep -E "^Prefix" | cut -d ':' -f 2 | sed 's/^ //') pwhois_asorg=$(echo "$pwhois_output" | grep -E "^AS-Org-Name" | cut -d ':' -f 2 | sed 's/^ //') # group all "Org-Name" fields on a single line pwhois_org=$(echo "$pwhois_output" | grep -E "^Org-Name" | cut -d ':' -f 2 | sed 's/^[ \t]*//g' | while read -r line; do echo -n "$line / "; done | sed 's/ \/ $//') pwhois_net=$(echo "$pwhois_output" | grep -E "^Net-Name" | cut -d ':' -f 2 | sed 's/^ //') # if pWhois' Net-Name=Org-Name, then it's more useful to use AS-Org-Name instead of Org-Name (unless AS-Org-Name is empty) if [ -n "$pwhois_asorg" ] && [ "$pwhois_net" = "$pwhois_org" ]; then pwhois_org="$pwhois_asorg" fi IPGeoRepLookup "$1" IPShodanLookup "$1" pwhois_geo="$ip_geo_data" if [ -z "$ip_geo_data" ]; then if echo "$pwhois_output" | grep -q -E "^Geo-"; then # use "Geo-" fields in pWhois output cityfield="Geo-City" regionfield="Geo-Region" ccfield="Geo-CC" else cityfield="City" regionfield="Region" ccfield="Country-Code" fi pwhois_city=$(echo "$pwhois_output" | grep -m1 -E "^${cityfield}" | cut -d ':' -f 2 | sed 's/^ //') pwhois_region=$(echo "$pwhois_output" | grep -m1 -E "^${regionfield}" | cut -d ':' -f 2 | sed 's/^ //') pwhois_cc=$(echo "$pwhois_output" | grep -m1 -E "^${ccfield}" | cut -d ':' -f 2 | sed 's/^ //') flag_icon_cc=$(tr '[:upper:]' '[:lower:]' <<<"$pwhois_cc") if [ "$pwhois_city" = "NULL" ] || [ "$pwhois_region" = "NULL" ]; then pwhois_geo="$pwhois_cc" else pwhois_geo="$pwhois_city, $pwhois_region ($pwhois_cc)" fi fi else pwhois_output=""; fi } RdnsLookup(){ # reverse DNS (PTR) lookup. 
# get first lookup result only (in case of multiple PTR records) and remove trailing dot and CR (Cygwin) from hostname rdns=$(host "$1" | awk 'NR==1{sub(/\.\r?$/, "", $NF); print $NF}') if echo "$rdns" | grep -E -q "NXDOMAIN|SERVFAIL|REFUSED|^record$"; then rdns=""; fi echo "$rdns" } AbuseLookupForPrefix(){ # $1="whois data", $2=prefix if [ -n "$mtr_output" ] && [ "$DETAILED_TRACE" = false ]; then # skip abuse lookup for individual trace hops in non-detailed mode return fi whoisdata="$1" prefix="$2" abuselist="" for abusecontact in $(echo -e "$whoisdata" | grep -E "^OrgAbuseEmail:|^abuse-c:|^% Abuse|^abuse-mailbox:" | awk '{print $NF}' | tr -d \'); do if ! grep -q '@' <<<"$abusecontact"; then # the currently parsed abuse contact, found in whois data, is not an email (but likely a NIC handle), fall back to RIPE API resolvedabuse=$(docurl -m5 -s "https://stat.ripe.net/data/abuse-contact-finder/data.json?resource=$2&sourceapp=nitefood-asn" | jq -r 'select (.data.abuse_contacts != null) | .data.abuse_contacts[]') if ! grep -q '@' <<<"$resolvedabuse"; then # RIPE API didn't give back an email, second and last fall back to DSHIELD API dshield_abuse_contact=$(docurl -m15 -s --user-agent "nitefood/asn" https://isc.sans.edu/api/ip/$2?json | jq -r 'select (.ip.asabusecontact != null) | .ip.asabusecontact') if grep -q '@' <<<"$dshield_abuse_contact"; then resolvedabuse="$dshield_abuse_contact" fi fi [[ -n "$resolvedabuse" ]] && abusecontact="$resolvedabuse" fi [[ -n "$abuselist" ]] && abuselist+="\n" abuselist+="$abusecontact" done if [ "$JSON_OUTPUT" = true ]; then # json output if [ -n "$abuselist" ]; then echo -e "$abuselist" | jq -cM --slurp --raw-input 'split("\n") | map(select(length > 0)) | unique' fi else # normal output if [ -n "$abuselist" ]; then # use jq to join contacts together and separate them with the " / " multi-character delimiter, while getting rid of spurious newlines echo -e "$abuselist" | jq -r --slurp --raw-input 'split("\n") | map(select(length > 0)) | unique | join(" / ")' else echo "-" fi fi } HopPrint(){ StatusbarMessage # shellcheck disable=SC2124 output="$@" # create hyperlinks if running in server mode if [ "$IS_ASN_CHILD" = true ]; then [[ -z "$ixp_tag" ]] && htmlcolor="$htmlwhite" || htmlcolor="$htmlblue" # extract and trim duplicate IPs from the trace output line found_ips=$(grep -Eo "${ipv4v6regex}" <<<"$output" | sort -u) if [ -n "$found_ips" ]; then for ip in $found_ips; do # replace only the last IP in the trace output, so to handle special cases # where the trace hop PTR contains the full dotted IP, ex: # 1.2.3.4.domain.name (1.2.3.4) -> 1.2.3.4.domain.name () output=$(sed "s/\(.*\)$ip/\1$ip<\/a>/" <<<"$output") done fi fi echo -e "$output" StatusbarMessage "Analyzing collected trace output to ${bluebg}${host_to_trace}${lightgreybg}" } TraceASPath(){ starttime=$(date +%s) host_to_trace="$1" border_color="$lightblue" # attepmt to be as responsive as possible # ideal output width : 150 (size of headers in normal traces) # ideal border width : 100 (used in detailed traces only) if [ "$terminal_width" -ge 153 ]; then output_width=150 border_width=100 elif [ "$terminal_width" -ge 103 ]; then output_width=$((terminal_width-3)) border_width=100 else output_width=$((terminal_width-3)) border_width=$((terminal_width-3)) fi WhatIsMyIP if [ "$DETAILED_TRACE" = true ]; then headermsg="Detailed trace to $userinput" else headermsg="Trace to $userinput" fi BoxHeader "$headermsg" # check if we're trying to trace an IPv6 from an IPv4-only box echo "$host_to_trace" | grep -q ':' 
&& is_ipv6=true || is_ipv6=false if [ "$is_ipv6" = true ] && [ "$HAVE_IPV6" = false ]; then PrintErrorAndExit "Error: cannot trace an IPv6 from this IPv4-only host!" fi echo "" StatusbarMessage "Collecting trace data to ${bluebg}${host_to_trace}${lightgreybg}" # start the mtr trace DebugPrint "${yellow}mtr → $host_to_trace ($MTR_ROUNDS rounds)${default}" mtr_output=$(mtr -C -n -c"$MTR_ROUNDS" "$host_to_trace" | tail -n +2) declare -a tracehops_array declare -a aspath_array # initialize the aspath array with our source AS if [ "$HAVE_IPV6" = true ]; then found_asn=$(docurl -s "https://stat.ripe.net/data/whois/data.json?resource=$local_wanip&sourceapp=nitefood-asn" | jq -r '.data.irr_records[0] | map(select(.key | match ("origin"))) | .[].value') WhoisASN "$found_asn" else LookupASNAndRouteFromIP "$local_wanip" fi if [ -z "$found_asn" ]; then found_asn="XXX" found_asname="(Unknown)" fi if [ "$IS_ASN_CHILD" = true ]; then aspatharray_asnvalue="$found_asn" else aspatharray_asnvalue="$found_asn" fi # Retrieve local AS CAIDA rank GetCAIDARank "$found_asn" if [ -n "$caida_asrank_text" ]; then aspath_entry=$(printf "${red}%-6s ${green}%s${default}${dim} •%s${dim}${default}" "$aspatharray_asnvalue" "$(echo "$found_asname" | cut -d ',' -f 1)" "$caida_asrank_text") else aspath_entry=$(printf "${red}%-6s ${green}%s${default}" "$aspatharray_asnvalue" "$(echo "$found_asname" | cut -d ',' -f 1)") fi aspath_array+=("$aspath_entry") # mtr finished, analyze and output results # print trace headers (only non-detailed trace) if [ "$DETAILED_TRACE" = false ]; then HopPrint "$(printf "${lightgreybg}%4s %-$((output_width-61))s %7s %13s %s ${default}" "Hop" "IP Address" "Loss%" "Ping avg" "AS Information")" fi LAST_HOP=false ROUTING_LOOP=false # parse mtr output (csv) hop_num=0 while true; do ((hop_num++)) IFS=',' read -ra cur_hop_data <<< "$(echo "$mtr_output" | head -n "$hop_num" | tail -n 1)" mtr_hopnum=${cur_hop_data[4]} [[ "$hop_num" -gt "$mtr_hopnum" ]] && break # we're past the last trace hop, quit the loop hop_ip=${cur_hop_data[5]} hop_loss=${cur_hop_data[6]%.*} hop_ping=$(echo "${cur_hop_data[10]}" | awk '{ printf ("%.1f\n", $1) }') if [ "$hop_loss" -ne 0 ]; then # color packet loss yellow if between 1% and 50%, or red if > 50% [[ "$hop_loss" -le 50 ]] && loss_color="$lightyellow" || loss_color="$lightred" if [ "$DETAILED_TRACE" = false ]; then hop_loss=$(printf "${loss_color}%5s" "$hop_loss") # packet loss position in normal path trace (spacing to fit the column alignment) else hop_loss="${loss_color}${hop_loss}" # packet loss position in detailed path trace (no spacing) fi fi tracehops_array["$hop_num"]="$hop_ip" ixp_tag="" hop_asn="" if [ "$hop_ip" = "???" ]; then # print no reply hop info if [ "$DETAILED_TRACE" = true ]; then trailing_line=$(printf '%.0s═' $(seq $((border_width-9-${#hop_ip}))) ) hop_output="$(printf "${border_color}╔═[${default}%3s. %s ${border_color}]%s╗${default}\n" "$hop_num" "$hop_ip" "$trailing_line")" hop_output+="\n ├${bluebg}RTT${default} ${white}* (No reply)${default}\n" hop_output+=" └${bluebg}LOS${default} ${hop_loss}%${default} packet loss\n" else hop_output="$(printf "%3s. 
%-$((output_width-60))s %6s${default} %13s %s" "$hop_num" "$hop_ip" "$hop_loss%" "*" "${white}(No reply)${default}")" fi HopPrint "${hop_output}" continue # jump to the next hop fi # do a reverse DNS lookup for the IP hostname=$(RdnsLookup "$hop_ip") # check for routing loops, if so fail immediately if [ "$hop_num" -ge 2 ]; then prev_hop=${tracehops_array[$(( hop_num-1 ))]} # Check disabled to avoid detecting routing loops in rare cases where the same hop appears twice in a row during the trace # if [ "$hop_ip" = "$prev_hop" ]; then # ROUTING_LOOP=true # break # fi fi if [ "$hop_num" -ge 4 ]; then # detect routing loops two_hops_ago=${tracehops_array[$(( hop_num-2 ))]} three_hops_ago=${tracehops_array[$(( hop_num-3 ))]} if [ "$hop_ip" = "$two_hops_ago" ] && [ "$prev_hop" = "$three_hops_ago" ]; then ROUTING_LOOP=true break fi fi # AS DATA lookup # check if it's the last hop [[ "$hop_ip" = "$host_to_trace" ]] && LAST_HOP=true LookupASNAndRouteFromIP "$hop_ip" # check if IP is a bogon if [ "$IS_BOGON" = true ]; then asn_data="$bogon_tag" pwhois_output="" else # Hop IP is not a bogon address. Proceed with lookup of hop data if [ "$UNANNOUNCED_PREFIX" = true ]; then # No data in the Team Cymru DB. Check to see if the ip is assigned to an IXP, or fall back to generic whois. WhoisIP "$hop_ip" >/dev/null # don't display this, we're being parsed off-screen hop_asn="${red}N/A (address not announced)${default}" hop_org="$pwhois_org" hop_net="$pwhois_net" [[ -n "$found_route" ]] && hop_net+=" ($found_route)" hop_typ="$ip_type_data" hop_cpe="$ip_shodan_cpe_data" hop_ports="$ip_shodan_ports_data" hop_tags="$ip_shodan_tags_data" hop_cve="$ip_shodan_cve_data" hop_geo="$pwhois_geo" hop_rep="$ip_rep_data" if [ -n "$ixp_data" ]; then # this hop is an IXP ixp_tag="${bluebg} IXP ${default}" asn_data="${ixp_tag} ${blue}${ixp_data}${default}" hop_org="${ixp_tag} ${blue}${ixp_data}${default}" aspath_entry=$(printf "%-6s ${blue}%s${default}" "$ixp_tag" "$ixp_data") aspath_array+=("$aspath_entry") else # no data found and not an IXP hop, try retrieving relevant info from a generic whois lookup hop_whois_data=$(echo -e "$full_whois_data" | grep -i -m2 -E "^netname:|^orgname:|^org-name:|^descr:" | cut -d ':' -f 2 | sed 's/^[ \t]*//' | while read -r line; do echo -n "$line / "; done | sed 's/ \/ $//') if [ -z "$hop_whois_data" ]; then asn_data="${yellow}(No data)${default}" else asn_data="${yellow}(${hop_whois_data})${default}" fi fi else # $hop_ip belongs to an announced prefix if [ "$IS_ASN_CHILD" = true ]; then asn_data="${red}[AS$found_asn] ${green}$found_asname${default}" aspatharray_asnvalue="$found_asn" else asn_data="${red}[AS$found_asn] ${green}$found_asname${default}" aspatharray_asnvalue="$found_asn" fi # Retrieve AS CAIDA rank for this hop GetCAIDARank "$found_asn" if [ -n "$caida_asrank_text" ]; then aspath_entry=$(printf "${red}%-6s ${green}%s${default}${dim} •%s${dim}${default}" "$aspatharray_asnvalue" "$(echo "$found_asname" | cut -d ',' -f 1)" "$caida_asrank_text") else aspath_entry=$(printf "${red}%-6s ${green}%s${default}" "$aspatharray_asnvalue" "$(echo "$found_asname" | cut -d ',' -f 1)") fi # avoid adding the same AS multiple times in a row in the summary path if [[ "${aspath_array[-1]}" != "$aspath_entry" ]]; then aspath_array+=("$aspath_entry") fi fi if [ "$DETAILED_TRACE" = true ] && [ "$UNANNOUNCED_PREFIX" = false ]; then hop_asn="$found_asn" hop_prefix="$found_route" # run a pWhois lookup if the hop is within an announced prefix PwhoisLookup "$hop_ip" # in the event where Cymru has data, but 
pWhois doesn't, run a WhoisIP to fetch generic whois info # specifying the "generic_whois_lookup_only" parameter in order not to run pWhois/IXP lookups again [[ -z "$pwhois_output" ]] && WhoisIP "$hop_ip" "generic_whois_lookup_only" >/dev/null # don't display this, we're being parsed off-screen hop_org="$pwhois_org" hop_net="$pwhois_net" hop_typ="$ip_type_data" hop_cpe="$ip_shodan_cpe_data" hop_ports="$ip_shodan_ports_data" hop_tags="$ip_shodan_tags_data" hop_cve="$ip_shodan_cve_data" hop_geo="$pwhois_geo" hop_rep="$ip_rep_data" fi fi # DNS data (only used if a hostname was resolved) if [ -n "$hostname" ] && [ ! "$hostname" = "-" ]; then hop_ip="$hostname ($hop_ip)" fi # IXP coloring [[ -z "$ixp_tag" ]] && hop_ip="${white}${hop_ip}${default}" || hop_ip="${blue}${hop_ip}${default}" # print trace hop info if [ "$DETAILED_TRACE" = false ]; then hop_output="$(printf "%3s. %-$((output_width-45))s %6s${default} %10s ms %s" "$hop_num" "${hop_ip}" "$hop_loss%" "${hop_ping}" "$asn_data")" else trailing_line=$(printf '%.0s═' $(seq $((border_width+6-${#hop_ip}))) ) hop_output="$(printf "${border_color}╔═[${default}%3s. %s ${border_color}]%s╗${default}\n" "$hop_num" "${hop_ip}" "$trailing_line")" hop_output+="\n ├${bluebg}RTT${default} ${white}${hop_ping} ms${default}\n" hop_output+=" ├${bluebg}LOS${default} ${hop_loss}%${default} packet loss${default}" # only add ASN data when not an IXP, otherwise we'll have duplicate data when ORG gets printed later if [ -z "$ixp_tag" ]; then # if it's a bogon address it's going to be the last branch of the displayed tree if [ "$IS_BOGON" = true ]; then hop_output+="\n └${bluebg}TYP${default} $asn_data${default}" else hop_output+="\n ├${bluebg}ASN${default} $asn_data${default}" fi fi fi HopPrint "$hop_output" if [ "$DETAILED_TRACE" = true ] && [ -n "$hop_asn" ]; then if [ -n "$found_asname" ]; then # only run RPKI lookups if the prefix is announced RPKILookup "$found_asn" "$hop_prefix" if [ "$INVALID_ROA" = true ]; then # notify user of possible BGP hijack/route leak aspath_array[-1]+=" ${redbg} ─> WARNING: POSSIBLE ROUTE LEAK / BGP HIJACK ${default}" fi else rpki_output="${red}N/A (hop address not announced)${default}" fi # compose hop detail output # 1. hop ASN, ORG, NET, ROA hop_details="" [[ "$DETAILED_TRACE" = false ]] && hop_details=" ├${bluebg}ASN${default} ${red}${hop_asn}${default}\n" [[ "$ixp_tag" = "" ]] && hop_details+=" ├${bluebg}RNK${default} ${caida_asrank_recap}${default}\n" hop_details+=" ├${bluebg}ORG${default} ${green}${hop_org}${default}\n" hop_details+=" ├${bluebg}NET${default} ${yellow}${hop_net}${default}\n" hop_details+=" ├${bluebg}ROA${default} ${rpki_output}\n" # 2. hop TYP (optional, only if hop is a particular IP type (anycast/hosting/etc)) [[ -n "$hop_typ" ]] && hop_details+=" ├${bluebg}TYP${default}${hop_typ}${default}\n" # 3. hop CPE, PORTS, TAGS, CVE [[ -n "$hop_cpe" ]] && hop_details+=" ├${bluebg}CPE${default}${hop_cpe}${default}\n" [[ -n "$hop_ports" ]] && hop_details+=" ├${bluebg}POR${default}${hop_ports}${default}\n" [[ -n "$hop_tags" ]] && hop_details+=" ├${bluebg}TAG${default}${hop_tags}${default}\n" [[ -n "$hop_cve" ]] && hop_details+=" ├${bluebg}CVE${default}${hop_cve}${default}\n" # 4. 
hop GEO, REP hop_details+=" ├${bluebg}GEO${default} ${magenta}${hop_geo}${default}\n" hop_details+=" └${bluebg}REP${default} ${hop_rep}${default}" [[ "$DETAILED_TRACE" = false ]] && hop_details+="\n" # display hop details on screen HopPrint "$hop_details" [[ "$DETAILED_TRACE" = true ]] && HopPrint "\n" elif [ "$DETAILED_TRACE" = true ]; then # PWHOIS lookups ON, but no valid hop data (e.g. no-reply hop). Just add a newline [[ "$DETAILED_TRACE" = true ]] && HopPrint "\n" fi [[ "$LAST_HOP" = true ]] && break done # mtr output parsing complete if [ "$LAST_HOP" = false ]; then # last hop wasn't our target IP. Add a missing last hop to the trace. mtr_end_msg="${redbg} no route to host" [[ "$ROUTING_LOOP" = true ]] && mtr_end_msg+=" (routing loop detected)" mtr_end_msg+=" ${default}" # print hop info with final error message if [ "$DETAILED_TRACE" = false ]; then HopPrint "$(printf "%3s. %-$((output_width-40))s ${lightred}%11s%%${default} %13s %s" "$hop_num" "$mtr_end_msg" "100" "*" "${white}(No reply)${default}")" else upper_border="${border_color}╔$(printf '%.0s═' $(seq "$border_width"))╗${default}" lower_border="${border_color}╚$(printf '%.0s═' $(seq "$border_width"))╝${default}" HopPrint "${upper_border}\n ${mtr_end_msg}\n${lower_border}" fi aspath_array+=("${mtr_end_msg}${default}") fi endtime=$(date +%s) runtime=$((endtime-starttime)) StatusbarMessage if [ "$IS_ASN_CHILD" = true ]; then # date and time is already displayed elsewhere in server mode echo -e "\nTrace completed in $runtime seconds.\n" else tracetime=$(date +'%F %T %Z') echo -e "\nTrace completed in $runtime seconds on $tracetime\n" fi BoxHeader "AS path to $userinput" echo -en "\n " for as in "${aspath_array[@]}"; do if [ "$as" = "${aspath_array[0]}" ]; then echo -en "${as} ${yellowbg} Local AS ${default}" else echo -en "${as}${default}" fi if [ "$as" != "${aspath_array[-1]}" ]; then echo -en "\n ╭╯\n ╰" fi done echo -e "\n" } SearchByOrg(){ unset orgs declare -a orgs echo "" if [ "$ORG_FILTER" = false ]; then StatusbarMessage "Searching for organizations matching ${bluebg}$1${lightgreybg}" full_org_search_data=$(whois -h whois.pwhois.org "registry org-name=$1") original_organizations=$(echo -e "$full_org_search_data" | grep -E "^Org-Name:" | cut -d ':' -f 2- | sed 's/^ //g' | sort -uf) total_orgsearch_results=$(echo -e "$original_organizations" | wc -l) organizations="$original_organizations" else # user chose to apply a search filter to a previous query if [ ${#orgfilters_array[@]} -eq 0 ] && [ ${#excl_orgfilters_array[@]} -eq 0 ]; then # user deleted all search filters. 
Revert to original query result organizations="$original_organizations" ORG_FILTER=false else StatusbarMessage "Applying filters" filtered_org="$original_organizations" # parse all inclusion filters for filter in "${orgfilters_array[@]}"; do apply_filter=$(echo -e "$filtered_org" | grep -i -- "$filter") if [ -z "$apply_filter" ]; then StatusbarMessage echo -en "${yellow}Warning: No results found for ${bluebg}${filter}${default}" sleep 2 # remove last filter term unset 'orgfilters_array[${#orgfilters_array[@]}-1]' else filtered_org="$apply_filter" fi done # parse all exclusion filters for filter in "${excl_orgfilters_array[@]}"; do apply_filter=$(echo -e "$filtered_org" | grep -i -v -- "$filter") if [ -z "$apply_filter" ]; then StatusbarMessage echo -en "${yellow}Warning: No more results found if excluding ${bluebg}${filter}${default}" sleep 2 # remove last filter term unset 'excl_orgfilters_array[${#excl_orgfilters_array[@]}-1]' else filtered_org="$apply_filter" fi done # have we removed all filters (because of no matches)? go back to unfiltered results if [ ${#orgfilters_array[@]} -eq 0 ] && [ ${#excl_orgfilters_array[@]} -eq 0 ]; then ORG_FILTER=false echo "" fi organizations="$filtered_org" fi fi for orgname in $organizations; do orgs+=("$orgname") done StatusbarMessage if [ ${#orgs[@]} -eq 0 ]; then # company search yielded no results PrintErrorAndExit "Error: no organizations found" fi # Menu showing loop while true; do ShowMenu searchresults="" [[ "$LOOKUP_ALL_RESULTS" == true ]] && orgs_to_lookup=("${orgs[@]}") || orgs_to_lookup=("$org") for org in "${orgs_to_lookup[@]}"; do orgids=$(echo -e "$full_org_search_data" | grep -i -E -B1 "Org-Name: $org$" | grep "Org-ID" | cut -d ':' -f 2- | sed 's/^ //g') NO_ERROR_ON_INTERRUPT=true for ipversion in 4 6; do NO_RESULTS=true searchresults+=$(BoxHeader "IPv${ipversion} networks for organization \"${org}\"") # iterate over Org-IDs related to the company (in case of multiple Org-IDs for a single Org-Name) for orgid in $orgids; do StatusbarMessage "Looking up IPv${ipversion} networks for organization ${bluebg}$org${lightgreybg} (Org-ID: ${bluebg}${orgid}${lightgreybg})" netblocks_output="" if [ "$ipversion" = "4" ]; then # Parse IPv4 NETBLOCKS netblocks=$(whois -h whois.pwhois.org "netblock org-id=${orgid}" | grep -E "^\*>") netblocks_header=" IPv4 NET RANGE | INFO" for netblock in $netblocks; do prefix=$(echo -e "$netblock" | cut -d '>' -f 2 | cut -d '|' -f 1) netname=$(echo -e "$netblock" | cut -d '>' -f 2 | cut -d '|' -f 2 | tr -d ' ') netblock_type=$(echo -e "$netblock" | cut -d '>' -f 2 | cut -d '|' -f 3 | tr -d ' ') if [ "$netblock_type" = "unknown" ]; then nettype="" else nettype=" (${yellow}$netblock_type${default})" fi regdate=$(echo -e "$netblock" | cut -d '>' -f 2 | cut -d '|' -f 4 | tr -d ' ') if [ "$HAVE_IPCALC" = true ]; then # deaggregate IPv4 netblocks into CIDR prefixes for readability prefix_spacing=19 trimmed_prefix=$(echo "$prefix" | tr -d ' ') prefix=$(IpcalcDeaggregate "$trimmed_prefix") else # no ipcalc, use direct pWhois output prefix_spacing=41 fi netblocks_output+=$(printf "\n${blue}%${prefix_spacing}s${default} | ${green}%-45s${default} - Registered: ${magenta}%s${default}%s" "$prefix" "$netname" "$regdate" "$nettype") done [[ "$HAVE_IPCALC" = true ]] && netblocks_header=" IPv4 PREFIX | INFO" else # Parse IPv6 NETBLOCKS netblocks=$(whois -h whois.pwhois.org "netblock6 org-id=${orgid}" | grep -E "^Net-(Range|Name|Handle|Type)|^Register-Date" |\ cut -d ':' -f 2- |\ sed 's/^ //g' |\ awk '{if (NR%5) {ORS=""} else 
{ORS="\n"}{print $0"|"}}') # cheers https://stackoverflow.com/a/35315421/5377165 netblocks_header=" IPv6 NET RANGE | INFO" for netblock in $netblocks; do prefix=$(echo -e "$netblock" | cut -d '|' -f 1) netname=$(echo -e "$netblock" | cut -d '|' -f 2) nethandle=$(echo -e "$netblock" | cut -d '|' -f 3) netname+=" (${nethandle})" netblock_type=$(echo -e "$netblock" | cut -d '|' -f 4) if [ "$netblock_type" = "unknown" ]; then nettype="" else nettype=" (${yellow}$netblock_type${default})" fi regdate=$(echo -e "$netblock" | cut -d '|' -f 5) prefix_spacing=61 netblocks_output+=$(printf "\n${blue}%${prefix_spacing}s${default} | ${green}%-45s${default} - Registered: ${magenta}%s${default}%s" "$prefix" "$netname" "$regdate" "$nettype") done fi if [ -n "$netblocks_output" ]; then # Print out netblocks NO_RESULTS=false searchresults+=$(echo -e "\n${red}Org-ID: ${magenta}${orgid}${red}${default}\n${netblocks_header}${netblocks_output}") searchresults+="\n" fi done [[ "$NO_RESULTS" = true ]] && searchresults+="\n\t${red}No results found${default}\n" done done NO_ERROR_ON_INTERRUPT=false StatusbarMessage echo -e "$searchresults\n${yellow}────────────────────────────────────────────────────${default}" # let the user choose if they want to run a quick IP lookup while true; do echo -e "\n- Enter any ${blue}IP/Prefix${default} to look it up or" echo -e "- Press ${yellow}ENTER${default} to return to the menu:\n" echo -n ">> " read -r choice # check if it's an IPv4/IPv6 if [ -n "${choice}" ]; then input=$(echo "$choice" | sed 's/\/.*//g' | grep -Eo "$ipv4v6regex") if [ -n "$input" ]; then # valid IP echo "" StatusbarMessage "Looking up data for ${bluebg}${input}${lightgreybg}" LookupASNAndRouteFromIP "$input" (( longest=${#input}+1 )) WhoisIP "$input" PrintReputationAndShodanData "$input" StatusbarMessage continue else continue fi else # user pressed ENTER, go back to main organizations menu clear break fi done done } ShowMenu(){ # show selection menu for search-by-company results clear BoxHeader "Organizations matching \"$userinput\"" if [ "$ORG_FILTER" = true ]; then num_inclusion_filters="${#orgfilters_array[@]}" num_exclusion_filters="${#excl_orgfilters_array[@]}" num_filters=$(( num_inclusion_filters+num_exclusion_filters )) [[ $num_filters = 1 ]] && s="" || s="s" ACTIVE_FILTERS_STRING=$'\n'"${bluebg}${black}${num_filters} Active filter${s}:${default}" # recap inclusion filters for filter in "${orgfilters_array[@]}"; do ACTIVE_FILTERS_STRING+=" ${lightgreybg}${filter}${default}" done # recap exclusion filters for filter in "${excl_orgfilters_array[@]}"; do ACTIVE_FILTERS_STRING+=" ${lightgreybg}${red}-${filter}${default}" done ACTIVE_FILTERS_STRING+=$'\n' else ACTIVE_FILTERS_STRING="" fi if [ "$HAVE_IPCALC" = true ]; then IPCALC_WARNING="" else IPCALC_WARNING=$'\n'"${yellow}Warning: program ${red}ipcalc${yellow} not found."$'\n'"Install it to enable netblock→CIDR"$'\n'"prefix aggregation.${default}"$'\n' fi PS3="${yellow}────────────────────────────────────────────────────${default} $ACTIVE_FILTERS_STRING ${yellow}${#orgs[@]} of $total_orgsearch_results total results shown${default} Choose an organization or enter: - <${green}text${default}> to FILTER FOR A STRING - <${blue}-${default}> to EXCLUDE A STRING - <${blue}x${default}> to REMOVE ALL FILTERS - <${blue}a${default}> to LOOKUP ALL RESULTS (max 10) - <${blue}q${default}> to QUIT $IPCALC_WARNING >> " echo -e "${yellow}────────────────────────────────────────────────────${green}" COLUMNS=1 set -o posix select choice in "${orgs[@]}"; do for org in 
"${orgs[@]}"; do if [[ "$org" = "$choice" ]]; then LOOKUP_ALL_RESULTS=false break 2 fi done case "$REPLY" in "q"|"Q") echo "" exit 0 ;; "-") # add an exclusion filter echo -n "Enter a string to ${red}exclude${default}: " read -r exclusion_string excl_orgfilters_array+=("$exclusion_string") ORG_FILTER=true SearchByOrg ;; "x"|"X") # reset filters if [ "$ORG_FILTER" = true ]; then unset orgfilters_array unset excl_orgfilters_array declare -a orgfilters_array declare -a excl_orgfilters_array SearchByOrg fi ;; "a"|"A") # lookup all results if [ "${#orgs[@]}" -gt 10 ]; then echo -en "\n${redbg}Too many results! Please add some filters!${default}\n" sleep 2 continue fi LOOKUP_ALL_RESULTS=true break ;; *) # apply filter to the results orgfilters_array+=("$REPLY") ORG_FILTER=true SearchByOrg ;; esac done set +o posix echo "" } PrintReputationAndShodanData(){ # We already collected reputation and Shodan data since we called IPGeoRepLookup() and IPShodanLookup() previously if [ "$JSON_OUTPUT" = true ]; then # JSON output final_json_output+=",\"reputation\":{" [[ -n "$json_rep" ]] && final_json_output+="\"status\":\"$json_rep\"" [[ -n "$json_iqs_threat_score" ]] && final_json_output+="$json_iqs_threat_score" [[ "$gn_noisy" = "true" ]] && final_json_output+=",\"is_noisy\":$gn_noisy" [[ -n "$gn_json_is_knowngood" ]] && final_json_output+=",\"is_known_good\":$gn_json_is_knowngood" [[ -n "$gn_json_is_knownbad" ]] && final_json_output+=",\"is_known_bad\":$gn_json_is_knownbad" [[ -n "$gn_json_aka" ]] && final_json_output+=",\"known_as\":\"$gn_json_aka\"" [[ -n "$json_iqs_threat_tags" ]] && final_json_output+="$json_iqs_threat_tags" final_json_output+="},\"fingerprinting\":{" [[ -n "$shodan_cpes_json_output" ]] && final_json_output+="\"cpes\":$shodan_cpes_json_output," [[ -n "$shodan_ports_json_output" ]] && final_json_output+="\"ports\":$shodan_ports_json_output," [[ -n "$shodan_tags_json_output" ]] && final_json_output+="\"tags\":$shodan_tags_json_output," [[ -n "$shodan_vulns_json_output" ]] && final_json_output+="\"vulns\":$shodan_vulns_json_output," # truncate last comma [[ ${final_json_output: -1} = "," ]] && final_json_output=${final_json_output::-1} final_json_output+="}" final_json_output+="}" else # normal output [[ -n "$ip_shodan_cpe_data" ]] && printf "${white}%${indent}s${bluebg}CPE${default}%s\n" "├" "${ip_shodan_cpe_data}" [[ -n "$ip_shodan_ports_data" ]] && printf "${white}%${indent}s${bluebg}POR${default}%s\n" "├" "${ip_shodan_ports_data}" [[ -n "$ip_shodan_tags_data" ]] && printf "${white}%${indent}s${bluebg}TAG${default}%s\n" "├" "${ip_shodan_tags_data}" [[ -n "$ip_shodan_cve_data" ]] && printf "${white}%${indent}s${bluebg}CVE${default}%s\n" "├" "${ip_shodan_cve_data}" printf "${white}%${indent}s${bluebg}REP${default} ${magenta}%s${default}\n\n" "└" "$ip_rep_data" fi } ShodanRecon(){ shodan_starttime=$(date +%s) shodan_json_output="" curloutput="" urllist="" # identify target(s) type targetlist=$(echo -e "$userinput" | tr ' ' '\n') target_count=$(echo -e "$targetlist" | wc -l) if [ "$target_count" -gt 1 ]; then # user passed multiple targets "e.g. asn -s 1.1.1.1 8.8.8.8" userinput="multiple targets" fi BoxHeader "Shodan scan for $userinput" for target in $targetlist; do input=$(echo "$target" | sed 's/\/.*//g' | grep -Eo "$ipv4v6regex") if [ -z "$input" ]; then # Input is not an IP Address. 
See if it's a hostname (includes at least one dot) if echo "$target" | grep -q "\."; then target=$(awk -F[/:] '{gsub(".*//", ""); gsub(".*:.*@", ""); print $1}' <<<"$target") target=$(ResolveHostnameToIPList "$target") [[ -z "$target" ]] && continue # could not resolve hostname json_target_type="hostname" target=$(grep -v ":" <<<"$target") numips=$(wc -l <<<"$target") [[ $numips = 1 ]] && s="" || s="es" else # not an IP, not a hostname continue fi else json_target_type="ip" fi # prepare shodan InternetDB API URL list for every IP in the given CIDR range for ip in $target; do if grep -q ":" <<<"$ip"; then # Shodan has no data for IPv6 addresses continue fi [[ -n "$urllist" ]] && urllist+="\n" urllist+=$(nmap -sL -n "$ip" 2>/dev/null | awk '/Nmap scan report/{print "https://internetdb.shodan.io/"$NF}') # cheers https://stackoverflow.com/a/31412705 done done [[ -z "$urllist" ]] && PrintErrorAndExit "no valid targets found" numtargets=$(echo -e "$urllist" | wc -l) firsturl=1 while true; do saved_lasturl="$lasturl" lasturl=$(( firsturl + MAX_CONCURRENT_SHODAN_REQUESTS )) # check if concurrent Shodan requests setting is > number of IPs yet to scan # if so, perform a batch lookup with all the urls and break. Otherwise # perform max concurrent lookups, sleep and continue if [ "$lasturl" -ge "$numtargets" ]; then lasturl="$numtargets" LAST_BATCH=true fi urlbatch=$(echo -e "$urllist" | awk "NR==$firsturl,NR==$lasturl") StatusbarMessage "Collecting data for IPs ${bluebg}${firsturl}-${lasturl}${lightgreybg} of ${bluebg}$numtargets${lightgreybg} total ($MAX_CONCURRENT_SHODAN_REQUESTS threads)" DebugPrint "${yellow}curl {$(echo -e "$urlbatch" | tr '\n' ',')}${default}" batchoutput=$( xargs -P "$MAX_CONCURRENT_SHODAN_REQUESTS" -n 1 curl -s < <(echo -e "$urlbatch") ) if grep -q "Rate limit exceeded" <<<"$batchoutput"; then # rate limit exceeded during current batch of queries, loop while waiting to continue retryafter=$(docurl -s -i "https://internetdb.shodan.io/127.0.0.1" | awk '/^retry-after:/ {print $2}' | tr -d '\r\n') while [[ ${retryafter} -gt 1 ]]; do StatusbarMessage "Shodan rate limit hit during batch ${bluebg}${firsturl}-${lasturl}${lightgreybg}, resuming in ${redbg}${retryafter}${lightgreybg}s" sleep 1 retryafter=$(( retryafter-1 )) done # retry last batch lasturl="$saved_lasturl" continue else curloutput+="$batchoutput" [[ "$LAST_BATCH" = true ]] && break # enable below to introduce a delay between batches of curl queries to Shodan (could help with rate limiting) # sleep .5 firsturl=$(( lasturl++ )) fi done StatusbarMessage # convert single API output results to array, delete IPs for which Shodan had no data available json=$(jq -cM --slurp 'del (.[] | select(.ip==null))' <<<"$curloutput") if [ "$json" = "[]" ]; then final_json_output="$json" PrintErrorAndExit "Shodan has no data for $userinput" fi shodan_json_output="$json" [[ "$JSON_OUTPUT" = true ]] && return shodan_endtime=$(date +%s) shodan_runtime=$((shodan_endtime-shodan_starttime)) StatusbarMessage "Parsing collected data" vulnlist="" hostnamelist="" portlist="" taglist="" cpelist="" total_hosts_with_vulns=0 total_hosts_with_ports=0 total_hosts_with_tags=0 total_hosts_with_cpe=0 total_hosts_with_hostnames=0 # find all IPs in the json having a non-null ".ip" attribute. It means Shodan has some data about that IP. 
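	# A minimal standalone sketch of the batching pattern used above (expand a target into
	# per-IP InternetDB URLs with "nmap -sL", fetch them concurrently with xargs, then merge
	# the answers with a jq slurp). The function name and the thread count are illustrative
	# placeholders only and are not referenced anywhere else in this script; it calls curl
	# directly instead of the docurl wrapper so that it stays self-contained.
	example_internetdb_batch(){
		# $1 = IPv4 address or CIDR prefix, e.g. "192.0.2.0/28"
		local threads=8 urls
		# enumerate the individual addresses without probing them (-sL = list scan, -n = no rDNS)
		# and turn each one into its Shodan InternetDB endpoint URL
		urls=$(nmap -sL -n "$1" 2>/dev/null | awk '/Nmap scan report/{print "https://internetdb.shodan.io/"$NF}')
		# fetch up to $threads URLs concurrently, slurp the per-IP JSON objects into one array
		# and drop the entries Shodan has no data for (they carry no "ip" field)
		echo "$urls" | xargs -P "$threads" -n 1 curl -s | jq -s 'map(select(.ip != null))'
	}
	# usage sketch: example_internetdb_batch "192.0.2.0/28" | jq '.[].ports'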
interesting_iplist=$(echo -e "$json" | jq -r '.[].ip' | sort | uniq) # iterate over interesting IPs for ip in $interesting_iplist; do output+="\n${white}•${default} $ip" attribs=$(jq -r '.[] | select(.ip=="'"$ip"'")| to_entries | .[] | .key + "=" + (.value | @sh)' <<<"$json") numattrs=$(grep -Evc "=$|^ip=" <<<"$attribs"); # get number of populated attributes # "attribs" contains a list of attributes for a single IP, composed like this: # # cpes='cpename1' 'cpename2' # hostnames='abc.com' # ip='1.2.3.4' # ports=22 80 8080 # tags='vpn' # vulns='CVE-123' 'CVE-345' # iterate over resulting attributes. count=0 # will be used to decide which tree symbol to use (├ or └) # - hostnames ip_hostnames=$(grep -E "^hostnames=" <<<"$attribs" | cut -d '=' -f 2- | tr -d "'" | uniq) hostnamelist+=$(echo -e "\n$ip_hostnames" | tr ' ' '\n') if [ -n "$ip_hostnames" ]; then count=$(( count+1 )) [[ "$count" -eq "$numattrs" ]] && treesymbol="└" || treesymbol="├" ip_hostnames=$(echo -e "$ip_hostnames" | sed -e 's/ / • /g' -e "s/.\{136\}/&\n /g") total_hosts_with_hostnames=$(( total_hosts_with_hostnames+1 )) output+="\n\t${treesymbol}${bluebg} PTR ${default} ${white}$ip_hostnames${default}" fi # - cpes ip_cpes=$(grep -E "^cpes=" <<<"$attribs" | cut -d '=' -f 2- | tr -d "'" | uniq) # use more user-friendly names for some CPEs ip_cpes=$(echo -e "$ip_cpes" | \ sed -e 's,cpe:/a:apache:http_server,Apache-HTTPD,g' \ -e 's,cpe:/o:linux:linux_kernel,[O/S]-Linux,g' \ -e 's,cpe:/o:microsoft:windows,[O/S]-Microsoft-Windows,g' \ -e 's,cpe:/o:debian:debian_linux,[O/S]-Debian-Linux,g' \ -e 's,cpe:/a:microsoft:internet_information_services,Microsoft-IIS,g' \ -e 's,cpe:/a:microsoft:exchange_server,Microsoft-Exchange,g' \ -e 's,cpe:/a:openbsd:openssh,OpenSSH,g' \ -e 's,cpe:/a:igor_sysoev:nginx,Nginx,g' \ -e 's,cpe:/a:php:php,PHP,g' \ -e 's,cpe:/a:jquery:jquery,jQuery,g' \ -e 's,cpe:/a:getbootstrap:bootstrap,Bootstrap,g' \ -e 's,cpe:/a:pureftpd:pure-ftpd,Pure-FTPd,g' \ -e 's,cpe:/a:postfix:postfix,Postfix,g' \ -e 's,cpe:/a:openssl:openssl,Postfix,g' \ -e 's,cpe:/a:mysql:mysql,MySQL,g' \ ) cpelist+=$(echo -e "\n$ip_cpes" | tr ' ' '\n') if [ -n "$ip_cpes" ]; then count=$(( count+1 )) [[ "$count" -eq "$numattrs" ]] && treesymbol="└" || treesymbol="├" ip_cpes=$(echo -e "$ip_cpes" | sed -e 's/ / • /g' -e "s/.\{136\}/&\n /g") total_hosts_with_cpe=$(( total_hosts_with_cpe+1 )) output+="\n\t${treesymbol}${bluebg} CPE ${default} ${blue}$ip_cpes${default}" fi # - ports ip_ports=$(grep -E "^ports=" <<<"$attribs" | cut -d '=' -f 2- | uniq) portlist+=$(echo -e "\n$ip_ports" | tr ' ' '\n') if [ -n "$ip_ports" ]; then count=$(( count+1 )) [[ "$count" -eq "$numattrs" ]] && treesymbol="└" || treesymbol="├" ip_ports=$(echo -e "$ip_ports" | sed -e 's/ / • /g' -e "s/.\{136\}/&\n /g") total_hosts_with_ports=$(( total_hosts_with_ports+1 )) output+="\n\t${treesymbol}${bluebg} POR ${default} ${green}$ip_ports${default}" fi # - tags ip_tags=$(grep -E "^tags=" <<<"$attribs" | cut -d '=' -f 2- | tr -d "'" | uniq) taglist+=$(echo -e "\n$ip_tags" | tr ' ' '\n') if [ -n "$ip_tags" ]; then count=$(( count+1 )) [[ "$count" -eq "$numattrs" ]] && treesymbol="└" || treesymbol="├" ip_tags=$(echo -e "$ip_tags" | sed -e 's/ / • /g' -e "s/.\{136\}/&\n /g") total_hosts_with_tags=$(( total_hosts_with_tags+1 )) output+="\n\t${treesymbol}${bluebg} TAG ${default} ${yellow}$ip_tags${default}" fi # - vulns ip_vulns=$(grep -E "^vulns=" <<<"$attribs" | cut -d '=' -f 2- | tr -d "'" | uniq) vulnlist+=$(echo -e "\n$ip_vulns" | tr ' ' '\n') if [ -n "$ip_vulns" ]; then count=$(( 
count+1 )) [[ "$count" -eq "$numattrs" ]] && treesymbol="└" || treesymbol="├" # ip_vulns=$(echo -e "$ip_vulns" | sed -e 's/ / • /g') ip_vulns=$(echo -e "$ip_vulns" | sed -e 's/ / • /g' -e "s/.\{136\}/&\n /g") total_hosts_with_vulns=$(( total_hosts_with_vulns+1 )) output+="\n\t${treesymbol}${bluebg} CVE ${default} ${red}$ip_vulns${default}" fi output+="\n" done StatusbarMessage echo -e "$output" final_stats="${default}\n______________________________\n\n" final_stats+="${green}$total_hosts_with_ports exposed${default} host"; [[ "$total_hosts_with_ports" != "1" ]] && final_stats+="s"; final_stats+=" found" final_stats+="\n${blue}$total_hosts_with_cpe hosts${default} identified with a known CPE" final_stats+="\n${yellow}$total_hosts_with_tags tagged${default} host"; [[ "$total_hosts_with_tags" != "1" ]] && final_stats+="s"; final_stats+=" identified" final_stats+="\n$total_hosts_with_hostnames hostname"; [[ "$total_hosts_with_hostnames" != "1" ]] && final_stats+="s"; final_stats+=" discovered" final_stats+="\n${red}$total_hosts_with_vulns vulnerable${default} host"; [[ "$total_hosts_with_vulns" != "1" ]] && final_stats+="s"; final_stats+=" found" final_stats+="\n\n$numtargets host"; [[ "$numtargets" != "1" ]] && final_stats+="s"; final_stats+=" analyzed in $shodan_runtime seconds.\n" # display final statistics (non-JSON output mode only) BoxHeader "Statistics" # top N ports echo -e "\n${green}[TOP ${SHODAN_SHOW_TOP_N} Open Ports] \n" if [ -z "$portlist" ]; then # no ports found echo -e "${green} Nothing found${default}\n" else for port in $(echo -e "$portlist" | sort | grep -Ev '^$' | uniq -c | sort -rn | head -n "${SHODAN_SHOW_TOP_N}"); do porthits=$(awk '{print $1}' <<<"$port") portnum=$(awk '{print $2}' <<<"$port") portname=$(ResolveWellKnownPort "$portnum") [[ -n "$portname" ]] && portname="(${portname})" printf "%10s host(s) → Port %5s %s\n" "$porthits" "$portnum" "$portname" done echo -e "$default" fi # top N CPEs echo -e "${blue}[TOP ${SHODAN_SHOW_TOP_N} CPEs] \n" if [ -z "$cpelist" ]; then # no CPEs found echo -e "${blue} Nothing found${default}\n" else for cpe in $(echo -e "$cpelist" | sort | grep -Ev '^$' |uniq -c | sort -rn | head -n "${SHODAN_SHOW_TOP_N}"); do cpehits=$(awk '{print $1}' <<<"$cpe") cpefullname=$(awk '{print $2}' <<<"$cpe") cpetype=$(echo "$cpefullname" | cut -d ':' -f 2) cpename=$(echo "$cpefullname" | cut -d ':' -f 3-) case "${cpetype}" in "/a") type="APP" ;; "/o") type="O/S" ;; "/h") type="H/W" ;; *) type="" ;; esac [[ -n "$type" ]] && cpename="[$type] $cpefullname" || cpename="$cpefullname" printf "%10s host(s) → %s\n" "$cpehits" "$cpename" done echo -e "$default" fi # top N tags echo -e "${yellow}[TOP ${SHODAN_SHOW_TOP_N} Tags] \n" if [ -z "$taglist" ]; then # no tags found echo -e "${yellow} Nothing found${default}\n" else for tag in $(echo -e "$taglist" | sort | grep -Ev '^$' | uniq -c | sort -rn | head -n "${SHODAN_SHOW_TOP_N}"); do taghits=$(awk '{print $1}' <<<"$tag") tagname=$(awk '{print $2}' <<<"$tag") printf "%10s host(s) → %s\n" "$taghits" "$tagname" done echo -e "$default" fi # first N hostnames discovered echo -e "${white}[First ${SHODAN_SHOW_TOP_N} Hostnames discovered] \n" if [ -z "$hostnamelist" ]; then # no tags found echo -e "${yellow} Nothing found${default}\n" else for hostname in $(echo -e "$hostnamelist" | grep -Ev '^$' | head -n "${SHODAN_SHOW_TOP_N}"); do printf " %s\n" "$hostname" done echo -e "$default" fi # top N vulnerabilities echo -e "${red}[TOP ${SHODAN_SHOW_TOP_N} Vulnerabilities by number of occurrences] \n" StatusbarMessage 
"Identifying CVE score and severity for vulnerable hosts" cvestats_text="" if [ -z "$vulnlist" ]; then # no vulns found cvestats_text="${red} Nothing found${default}" else for cve in $(echo -e "$vulnlist" | sort | grep -Ev '^$' | uniq -c | sort -rn | head -n "${SHODAN_SHOW_TOP_N}"); do vulnhits=$(awk '{print $1}' <<<"$cve") cvenum=$(awk '{print $2}' <<<"$cve") cvejsondata=$(docurl -s "https://services.nvd.nist.gov/rest/json/cves/2.0?cveId=$cvenum") v3score=$(jq -r '.vulnerabilities[0].cve.metrics.cvssMetricV31[0].cvssData.baseScore | select(length>0)' <<<"$cvejsondata" 2>/dev/null) v3severity=$(jq -r '.vulnerabilities[0].cve.metrics.cvssMetricV31[0].cvssData.baseSeverity | select(length>0)' <<<"$cvejsondata" 2>/dev/null) v2score=$(jq -r '.vulnerabilities[0].cve.metrics.cvssMetricV2[0].cvssData.baseScore' <<<"$cvejsondata" 2>/dev/null) v2severity=$(jq -r '.vulnerabilities[0].cve.metrics.cvssMetricV2[0].baseSeverity' <<<"$cvejsondata" 2>/dev/null) cvename=$(jq -r '.vulnerabilities[0].cve.cisaVulnerabilityName | select(length>0)' <<<"$cvejsondata" 2>/dev/null) cvedesc=$(jq -r '.vulnerabilities[0].cve.descriptions[0].value | select(length>0)' <<<"$cvejsondata" 2>/dev/null) # apply formatting to cvedesc to fit the terminal width cvedesc_len=${#cvedesc} available_width=$(( terminal_width - 58 )); formatted_cvedesc="" if [ "$cvedesc_len" -gt "$available_width" ]; then # iterate over cvedesc until it fits in the terminal width, inserting newlines while [ "$cvedesc_len" -gt 0 ]; do wordbreak_pointer="$available_width" if [ "$cvedesc_len" -gt "$available_width" ]; then while [ "${cvedesc:$wordbreak_pointer:1}" != " " ] && [ "$wordbreak_pointer" -gt 0 ]; do ((wordbreak_pointer--)) done [[ "$wordbreak_pointer" -eq 0 ]] && wordbreak_pointer="$available_width" fi cvedesc_line=${cvedesc:0:$wordbreak_pointer} if [ -n "$formatted_cvedesc" ]; then formatted_cvedesc+="\n" formatted_cvedesc+=$(printf "${default}${red}%49s ${default}${dim}%s" "┆" "$cvedesc_line") else formatted_cvedesc="$cvedesc_line" fi ((wordbreak_pointer++)) cvedesc=${cvedesc:$wordbreak_pointer} cvedesc_len=${#cvedesc} done else formatted_cvedesc="$cvedesc" fi cvescore="" cveseverity="" if [ -n "$v3score" ] && [ -n "$v3severity" ]; then cvescore="$v3score" cveseverity="$v3severity" elif [ -n "$v2score" ] && [ -n "$v2severity" ]; then cvescore="$v2score" cveseverity="$v2severity" fi if [ "${#cvescore}" -eq 1 ]; then cvescore+=".0" elif [ "${#cvescore}" -eq 2 ]; then cvescore=" $cvescore" fi case "${cveseverity}" in "LOW") cvetext="${greenbg} ${cvescore} (LOW) ${default}${red}" ;; "MEDIUM") cvetext="${yellowbg} ${cvescore} (MEDIUM) ${default}${red}" ;; "HIGH") cvetext="${redbg} ${cvescore} (HIGH) ${default}${red}" ;; "CRITICAL") cvetext="${redbg} ${cvescore} (CRITICAL) ${default}${red}" ;; *) cvescore="N/A" cvetext="${lightgreybg} ${cvescore} (UNKNOWN) ${default}${red}" ;; esac cvestats_text+=$(printf "${red}%10s host(s) → %-15s %-14s" "$vulnhits" "$cvetext" "$cvenum") [[ -n "$cvename" ]] && cvestats_text+=" • ${dim}${cvename}${default}" cvestats_text+="\n" cvestats_text+=$(printf "${red}%49s Desc : ${default}${dim}%s${default}" "├" "$formatted_cvedesc") cvestats_text+="\n" cvestats_text+=$(printf "${red}%49s Info : ${blue}${dim}%s${default}" "└" "https://nvd.nist.gov/vuln/detail/$cvenum") cvestats_text+="\n\n" done fi StatusbarMessage echo -e "$cvestats_text" echo -e "$final_stats" } BulkGeolocate(){ # identify target(s) type targetlist_allips=$(echo -e "$userinput" | grep -Eo "$ipv4v6regex" | sort) top10_ipv4=$(grep -v ":" 
<<<"$targetlist_allips" | uniq -c | sort -rn | head -n 10) top10_ipv6=$(grep ":" <<<"$targetlist_allips" | uniq -c | sort -rn | head -n 10) targetlist=$(uniq <<<"$targetlist_allips") if [ -n "$targetlist" ]; then target_count=$(echo -e "$targetlist" | wc -l) else PrintErrorAndExit "no valid targets found" fi if [ "$target_count" -gt 1 ]; then # user passed multiple targets "e.g. asn -g 1.1.1.1 8.8.8.8" userinput="multiple targets" fi BoxHeader "Geolocation lookup for $userinput" # TODO switch to ipinfo.io for bulk geolocation lookups when a token is present (free tier does not support batch queries) geolocation_json_output="" countrylist="" firstip=1 while true; do lastip=$(( firstip + 99 )) if [ "$lastip" -ge "$target_count" ]; then lastip="$target_count" LAST_BATCH=true fi ipbatch=$(echo -en "$targetlist" | awk "NR==$firstip,NR==$lastip") StatusbarMessage "Collecting geolocation data for IPs ${bluebg}${firstip}-${lastip}${lightgreybg} of ${bluebg}$target_count${lightgreybg} total" ipmap_targets=$(echo -en "$ipbatch" | tr '\n' ',') ipapi_targets=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$ipbatch") ipmap_output=$(docurl -m15 -s "https://ipmap.ripe.net/api/v1/locate/all?resources=$ipmap_targets") if [[ -n $(jq 'select (.error != null) | .error' <<<"$ipmap_output") ]]; then ipmap_output="" fi # Note: the free IP-API tier only supports unencrypted HTTP, not HTTPS ipapi_output=$(docurl -m4 -s "http://ip-api.com/batch?fields=query,status,message,country,countryCode,regionName,city" --data ''"$ipapi_targets"'') for target in $ipbatch; do ip_geo_countryname="" json_geo_city="" json_geo_region="" json_geo_cc="" if [ -n "$ipmap_output" ]; then IS_ANYCAST=$(jq '.metadata.service.contributions."'"$target"'".latency.metadata.anycast' <<<"$ipmap_output") [[ "$IS_ANYCAST" = true ]] && anycast_tag="${yellowbg} ANYCAST ${default}" || anycast_tag="" ip_geo_countryname=$(jq -r '.data."'"$target"'" | select( .countryName != null ) | .countryName' <<<"$ipmap_output") json_geo_city="$(jq -r '.data."'"$target"'" | select( .cityName != null ) | .cityName' <<<"$ipmap_output")" json_geo_region="$(jq -r '.data."'"$target"'" | select( .stateName != null ) | .stateName' <<<"$ipmap_output")" json_geo_cc="$(jq -r '.data."'"$target"'" | select( .countryCodeAlpha2 != null ) | .countryCodeAlpha2' <<<"$ipmap_output")" fi if [ -n "$ip_geo_countryname" ] && [ -n "$json_geo_city" ] && [ -n "$json_geo_region" ] && [ -n "$json_geo_cc" ]; then ip_geo_data="$json_geo_city, $json_geo_region, $json_geo_cc" else # incomplete/no data on RIPE IPMap, fallback to IP-API ipapi_status=$(jq -r '.[] | select(.query == "'"$target"'") | .status' <<<"$ipapi_output") if [ "$ipapi_status" = "fail" ] && [ "$JSON_OUTPUT" = false ]; then ip_geo_countryname="Unknown" ip_geo_data="N/A" else ip_geo_countryname=$(jq -r '.[] | select(.query == "'"$target"'") | .country' <<<"$ipapi_output") if [ -z "$ip_geo_countryname" ] && [ "$JSON_OUTPUT" = false ]; then ip_geo_countryname="Unknown" ip_geo_data="N/A" else json_geo_city="$(jq -r '.[] | select(.query == "'"$target"'") | .city' <<<"$ipapi_output")" json_geo_region="$(jq -r '.[] | select(.query == "'"$target"'") | .regionName' <<<"$ipapi_output")" json_geo_cc="$(jq -r '.[] | select(.query == "'"$target"'") | .countryCode' <<<"$ipapi_output")" if [ -n "$json_geo_city" ] && [ -n "$json_geo_region" ] && [ -n "$json_geo_cc" ]; then ip_geo_data="$json_geo_city, $json_geo_region, $json_geo_cc" else ip_geo_data="Unknown" fi fi fi fi if [ "$ip_geo_countryname" = "Unknown" ]; then 
namecolor="${red}" countrycolor="${red}" else namecolor="${white}" countrycolor="${green}" fi if [ "$JSON_OUTPUT" = true ]; then [[ -n "$geolocation_json_output" ]] && geolocation_json_output+="," geolocation_json_output+="{\"ip\":\"$target\"" geolocation_json_output+=",\"city\":\"$json_geo_city\"" geolocation_json_output+=",\"region\":\"$json_geo_region\"" geolocation_json_output+=",\"country\":\"$ip_geo_countryname\"" geolocation_json_output+=",\"cc\":\"$json_geo_cc\"" geolocation_json_output+=",\"hits\":$(grep -c "$target" <<<"$targetlist_allips")" [[ "$IS_ANYCAST" = true ]] && geolocation_json_output+=",\"is_anycast\": true" geolocation_json_output+="}" else final_output+=$(printf "${white}%-16s${default}: ${namecolor}%s${default} (${countrycolor}%s${default}) %s" "$target" "$ip_geo_data" "$ip_geo_countryname" "$anycast_tag\n") countrylist+="\n$ip_geo_countryname" fi done [[ "$LAST_BATCH" = true ]] && break firstip=$(( lastip++ )) done StatusbarMessage [[ "$JSON_OUTPUT" = true ]] && return echo -en "${final_output}" # top 10 IPs if [ -n "$top10_ipv4" ]; then BoxHeader "Top 10 IPv4 by number of hits" for entry in $top10_ipv4; do iphits=$(awk '{print $1}' <<<"$entry") ipaddr=$(awk '{ print substr($0, index($0,$2)) }' <<<"$entry") printf "${white}%16s appears ${magenta}%s${default} time%s\n" "$ipaddr" "$iphits" "$([[ "$iphits" != "1" ]] && echo -n "s")" done fi if [ -n "$top10_ipv6" ]; then BoxHeader "Top 10 IPv6 by number of hits" for entry in $top10_ipv6; do iphits=$(awk '{print $1}' <<<"$entry") ipaddr=$(awk '{ print substr($0, index($0,$2)) }' <<<"$entry") printf "${white}%16s appears ${magenta}%s${default} time%s\n" "$ipaddr" "$iphits" "$([[ "$iphits" != "1" ]] && echo -n "s")" done fi # draw countries bar chart BoxHeader "Country stats" DrawChart "$countrylist" "IP" } DrawChart(){ # draws a bar chart for the occurrences of values from a list. # $1 must be a list of values (not necessarily sorted) # $2 must be the unit name of the bars (singular. A final 's' is added for bars whose value is > 1) # [optional] $3 can be the number of items to display (i.e. 
TOP n) inputlist="$1" unitname="$2" [[ -n "$3" ]] && topn="$3" || topn="0" sorted_input="" declare -A valuearray # associative array declare -a sorted_input # simple array, to be used as an index into valuearray for item in $(echo -e "$inputlist" | sort | grep -Ev '^$' | uniq -c | sort -rn); do itemhits=$(awk '{print $1}' <<<"$item") itemname=$(awk '{ print substr($0, index($0,$2)) }' <<<"$item") valuearray["$itemname"]="$itemhits" sorted_input+=("$itemname") done total_unique_names="${#sorted_input[@]}" longest_itemname=$(echo -e "$inputlist" | wc -L) count=0 while true; do item=${sorted_input[$count]} # incrementally choose color for the bar and index for the item array entry - skip 0 (black) [[ "$MONOCHROME_MODE" = false ]] && tput setaf $(( count + 1 )) itemhits="${valuearray[$item]}" spacing="$itemhits" [[ "$itemhits" -gt "$terminal_width" ]] && spacing=$(( terminal_width-(14+longest_itemname) )) printf " %${longest_itemname}s " "$item" printf "█%.0s" $(seq "$spacing") printf " %s ${unitname}%s\n" "$itemhits" "$([[ "$itemhits" != "1" ]] && echo -n "s")" (( count++ )) if [ "$topn" != "0" ] && [ "$count" -ge "$topn" ]; then break elif [ "$count" -ge "$total_unique_names" ]; then break fi done echo -e "${default}" } PrintJsonOutput(){ DisableColors endtime=$(date +%s) runtime=$((endtime-starttime)) json_to_print="{\"target\":\"$userinput\"," json_to_print+="\"target_type\":\"$json_target_type\"," json_to_print+="\"result\":\"$status_json_output\"," json_to_print+="\"reason\":\"$reason_json_output\"," json_to_print+="\"version\":\"$ASN_VERSION\"," json_to_print+="\"request_time\":\"$json_request_time\"," json_to_print+="\"request_duration\":$runtime," json_to_print+="\"api_tokens\":{" json_to_print+="\"ipqualityscore\":$json_IQS_TOKEN," json_to_print+="\"ipinfo\":$json_IPINFO_TOKEN," json_to_print+="\"cloudflare\":$json_CLOUDFLARE_TOKEN" json_to_print+="}," json_to_print+="\"result_count\":$json_resultcount," if [ "$RECON_MODE" = true ] || [ "$BGP_UPSTREAM_MODE" = true ]; then # we already have an array as a final json output, append it as-is json_to_print+="\"results\":${final_json_output}}" else json_to_print+="\"results\":[${final_json_output}]}" fi [[ "$JSON_PRETTY" = true ]] && json_to_print=$(jq -M '.' <<<"$json_to_print") || json_to_print=$(jq -c '.' 
<<<"$json_to_print") echo -e "$json_to_print" } ResolveWellKnownPort(){ # input: port number, output: service name [[ -n "$WELL_KNOWN_PORTS" ]] && awk "/\t$1\/(tc|ud)p/{print \$1; exit}" <<<"$WELL_KNOWN_PORTS" } RPKILookup(){ # $1=asn, $2=prefix found_rpkivalidity="" INVALID_ROA=false rpki_apioutput=$(docurl -s "https://stat.ripe.net/data/rpki-validation/data.json?resource=$1&prefix=$2&sourceapp=nitefood-asn") is_valid_json=$(jq type <<<"$rpki_apioutput" 2>/dev/null) if [ -n "$is_valid_json" ]; then found_rpkivalidity=$(jq -r '.data.status' <<<"$rpki_apioutput" | tr '[:lower:]' '[:upper:]') found_rpkiroacount=$(jq '.data.validating_roas | length' <<<"$rpki_apioutput") roacount_json_output="$found_rpkiroacount" found_rpkiprefix=$(jq -r '.data.validating_roas[0].prefix' <<<"$rpki_apioutput") found_rpkiorigins=$(jq -r '.data.validating_roas[] | select (.prefix=="'"$found_rpkiprefix"'") | .origin' <<<"$rpki_apioutput") found_rpkiorigin="[" origin_count=0 for origin in $found_rpkiorigins; do [[ "$origin_count" -gt 0 ]] && found_rpkiorigin+=", " found_rpkiorigin+="AS$origin" (( origin_count++ )) done found_rpkiorigin+="]" found_rpkimaxlength=$(jq -r '.data.validating_roas[0].max_length' <<<"$rpki_apioutput") # TODO: iterate over ROAs (.data.validatingRoas[]) to give deeper source RIR insight #found_rpkisource=$(echo "$rpki_apioutput" | jq -r '.data.validatingRoas[0].source') case "$found_rpkivalidity" in "VALID") [[ "${found_rpkiroacount}" -gt 1 ]] && s="s" || s="" rpki_output="${green}✓ VALID (${found_rpkiroacount} ROA$s found)${default}" roavalidity_json_output="valid" ;; "UNKNOWN") rpki_output="${yellow}✓ UNKNOWN (no ROAs found)${default}" roavalidity_json_output="unknown" ;; "INVALID_ASN") INVALID_ROA=true roavalidity_json_output="invalid" if [ "$found_rpkiorigin" = "0" ]; then rpki_output="${red}❌ ${found_rpkivalidity} (no Origin authorized to announce Prefix '${found_rpkiprefix}' with Max-Length=${found_rpkimaxlength})${default}" else rpki_output="${red}❌ ${found_rpkivalidity} (expected Origin(s): ${found_rpkiorigin} for Prefix '${found_rpkiprefix}' with Max-Length=${found_rpkimaxlength})${default}" fi ;; "INVALID_LENGTH") INVALID_ROA=true roavalidity_json_output="invalid" rpki_output="${red}❌ ${found_rpkivalidity} (expected Max-Length=${found_rpkimaxlength} for Prefix '${found_rpkiprefix}')${default}" ;; esac else rpki_output="${yellow}? (WRONG RPKI DATA or problem accessing RIPEStat API)${default}" roacount_json_output="-1" roavalidity_json_output="unknown (API error)" fi } IsIXP() { # input ($1) is an IPv4/v6. ixp_full_ix_data="" ixp_data="" ixp_geo="" input_is_ipv6=false if [ "$IXP_DETECTION" = true ]; then echo -e "$1" | grep -q ':' && input_is_ipv6=true # Update IXP prefixes from PeeringDB if necessary # use the appropriate (v4/v6) IXP prefix dataset if [ "$input_is_ipv6" = false ]; then # if input is an IPv4, speedup lookups further by grabbing only IXP prefixes starting with the same two octets # we can afford filtering based on the first two octets since the largest individual IXP prefix is around /20 first_octets=$(echo "${1}." | cut -d '.' -f 1,2) # see if the PEERINGDB_CACHED_DATASETS array already an entry for this prefix's first two octets. 
# If not, fetch the relevant IXP prefix dataset from PeeringDB and store it in the PEERINGDB_CACHED_DATASETS if [ -n "${PEERINGDB_CACHED_DATASETS[$first_octets]}" ]; then peeringdb_dataset="${PEERINGDB_CACHED_DATASETS[$first_octets]}" else peeringdb_ipv4_dataset=$(docurl -s "https://www.peeringdb.com/api/ixpfx?prefix__startswith=$first_octets&protocol__in=IPv4") peeringdb_dataset="$peeringdb_ipv4_dataset" # store the IXP dataset in the PEERINGDB_CACHED_DATASETS array PEERINGDB_CACHED_DATASETS[$first_octets]="$peeringdb_dataset" fi else # only fetch IPv6 dataset once, since we don't filter it for prefixes if [ -z "$peeringdb_ipv6_dataset" ]; then peeringdb_ipv6_dataset=$(docurl -s "https://www.peeringdb.com/api/ixpfx?protocol__in=IPv6") fi peeringdb_dataset="$peeringdb_ipv6_dataset" fi ixp_prefixes=$(jq -r '.data[].prefix' <<<"$peeringdb_dataset" 2>/dev/null) # search for input prefix through PeeringDB IXP prefix list for prefix in $ixp_prefixes; do if echo "$1" | grepcidr -f <(echo "$prefix") &>/dev/null; then # the IP is part of an IXP prefix ixlan_id=$(jq -r '.data[] | select(.prefix == "'"$prefix"'") | .ixlan_id' <<<"$peeringdb_dataset") # see if the PEERINGDB_CACHED_IXP_DATA array already has an entry with the IXP details for this ixlan_id. # If not, fetch the full IXP data from PeeringDB and store it in the PEERINGDB_CACHED_IXP_DATA if [ -n "${PEERINGDB_CACHED_IXP_DATA[$ixlan_id]}" ]; then ixp_full_ix_data="${PEERINGDB_CACHED_IXP_DATA[$ixlan_id]}" else # Query PeeringDB to match an IXP for that prefix. ixp_full_ix_data=$(docurl -s "https://www.peeringdb.com/api/ix/$ixlan_id") # store the IXP data in the PEERINGDB_CACHED_IXP_DATA array PEERINGDB_CACHED_IXP_DATA[$ixlan_id]="$ixp_full_ix_data" fi ixp_data=$(jq -r '.data[0].name, .data[0].name_long' <<<"$ixp_full_ix_data" | paste -sd '|' - | awk -F'|' '{print $1 " (" $2 ")"}') ixp_geo=$(jq -r '.data[0].org.city' <<<"$ixp_full_ix_data") ixp_state=$(jq -r '.data[0].org.state' <<<"$ixp_full_ix_data") ixp_country=$(jq -r '.data[0].org.country' <<<"$ixp_full_ix_data") ip_type_data=" ${lightgreybg} IXP ${default}" [[ -n "$ixp_state" ]] && ixp_geo+=", $ixp_state" [[ -n "$ixp_country" ]] && ixp_geo+=" ($ixp_country)" [[ "$IS_ASN_CHILD" = true ]] && ixp_data="$ixp_data" break fi done fi } GetIXPresence(){ asn="$1" ixps="" outputix="" json_ixps="" netlist=$(docurl -s "https://www.peeringdb.com/api/net?asn__in=$asn" | jq -r '.data[].id') if [ -n "$netlist" ]; then for net in $netlist; do if [ "$IS_ASN_CHILD" = true ] && [ "$JSON_OUTPUT" = false ]; then ixps+=$(docurl -s "https://www.peeringdb.com/api/net/$net" | jq -r '.data[].netixlan_set[] | "\(.name)"') else ixps+=$(docurl -s "https://www.peeringdb.com/api/net/$net" | jq -r '.data[].netixlan_set[].name') fi done if [ -n "$ixps" ]; then for ix in $(echo "$ixps" | sort -u); do [[ -n "$outputix" ]] && outputix+=" • " outputix+="${blue}${ix}${default}" done else outputix="${redbg} NONE ${default}" fi else outputix="${redbg} NONE ${default}" fi if [ "$JSON_OUTPUT" = true ]; then # json output json_ixps=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$ixps") else # normal output echo -e "$outputix" fi } GetCAIDARank(){ # see if we already have CAIDA rank cached for this AS asn="$1" if [ -n "${CAIDARANK_CACHED_AS_DATA[$asn]}" ]; then caida_data="${CAIDARANK_CACHED_AS_DATA[$asn]}" else caida_data=$(docurl -s "https://api.asrank.caida.org/dev/restful/asns/${asn}") # store the rank in the CAIDARANK_CACHED_AS_DATA array CAIDARANK_CACHED_AS_DATA[$asn]="$caida_data" fi 
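	# A minimal standalone sketch of the same lookup, for reference: it queries the CAIDA
	# ASRank endpoint used above and prints the fields this function parses out below
	# (rank, source RIR, customer cone size and degree counts). The function name and the
	# sample AS number in the usage note are illustrative placeholders, not referenced
	# anywhere else in this script; it uses curl directly rather than the docurl wrapper
	# and does no caching, so it should not be taken as a drop-in replacement.
	example_caida_asrank_summary(){
		# $1 = AS number, e.g. 3333
		curl -s "https://api.asrank.caida.org/dev/restful/asns/$1" |
			jq -r '.data.asn | "rank=\(.rank) source=\(.source) cone=\(.cone.numberAsns) degree_total=\(.asnDegree.total) providers=\(.asnDegree.provider) peers=\(.asnDegree.peer) customers=\(.asnDegree.customer)"'
	}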
caida_asrank_text="" caida_asrank=$(jq -r '.data.asn.rank' <<<"$caida_data") if [ "$caida_asrank" != "null" ]; then caida_rir=$(jq -r '.data.asn.source' <<<"$caida_data") case "${caida_rir}" in "AFRINIC") caida_rir="AFRINIC ${dim}(Africa)${default}" ;; "APNIC") caida_rir="APNIC ${dim}(Asia Pacific)${default}" ;; "ARIN") caida_rir="ARIN ${dim}(USA, Canada, many Caribbean and North Atlantic islands)${default}" ;; "LACNIC") caida_rir="LACNIC ${dim}(Latin America and the Caribbean)${default}" ;; "RIPE") caida_rir="RIPE ${dim}(Europe, the Middle East and parts of Central Asia)${default}" ;; esac caida_customercone=$(jq -r '.data.asn.cone.numberAsns' <<<"$caida_data") caida_degree_total=$(jq -r '.data.asn.asnDegree.total' <<<"$caida_data") caida_degree_customer=$(jq -r '.data.asn.asnDegree.customer' <<<"$caida_data") caida_degree_peer=$(jq -r '.data.asn.asnDegree.peer' <<<"$caida_data") caida_degree_provider=$(jq -r '.data.asn.asnDegree.provider' <<<"$caida_data") if (( caida_asrank <= 10 )); then caida_asrank_text=" ${lightgreybg} TOP 10 AS ${default}" elif (( caida_asrank <= 35 )); then caida_asrank_text=" ${lightgreybg} TOP 35 AS ${default}" elif (( caida_asrank <= 100 )); then caida_asrank_text=" ${lightgreybg} TOP 100 AS ${default}" elif (( caida_asrank <= 500)); then caida_asrank_text=" ${lightgreybg} TOP 500 AS ${default}" elif (( caida_asrank <= 1000)); then caida_asrank_text=" ${lightgreybg} TOP 1000 AS ${default}" fi caida_asrank_recap="${dim}#${white}${caida_asrank}${caida_asrank_text}" else caida_asrank="N/A" caida_customercone="N/A" caida_rir="N/A" caida_customercone="N/A" caida_degree_total="N/A" caida_degree_provider="N/A" caida_degree_peer="N/A" caida_degree_customer="N/A" caida_asrank_recap="${dim}N/A" fi } GetCloudflareHijacksAndLeaks(){ # query Cloudflare Radar API for BGP hijacks or route leaks involving the target ASN in the past 12 months asn="$1" cf_hijacks_text="${dim}${red}N/A${default}${dim} [Cloudflare query timed out or API error]" cf_leaks_text="${dim}${red}N/A${default}${dim} [Cloudflare query timed out or API error]" cf_hijack_query_success=false cf_leak_query_success=false if [ -z "$CLOUDFLARE_TOKEN" ]; then cf_hijacks_text="${dim}${red}N/A${default}${dim} [Cloudflare API token missing]${default}" cf_leaks_text="${dim}${red}N/A${default}${dim} [Cloudflare API token missing]${default}" return fi StatusbarMessage "Retrieving BGP hijacks and leaks history for AS${asn} (${target_asname})" cf_hijacks_json_output=$(docurl -m 10 -s -H "Authorization: Bearer $CLOUDFLARE_TOKEN" "https://api.cloudflare.com/client/v4/radar/bgp/hijacks/events?dateRange=52w&involvedAsn=$asn") cf_leaks_json_output=$(docurl -m 10 -s -H "Authorization: Bearer $CLOUDFLARE_TOKEN" "https://api.cloudflare.com/client/v4/radar/bgp/leaks/events?dateRange=52w&involvedAsn=$asn") if [ -n "$cf_hijacks_json_output" ]; then cf_hijacks_count=$(jq -r '.result_info.total_count' <<<"$cf_hijacks_json_output" 2>/dev/null) if [ -z "$cf_hijacks_count" ]; then StatusbarMessage return fi cf_hijacks_as_hijacker_count=$(jq -r ".result.events | map(select (.hijacker_asn == $asn)) | length" <<<"$cf_hijacks_json_output" 2>/dev/null) if [ -z "$cf_hijacks_as_hijacker_count" ]; then StatusbarMessage return fi cf_hijacks_as_victim_count=$((cf_hijacks_count - cf_hijacks_as_hijacker_count)) if [ "$cf_hijacks_count" -gt 0 ]; then # at least one hijack incident involving this AS [[ "$cf_hijacks_count" -gt 1 ]] && s="s" || s="" cf_hijacks_text="${white}Involved in ${magenta}$cf_hijacks_count${white} BGP hijack incident${s}" if [ 
"$cf_hijacks_as_hijacker_count" -gt 0 ] && [ "$cf_hijacks_as_victim_count" -gt 0 ]; then # mixed situations involving this AS (both hijacker and victim) cf_hijacks_text="${cf_hijacks_text} (of which ${red}$cf_hijacks_as_hijacker_count${white} as a hijacker and ${green}$cf_hijacks_as_victim_count${white} as a victim)${default}" elif [ "$cf_hijacks_as_hijacker_count" -gt 0 ]; then # this AS was always a hijacker cf_hijacks_text="${cf_hijacks_text} ${red}(always as a hijacker)${default}" else # this AS was always a victim cf_hijacks_text="${cf_hijacks_text} ${green}(always as a victim)${default}" fi else # no hijack incidents involving this AS cf_hijacks_text="${green}None${default}" fi fi cf_hijack_query_success=true if [ -n "$cf_leaks_json_output" ]; then cf_leaks_count=$(jq -r '.result_info.total_count' <<<"$cf_leaks_json_output" 2>/dev/null) if [ -z "$cf_leaks_count" ]; then StatusbarMessage return fi if [ "$cf_leaks_count" -gt 0 ]; then # at least one route leak incident involving this AS [[ "$cf_leaks_count" -gt 1 ]] && s="s" || s="" cf_leaks_text="${white}Involved in ${yellow}$cf_leaks_count${white} BGP route leak incident${s}${default}" else # no route leak incidents involving this AS cf_leaks_text="${green}None${default}" fi fi cf_leak_query_success=true StatusbarMessage } IPGeoRepLookup(){ if [ -n "$mtr_output" ] && [ "$DETAILED_TRACE" = false ]; then # skip geolocation and reputation lookups for individual trace hops in non-detailed mode return fi ip_geo_data="" ip_geo_countryname="" geo_city_json_output="" geo_region_json_output="" geo_country_json_output="" geo_cc_json_output="" # local ipmap_city # local ipmap_region # local ipmap_cc # local ipmap_country_name # local ipmap_location_data local ipinfo_city local ipinfo_region local ipinfo_cc local ipinfo_countryname local ipapi_city local ipapi_region local ipapi_cc local ipapi_countryname local ipapi_output local ipapi_status local ipapi_errmsg StatusbarMessage "Collecting geolocation and classification data" # fetch preferred geolocation and anycast data from ipinfo.io (pass token regardless of whether IPINFO_TOKEN is set, ipinfo API does not complain for an empty token) ipinfo_output=$(docurl -m4 -s "https://ipinfo.io/$1?token=${IPINFO_TOKEN}") # fetch fallback geolocation and ip type (is mobile?) 
data from ip-api.com # Note: the free IP-API tier only supports unencrypted HTTP, not HTTPS ipapi_output=$(docurl -m4 -s "http://ip-api.com/json/$1?fields=status,message,country,countryCode,regionName,city,mobile,proxy,hosting") ipapi_status=$(jq -r '.status' <<<"$ipapi_output") if [ "$ipapi_status" = "fail" ]; then ipapi_errmsg=$(jq -r '.message' <<<"$ipapi_output") ip_rep_data="${red}IP-API ERROR:${default} ${ipapi_errmsg}" else IS_MOBILE=$(jq -r '.mobile' <<<"$ipapi_output") IS_PROXY=$(jq -r '.proxy' <<<"$ipapi_output") IS_HOSTING=$(jq -r '.hosting' <<<"$ipapi_output") fi IS_ANYCAST="" if jq '.anycast' <<<"$ipinfo_output" | grep -q true; then IS_ANYCAST=true fi # ipmap_location_data=$(jq 'select (.location != null) | .location' <<<"$ipmap_output") # if [ -n "$ipmap_location_data" ]; then # # RIPE IPmap has (at least some) geo data about this address, check city/region # ipmap_city=$(jq -r 'select (.cityNameAscii != null) | .cityNameAscii' <<<"$ipmap_location_data") # ipmap_region=$(jq -r 'select (.stateName != null) | .stateName' <<<"$ipmap_location_data") # fi ipinfo_city=$(jq -r 'select (.city != null) | .city' <<<"$ipinfo_output") ipinfo_region=$(jq -r 'select (.region != null) | .region' <<<"$ipinfo_output") ipinfo_cc=$(jq -r 'select (.country != null) | .country' <<<"$ipinfo_output") ipinfo_countryname=$(jq -r ".${ipinfo_cc}" <<<"$COUNTRY_MAP_CACHE") flag_icon_cc="" # will be used in HTML reports served over HTTP if [ -n "$ipinfo_city" ]; then # populate json output fields from ipinfo output geo_city_json_output="$ipinfo_city" geo_region_json_output="$ipinfo_region" geo_country_json_output="$ipinfo_countryname" geo_cc_json_output="$ipinfo_cc" ip_geo_data="$ipinfo_city, $ipinfo_region ($ipinfo_cc)" flag_icon_cc=$(tr '[:upper:]' '[:lower:]' <<<"$ipinfo_cc") ip_geo_countryname="$ipinfo_countryname" elif [ "$ipapi_status" = "success" ]; then # ipinfo has no data about this address, fallback to ip-api.com ipapi_city=$(jq -r '.city' <<<"$ipapi_output") ipapi_region=$(jq -r '.regionName' <<<"$ipapi_output") ipapi_cc=$(jq -r '.countryCode' <<<"$ipapi_output") ipapi_countryname=$(jq -r '.country' <<<"$ipapi_output") # populate json output fields from ip-api output geo_city_json_output="$ipapi_city" geo_region_json_output="$ipapi_region" geo_country_json_output="$ipapi_countryname" geo_cc_json_output="$ipapi_cc" if [ -n "$ipapi_city" ] && [ -n "$ipapi_region" ] && [ -n "$ipapi_cc" ]; then ip_geo_data="$ipapi_city, $ipapi_region ($ipapi_cc)" ip_geo_countryname="$ipapi_countryname" flag_icon_cc=$(tr '[:upper:]' '[:lower:]' <<<"$ipapi_cc") elif [ -n "$ipapi_region" ] && [ -n "$ipapi_countryname" ]; then ip_geo_data="$ipapi_region ($ipapi_countryname)" ip_geo_countryname="$ipapi_countryname" elif [ -n "$ipapi_countryname" ]; then ip_geo_data="$ipapi_countryname" ip_geo_countryname="$ipapi_countryname" fi fi # IP type identification ip_type_data="" if [ "$IS_ANYCAST" = true ]; then ip_type_data+=" ${yellowbg} Anycast IP ${default}" ip_type_json_output+=",\"is_anycast\":true" else ip_type_json_output+=",\"is_anycast\":false" fi if [ "$IS_MOBILE" = true ]; then ip_type_data+=" ${yellowbg} Mobile network IP ${default}" ip_type_json_output+=",\"is_mobile\":true" else ip_type_json_output+=",\"is_mobile\":false" fi if [ "$IS_PROXY" = true ]; then ip_type_data+=" ${yellowbg} Proxy host ${default}" ip_type_json_output+=",\"is_proxy\":true" else ip_type_json_output+=",\"is_proxy\":false" fi # fetch detailed DC informations (incolumitas.com Datacenter IP Address API) incolumitas_dcdata=$(docurl -m2 -s 
"https://api.incolumitas.com/datacenter?ip=$1") dcname=$(jq -r 'select (.datacenter.datacenter != null) | .datacenter.datacenter' <<<"$incolumitas_dcdata" 2>/dev/null) dcregion=$(jq -r 'select (.datacenter.region != null) | .datacenter.region' <<<"$incolumitas_dcdata" 2>/dev/null) if [ -n "$dcname" ]; then # incolumitas.com has details regarding this DC ip_type_data+=" ${yellowbg} DC ${default}${yellow} $dcname" ip_type_json_output+=",\"is_dc\":true" ip_type_json_output+=",\"dc_details\":{\"dc_name\":\"$dcname\"" if [ -n "$dcregion" ]; then ip_type_data+=" ($dcregion)" ip_type_json_output+=",\"dc_region\":\"$dcregion\"" fi ip_type_json_output+="}" ip_type_data+="${default}" elif [ "$IS_HOSTING" = true ]; then # fallback to IP-API DC detection ip_type_data+=" ${yellowbg} Hosting/DC ${default}" ip_type_json_output+=",\"is_dc\":true" else ip_type_json_output+=",\"is_dc\":false" fi # Reputation lookup (stopforumspam.org), noise classification (greynoise.io) and threat analisys (ipqualityscore.com) ip_rep_data="${green}✓ NONE${default}" json_rep="none" json_iqs_threat_score="" json_iqs_threat_tags="" is_blacklisted=$(docurl -m4 -s "https://api.stopforumspam.org/api?json&ip=$1" | jq -r '.ip.appears') if [ "$is_blacklisted" = "1" ]; then # IP is blacklisted by StopForumSpam json_rep="bad" ip_rep_data="${red}❌ BAD (on stopforumspam.org)${default}" fi if [ -n "$IQS_TOKEN" ]; then if [ "$is_blacklisted" = "1" ] || [ "$IQS_ALWAYS_QUERY" = true ]; then # Lookup detailed reputation data on IPQualityScore iqs_query_url="https://ipqualityscore.com/api/json/ip/$IQS_TOKEN/$1" [[ -n "$IQS_CUSTOM_SETTINGS" ]] && iqs_query_url+="?$IQS_CUSTOM_SETTINGS" iqs_output=$(docurl -m4 -s "$iqs_query_url") iqs_success=$(jq -r '.success' <<<"$iqs_output") if [ "$iqs_success" = true ]; then iqs_score=$(jq -r '.fraud_score' <<<"$iqs_output") iqs_proxy=$(jq -r '.proxy' <<<"$iqs_output") iqs_vpn=$(jq -r '.active_vpn' <<<"$iqs_output") iqs_tor=$(jq -r '.active_tor' <<<"$iqs_output") iqs_recentabuse=$(jq -r '.recent_abuse' <<<"$iqs_output") iqs_bot=$(jq -r '.bot_status' <<<"$iqs_output") iqs_crawler=$(jq -r '.is_crawler' <<<"$iqs_output") json_iqs_threat_score+=",\"threat_score\":\"$iqs_score\"" if [ "$iqs_score" -lt 40 ]; then ip_rep_data="✓ GOOD" ip_rep_color="$green" json_rep="good" elif [ "$iqs_score" -lt 75 ]; then ip_rep_data="✓ AVERAGE" ip_rep_color="$green" json_rep="average" elif [ "$iqs_score" -lt 85 ]; then ip_rep_data="! 
SUSPICIOUS" ip_rep_color="$yellow" json_rep="suspicious" else ip_rep_data="❌ BAD" ip_rep_color="$red" json_rep="bad" fi ip_rep_data="${ip_rep_color}${ip_rep_data} (Threat Score ${iqs_score}%)${default}" [[ "$iqs_recentabuse" = true ]] && ip_rep_data+=" ${redbg} RECENT ABUSER ${default}"; json_iqs_threat_tags+=",\"is_recent_abuser\": true" [[ "$iqs_bot" = true ]] && ip_rep_data+=" ${redbg} BOT ${default}"; json_iqs_threat_tags+=",\"is_bot\": true" [[ "$iqs_proxy" = true ]] && ip_rep_data+=" ${redbg} PROXY ${default}"; json_iqs_threat_tags+=",\"is_proxy\": true" [[ "$iqs_vpn" = true ]] && ip_rep_data+=" ${redbg} VPN ${default}"; json_iqs_threat_tags+=",\"is_vpn\": true" [[ "$iqs_tor" = true ]] && ip_rep_data+=" ${redbg} TOR EXIT NODE ${default}"; json_iqs_threat_tags+=",\"is_tor\": true" [[ "$iqs_crawler" = true ]] && ip_rep_data+=" ${redbg} CRAWLER ${default}"; json_iqs_threat_tags+=",\"is_crawler\": true" else iqs_errmsg=$(jq -r '.message' <<<"$iqs_output") ip_rep_data+=" ${redbg} ERR ${default} (IpQualityScore API said: $iqs_errmsg)" fi fi fi # GreyNoise lookup greynoise_data=$(docurl -m5 -s "https://api.greynoise.io/v3/community/$1") gn_noisy=false gn_riot="" gn_classification="" gn_name="" gn_json_is_knowngood="" gn_json_is_knownbad="" gn_json_aka="" if [ -n "$greynoise_data" ]; then gn_noisy=$(jq -r 'select (.noise != null) | .noise' <<<"$greynoise_data") gn_riot=$(jq -r 'select (.riot != null) | .riot' <<<"$greynoise_data") gn_classification=$(jq -r 'select (.classification != null) | .classification' <<<"$greynoise_data") gn_name=$(jq -r 'select (.name != null) | .name' <<<"$greynoise_data") if [ "$gn_name" = "unknown" ]; then gn_name="${default}" else gn_json_aka="$gn_name" gn_name="as \"$gn_name\" ${default}" fi if [ "$gn_riot" = "true" ] || [ "$gn_classification" = "benign" ]; then # GreyNoise known-good ip_rep_data="${green}✓${default} ${greenbg} KNOWN GOOD $gn_name" json_rep="good" gn_json_is_knowngood=true [[ "$gn_noisy" = "true" ]] && ip_rep_data+=" ${yellowbg} SEEN SCANNING ${default}" elif [ "$gn_classification" = "malicious" ]; then # GreyNoise known-bad [[ "$is_blacklisted" != "1" ]] && ip_rep_data="${red}❌ BAD${default}" # tag the IP with a bad REP, it wasn't caught by StopForumSpam earlier ip_rep_data+=" ${redbg} SCANNER $gn_name" json_rep="bad" gn_json_is_knownbad=true else # GreyNoise not listed, or tagged as "noisy" but not explicitly good or bad [[ "$gn_noisy" = "true" ]] && ip_rep_data+=" ${yellowbg} SEEN SCANNING ${default}" fi fi StatusbarMessage } IPShodanLookup(){ # Shodan InternetDB lookup if [ -n "$mtr_output" ] && [ "$DETAILED_TRACE" = false ]; then # skip Shodan lookup for individual trace hops in non-detailed mode return fi ip_shodan_cpe_data="" ip_shodan_ports_data="" ip_shodan_tags_data="" ip_shodan_cve_data="" shodan_cpes_json_output="" shodan_ports_json_output="" shodan_tags_json_output="" shodan_vulns_json_output="" StatusbarMessage "Collecting open ports, CPE and CVE data" shodan_data=$(docurl -m5 -s "https://internetdb.shodan.io/$1" | grep -v "No information available") if [ -n "$shodan_data" ]; then shodan_cpes=$(jq -r '.cpes[]' <<<"$shodan_data" 2>/dev/null) shodan_cpes_json_output=$(jq -cM 'select (.cpes | length > 0) | .cpes' <<<"$shodan_data" 2>/dev/null) shodan_ports=$(jq -r '.ports[]' <<<"$shodan_data" 2>/dev/null) shodan_ports_json_output=$(jq -cM 'select (.ports | length > 0) | .ports' <<<"$shodan_data" 2>/dev/null) shodan_tags=$(jq -r '.tags[]' <<<"$shodan_data" 2>/dev/null) shodan_tags_json_output=$(jq -cM 'select (.tags | length > 0) | 
.tags' <<<"$shodan_data" 2>/dev/null) shodan_cve_count=$(jq '.vulns | length' <<<"$shodan_data" 2>/dev/null) shodan_vulns_json_output=$(jq -cM 'select (.vulns | length > 0) | .vulns' <<<"$shodan_data" 2>/dev/null) # fetch CPE types, possible values: # a for Applications, h for Hardware, o for Operating Systems for cpe in $shodan_cpes; do cpetype=$(echo "$cpe" | cut -d ':' -f 2) cpename=$(echo "$cpe" | cut -d ':' -f 3-) case "${cpetype}" in "/a") type="APP" ;; "/o") type="O/S" ;; "/h") type="H/W" ;; *) type="UNK" ;; esac ip_shodan_cpe_data+="${white} [${blue}$type: ${white}$cpename]${default}" done # fetch open ports ip_shodan_ports_data="" for port in $shodan_ports; do [[ -n "$ip_shodan_ports_data" ]] && ip_shodan_ports_data+="," || ip_shodan_ports_data=" ${green}Open ports:" ip_shodan_ports_data+=" $port" done [[ -n "$ip_shodan_ports_data" ]] && ip_shodan_ports_data+="${default}" # fetch Shodan tags ip_shodan_tags_data="" for tag in $shodan_tags; do ip_shodan_tags_data+=" ${lightgreybg} $tag ${default}" done # fetch Shodan CVE count ip_shodan_cve_data="" if [ -n "$shodan_cve_count" ] && [ "$shodan_cve_count" != "0" ]; then [[ "$shodan_cve_count" != "1" ]] && vulntext="VULNERABILITIES" || vulntext="VULNERABILITY" ip_shodan_cve_data+=" ${redbg} $shodan_cve_count $vulntext FOUND ${default} (check " [[ "$IS_ASN_CHILD" = true ]] && ip_shodan_cve_data+="Shodan)" || ip_shodan_cve_data+="https://internetdb.shodan.io/$1)" fi fi StatusbarMessage } PwhoisListPrefixesForOrg(){ # list all IPv4 prefixes on pWhois based on an Org-Name # $1 = Org-Name handle [[ "$HAVE_IPCALC" = false ]] && return org="$1" full_org_search_data=$(whois -h whois.pwhois.org "registry org-name=$org") orgids=$(echo -e "$full_org_search_data" | grep -i -E -B1 "Org-Name: $org$" | grep "Org-ID" | cut -d ':' -f 2- | sed 's/^ //g') # assemble bulk pWhois query pwhois_bulk_query="begin\n" for orgid in $orgids; do pwhois_bulk_query+="netblock org-id=${orgid}\n" done pwhois_bulk_query+="end" # query pWhois for prefix in $(echo -e "$pwhois_bulk_query" | ncat whois.pwhois.org 43 | grep -E "^\*>"); do iprange=$(echo -e "$prefix" | cut -d '>' -f 2 | cut -d '|' -f 1 | tr -d ' ') IpcalcDeaggregate "$iprange" done } BGPUpstreamLookup(){ # retrieve likely upstream/transit autonomous system(s) for this IP address final_json_output="[]" if [ "$IS_BOGON" = true ]; then PrintErrorAndExit "Error: IP $1 is a bogon address, cannot continue" elif [ -z "$found_asname" ]; then PrintErrorAndExit "Error: IP $1 is not part of an announced prefix, cannot continue" fi BoxHeader "Recently observed upstream/transit AS for $1" # fetch all observed ASPATHs involving this IP address full_json_data=$(docurl -s "https://stat.ripe.net/data/bgp-updates/data.json?resource=$1&sourceapp=nitefood-asn") found_prefix=$(jq -r '.data.resource' <<<"$full_json_data") updates=$(jq '.data.updates[].attrs.path' <<<"$full_json_data") GetCAIDARank "$found_asn" asnlist=$(jq -r '. as $aspaths | $aspaths[first((range(1;length) | select($aspaths[.] 
== '"$found_asn"')) - 1)] // empty' <<<"$updates" | sort | uniq -c | sort -rn) tot=0 for line in $asnlist; do count=$(awk '{print $1}' <<<"$line") tot=$(( tot+count )) done [ -z "$asnlist" ] && total_upstreams_count=0 || total_upstreams_count=$(wc -l <<<"$asnlist") red_upstreams_count=0 green_upstreams_count=0 yellow_upstreams_count=0 white_upstreams_count=0 dim_upstreams_count=0 [[ "$caida_asrank" = "N/A" ]] && caida_asrank=99999 if (( caida_asrank < 100 )) || (( total_upstreams_count > 50 )); then # origin AS is in the top 100, therefore less likely to need transits (given its large customer cone), # or the prefix is widely announced (likely also by well-connected IXP RS peers). # Raise the thresholds to infer transit relationship status (need a higher percentage of BGP updates) green_probability=85 yellow_probability=75 white_probability=65 else green_probability=66 yellow_probability=33 white_probability=25 fi # create a static list of the largest known transit ASNs. # The list includes most of the top 35 CAIDA ranked ASNs (https://asrank.caida.org/?page_number=1&page_size=40&sort=rank) # Note: the list excludes Hurricane Electric (AS6939) because HE is known to largely reannounce most prefixes it receives # (even from IXP RS/SFI peers who do not subscribe a transit relationship with them) static_transits_array=( 3356 1299 174 6762 2914 6461 6453 3257 3491 9002 1273 5511 4637 12956 7473 1239 3320 701 7018 6830 7922 ) legend="\t${red}██ most likely transit\t( very large / Tier 1 upstream AS )${default}" legend+="\n\t${green}██ very likely transit\t( >= ${green_probability}% BGP updates from this AS )${default}" legend+="\n\t${yellow}██ likely transit\t( >= ${yellow_probability}% BGP updates from this AS )${default}" legend+="\n\t██ potentially transit\t( >= ${white_probability}% BGP updates from this AS )${default}" legend+="\n\t${dim}██ unlikely transit\t( < ${white_probability}% BGP updates from this AS )${default}" if [ "$JSON_OUTPUT" = false ]; then echo -e "\nLegend:\n$legend\n" echo -e "${bluebg} Target : ${default} ${blue}$1${default} ${dim}(matching prefix: ${white}$found_prefix${dim})${default}" echo -e "${bluebg} Origin AS : ${default} ${red}[AS$found_asn] ${green}$found_asname${default}" echo -e "${bluebg} CAIDA AS rank: ${default} ${caida_asrank_recap}${default}\n" else final_json_output=$(jq '. += [{ "prefix": "'"$found_prefix"'", "origin_as": "'"$found_asn"'", "origin_as_name": "'"$found_asname"'", "origin_as_rank": '"$caida_asrank"' }]' <<<"$final_json_output") final_json_output=$(jq '.[0] += {"upstreams_count": '"$total_upstreams_count"',"upstreams":[]}' <<<"$final_json_output") fi for line in $asnlist; do is_tier1=false count=$(awk '{print $1}' <<<"$line") asn=$(awk '{print $2}' <<<"$line") # TODO use bulk Team Cymru query to lookup asnames asname=$(host -t TXT "AS${asn}.asn.cymru.com" | grep -v "NXDOMAIN" | awk -F'|' 'NR==1{print substr($NF,2,length($NF)-2)}') integer_probability=$(( count*100/tot )) probability=$(jq -n "$count*100/$tot") if grep -qw "$asn" <<<"${static_transits_array[@]}" && (( caida_asrank > 35 )); then # apply static transit list to this AS only if it's not among the top 35 ASNs (per CAIDA rankings). 
# Larger networks (ranked < 35) are more likely to arrange SFI relationships with other Tier-1 networks, # rather than pay for transit is_tier1=true probability_color="${red}" (( red_upstreams_count++ )) elif (( integer_probability >= "$green_probability" )); then probability_color="${green}" (( green_upstreams_count++ )) elif (( integer_probability >= "$yellow_probability" )); then probability_color="${yellow}" (( yellow_upstreams_count++ )) elif (( integer_probability >= "$white_probability" )); then probability_color="${default}" (( white_upstreams_count++ )) else probability_color="${dim}" (( dim_upstreams_count++ )) fi if [ "$JSON_OUTPUT" = true ]; then final_json_output=$(jq '.[0].upstreams += [{"asn":"'"$asn"'", "asname":"'"$asname"'", "probability":'"$(printf "%.2f" "$probability")"', "is_tier1":'"$is_tier1"'}]' <<<"$final_json_output") else printf "${probability_color}██\tAS%-6s (%6.2f%%) - %s${default}\n" "$asn" "$probability" "$asname" fi done # count each type of upstreams and try to determine if this prefix is multihomed # echo "dbg green: $green_upstreams_count, yellow: $yellow_upstreams_count, white: $white_upstreams_count, dim: $dim_upstreams_count" MULTIPLE_UPSTREAMS=false multiple_string="multiple" if (( total_upstreams_count >= 20 )); then MULTIPLE_UPSTREAMS=true multiple_string="several" elif (( green_upstreams_count >= 2 )) || (( yellow_upstreams_count >= 2 )) || (( white_upstreams_count >= 2 )) || (( red_upstreams_count >= 2 )); then MULTIPLE_UPSTREAMS=true fi if [ "$JSON_OUTPUT" = false ]; then if [ "$MULTIPLE_UPSTREAMS" = true ]; then echo -e "\n${bluebg} INFO ${default}${blue} This prefix seems to be reannounced by ${underline}$multiple_string ($total_upstreams_count) upstream ASNs${default}${blue}. Possible reasons include:\n\t- ${underline}BGP multihoming${default}${blue} (if few upstreams announce this prefix)\n\t- ${underline}Tier 1 origin AS${default}${blue} or highly reachable prefix${default}${blue} (if many upstreams announce this prefix)\n\t- ${underline}Anycast prefix${default}\n" fi else json_resultcount="1" final_json_output=$(jq '.[0] += {"multiple_upstreams": '"$MULTIPLE_UPSTREAMS"'}' <<<"$final_json_output") fi if (( total_upstreams_count == 0 )); then echo -e "\n${lightgreybg} Prefix has not been observed in the DFZ recently ${default}\n" fi } AsnServerListener(){ DisableColors BoxHeader "ASN Lookup Server v$ASN_VERSION on $HOSTNAME" if [ "$ASN_DEBUG" = true ]; then server_user_uid=$(id -u) server_user_name=$(id -n -u "$server_user_uid") echo -e "\n- ${yellow}[DBG]${default} Server UID : ${blue}${server_user_uid} (${server_user_name})${default}" >&2 echo -en "- ${yellow}[DBG]${default} Server BIND_ADDR :" >&2 [[ -z "$ASN_SRV_BINDADDR" ]] && echo -en " ${green}not specified (default v4/v6)${default}" || echo -en " $ASN_SRV_BINDADDR" >&2 echo -en "\n- ${yellow}[DBG]${default} Server BIND_PORT : $ASN_SRV_BINDPORT" >&2 [[ "$ASN_SRV_BINDPORT" = "$DEFAULT_SERVER_BINDPORT" ]] && echo -en " ${green}(default)${default}" >&2 echo -e "\n- ${yellow}[DBG]${default} Ncat options : '${blue}${userinput}${default}'\n" >&2 fi CLOUD_SHELL_MARK="${red}❌ NO${default}" # fetch external IP and ASN to include in the HTML reports StatusbarMessage "Detecting host external IP and ASN" WhatIsMyIP if [ "$HAVE_IPV6" = true ]; then found_asn=$(docurl -s "https://stat.ripe.net/data/whois/data.json?resource=$local_wanip&sourceapp=nitefood-asn" | jq -r '.data.irr_records[0] | map(select(.key | match ("origin"))) | .[].value') WhoisASN "$found_asn" [[ -z "$ASN_SRV_BINDADDR" ]] && 
ASN_SRV_BINDADDR="$DEFAULT_SERVER_BINDADDR_v6" else LookupASNAndRouteFromIP "$local_wanip" [[ -z "$ASN_SRV_BINDADDR" ]] && ASN_SRV_BINDADDR="$DEFAULT_SERVER_BINDADDR_v4" fi if [ -z "$found_asn" ]; then found_asn="N/A" found_asname="(Unknown)" fi server_country=$(echo "${found_asname##*,}" | tr -d ' ') [[ -z "$server_country" ]] && server_country="(Unknown)" # prepare the server URL (for the JS bookmarklet) if [ "$ASN_SRV_BINDADDR" = "0.0.0.0" ] || [ "$ASN_SRV_BINDADDR" = "::" ]; then INTERNAL_ASNSERVER_ADDRESS="$local_wanip:$ASN_SRV_BINDPORT" else INTERNAL_ASNSERVER_ADDRESS="$ASN_SRV_BINDADDR:$ASN_SRV_BINDPORT" fi BOOKMARKLET_URL="http://${INTERNAL_ASNSERVER_ADDRESS}/asn_bookmarklet" # detect if we're running in Google Cloud Shell environment if [ "$GOOGLE_CLOUD_SHELL" = true ] && [ -n "$WEB_HOST" ]; then # on Google Cloud Shell, the $WEB_HOST environment variable contains the external hostname to reach the server # the format is https://- (cheers https://stackoverflow.com/a/70255668) INTERNAL_ASNSERVER_ADDRESS="${ASN_SRV_BINDPORT}-${WEB_HOST}" BOOKMARKLET_URL="https://${INTERNAL_ASNSERVER_ADDRESS}/asn_bookmarklet" CLOUD_SHELL_MARK="${green}✓ YES${default}" fi StatusbarMessage if [ "$HAVE_IPV6" = true ]; then [[ "$IS_HEADLESS" = true ]] && ipv6_mark="YES" || ipv6_mark="${green}✓ YES${default}" else [[ "$IS_HEADLESS" = true ]] && ipv6_mark="NO" || ipv6_mark="${red}❌ NO${default}" fi # properly show [IP]:PORT notation in case of IPv6 binding if grep -q ':' <<<"$ASN_SRV_BINDADDR"; then DISPLAY_ASN_SRV_BINDADDR="[${ASN_SRV_BINDADDR}]" else DISPLAY_ASN_SRV_BINDADDR="${ASN_SRV_BINDADDR}" fi # prepare API tokens status line [[ "$json_IQS_TOKEN" = true ]] && API_TOKENS_STATUS="${green}✓ IQS${default}" || API_TOKENS_STATUS="${red}❌ IQS${default}" [[ "$json_IPINFO_TOKEN" = true ]] && API_TOKENS_STATUS+=" • ${green}✓ IPINFO${default}" || API_TOKENS_STATUS+=" • ${red}❌ IPINFO${default}" [[ "$json_CLOUDFLARE_TOKEN" = true ]] && API_TOKENS_STATUS+=" • ${green}✓ CLOUDFLARE${default}" || API_TOKENS_STATUS+=" • ${red}❌ CLOUDFLARE${default}" echo -e "\n- Server ext. IP : ${blue}${local_wanip}${default}" \ "\n- Server Country : ${blue}${server_country}${default}" \ "\n- Server ASN : ${red}[AS${found_asn}]${default} ${green}$found_asname${default}" \ "\n- Server has IPv6 : ${ipv6_mark}" \ "\n- Running on GCP : ${CLOUD_SHELL_MARK}" \ "\n- API Tokens : ${API_TOKENS_STATUS}" \ "\n- Bookmarklet URL : ${BOOKMARKLET_URL}" \ "\n\n[$(date +"%F %T")] ${bluebg} INFO ${default} ASN Lookup Server listening on ${white}${DISPLAY_ASN_SRV_BINDADDR}:${ASN_SRV_BINDPORT}${default}" server_country="$(echo -e "$server_country" | tr '[:upper:]' '[:lower:]')" trap ' [[ "$IS_HEADLESS" = false ]] && echo -en "\r" >&2 echo -e "[$(date +"%F %T")] ${bluebg} INFO ${default} ASN Lookup Server requested shutdown, terminating..." >&2 exit 0 ' INT TERM #* Assemble the ncat listener command. 
Also pass on MONOCHROME/DEBUG preferences #* to the listener child (which will output to stderr on server's console) #* and print informations about client requests (1 child spawned per client connected) read -r -d '' ncat_cmd <<- END_OF_NCAT_CMD ncat -k -l $ASN_SRV_BINDADDR $ASN_SRV_BINDPORT $userinput --sh-exec " export NCAT_REMOTE_ADDR; \ found_asn=\"$found_asn\" \ found_asname=\"$found_asname\" \ server_country=\"$server_country\" \ INTERNAL_CONNHANDLER_CHILD=false \ INTERNAL_ASNSERVER_CONNHANDLER=true \ INTERNAL_ASNSERVER_ADDRESS="$INTERNAL_ASNSERVER_ADDRESS" \ MONOCHROME_MODE=\"$MONOCHROME_MODE\" \ ASN_DEBUG=\"$ASN_DEBUG\" \ \"$0\" " END_OF_NCAT_CMD #! Start the ncat listener and serve each client request #! by respawning the script with $INTERNAL_ASNSERVER_CONNHANDLER => true #! for every incoming connection eval "$ncat_cmd" PrintErrorAndExit "ERROR: The ncat server crashed or couldn't start. Try passing the '-v' option to see precisely what is being passed to it." } Ctrl_C() { if [ "$NO_ERROR_ON_INTERRUPT" = true ]; then StatusbarMessage tput sgr0 ShowMenu else PrintErrorAndExit "Interrupted" fi } BoxHeader() { # cheers https://unix.stackexchange.com/a/70616 # no output if in json mode [[ "$JSON_OUTPUT" = true ]] && return local message="$*" if [ "$IS_ASN_CHILD" = true ]; then echo "BOXHEADER $message" # the BOXHEADER tag will be transformed by the ASN server into an html element else if [ "$IS_HEADLESS" = true ]; then echo "// $message //" else echo -e "\n${white}╭─${message//?/─}─╮\n│ ${yellow}${message}${white} │\n╰─${message//?/─}─╯" tput sgr 0 fi fi } StatusbarMessage() { # invoke without parameters to delete the status bar message # suppress status bar displaying for headless, json or debug runs if [ "$IS_HEADLESS" = true ] || [ "$JSON_OUTPUT" = true ] || [ "$ASN_DEBUG" = true ]; then return fi if [ -n "$statusbar_message" ]; then # delete previous status bar message blank_line=$(printf "%.0s " $(seq "$terminal_width")) printf "\r%s\r" "$blank_line" >&2 fi if [ -n "$1" ]; then statusbar_message="$1" max_msg_size=$((terminal_width-23)) if [ "${#statusbar_message}" -gt "${max_msg_size}" ]; then statusbar_message="${lightgreybg}${statusbar_message:0:$max_msg_size}${lightgreybg}..." else statusbar_message="${lightgreybg}${statusbar_message}" fi statusbar_message+="${lightgreybg} (press CTRL-C to cancel)...${default}" echo -en "$statusbar_message" >&2 fi } WhatIsMyIP() { # only lookup local WAN IP once [[ -n "$local_wanip" ]] && return # retrieve local WAN IP (v6 takes precedence) from ifconfig.co local_wanip=$(docurl -s "https://ifconfig.co") # handle ifconfig.co serving a captcha-type redirect page (e.g. when coming from some AWS networks) grep -qE '^' <<<"$local_wanip" && local_wanip=$(docurl -s "https://api64.ipify.org") # check if we default to an IPv6 internet connection if echo "$local_wanip" | grep -q ':'; then HAVE_IPV6=true fi } DisableColors() { # avoid colors for headless server runs (e.g. 
systemd logs/status), # for listener runs, json output and monochrome mode if [ "$IS_HEADLESS" = true ] || [ "$JSON_OUTPUT" = true ]; then # disable all colors for headless/json green="" magenta="" yellow="" white="" blue="" red="" black="" lightyellow="" lightred="" lightblue="" lightgreybg="" bluebg="" redbg="" greenbg="" yellowbg="" dim="" default="" elif [ "$MONOCHROME_MODE" = true ]; then # set all colors to white, leave black/white/dim/default intact green="$white" magenta="$white" yellow="$white" blue="$white" red="$white" lightyellow="$white" lightred="$white" lightblue="$white" bluebg="$lightgreybg" redbg="$lightgreybg" greenbg="$lightgreybg" yellowbg="$lightgreybg" fi } CoreutilsFixup() { # check for GNU coreutils alternatives (improve command predictability on FreeBSD/MacOS systems) if [ -x "$(command -v gdate)" ]; then date() { gdate "$@"; } export -f date fi if [ -x "$(command -v gsed)" ]; then sed() { gsed "$@"; } export -f sed fi if [ -x "$(command -v gawk)" ]; then awk() { gawk "$@"; } export -f awk fi if [ -x "$(command -v gbase64)" ]; then base64() { gbase64 "$@"; } export -f base64 fi if [ -x "$(command -v gwc)" ]; then wc() { gwc "$@"; } export -f wc fi if [ "$IS_ASN_CHILD" = true ] || [ "$IS_ASN_CONNHANDLER" = true ]; then # suppress the tput command during headless runs tput() { :; } export -f tput fi } IpcalcVersionCheck() { # check for ipcalc version to accomodate both v1.0.0+ (CentOS/RHEL/Rocky 9) and v0.41+ (Debian derivatives) IPCALC_FLAG="" if [ "$HAVE_IPCALC" = true ]; then ipcalc_version=$(ipcalc -v | sed 's/ipcalc //') ipcalc_major=$(echo "$ipcalc_version" | cut -d '.' -f 1) case "${ipcalc_major}" in "0") ipcalc_minor=$(echo "$ipcalc_version" | cut -d '.' -f 2) if [ "$ipcalc_minor" -ge 5 ]; then IPCALC_FLAG="-r" else HAVE_IPCALC=false missing_tools+="\n - ipcalc" disabled_features+="\n - CIDR deaggregation (due to incompatible ipcalc version - v0.5+ for Debian-based or v1.0.0+ for RHEL-based required, but you have v$ipcalc_version)" fi ;; "1") IPCALC_FLAG="-d" ;; esac fi } IpcalcDeaggregate() { ipcalc_parm=$(tr -d ' ' <<<"$1") if grep -q "/" <<<"$1"; then # consider input to be already a CIDR block, return it unchanged echo "$1" else ipcalc ${IPCALC_FLAG} "$ipcalc_parm" 2>/dev/null | grep -iv "deaggregate" | awk '{print $NF}' | tail -n1 fi } CheckPrerequisites() { saveIFS="$IFS" IFS=' ' prerequisite_tools="jq whois host curl" # mandatory tools optional_tools="nmap mtr ipcalc grepcidr ncat aha" # optional tools missing_tools="" disabled_features="" HARD_FAIL=false HAVE_IPCALC=true HAVE_NMAP=true IXP_DETECTION=true UNABLE_TO_SERVE=false # BASH version check bash_major=$(echo "${BASH_VERSION}" | cut -d '.' -f 1) bash_minor=$(echo "${BASH_VERSION}" | cut -d '.' 
-f 2) bash_version_too_low=false if [ "$bash_major" -lt 4 ]; then bash_version_too_low=true elif [ "$bash_major" -eq 4 ] && [ "$bash_minor" -lt 2 ]; then bash_version_too_low=true fi [[ "$bash_version_too_low" = true ]] && PrintErrorAndExit "Error: BASH version must be >= 4.2 (you are running v${BASH_VERSION})" # Mandatory tools checking (hard fail if not found) for tool in $prerequisite_tools; do if [ -z "$(command -v "$tool")" ]; then missing_tools+="\n - $tool" HARD_FAIL=true fi done # Optional tools checking (no hard fail if not found, but some features disabled) for tool in $optional_tools; do if [ -z "$(command -v "$tool")" ]; then missing_tools+="\n - $tool" case "$tool" in "mtr") disabled_feat="AS path tracing" MTR_TRACING=false ;; "nmap") disabled_feat="Recon mode (Shodan scanning)" HAVE_NMAP=false ;; "ipcalc") disabled_feat="CIDR deaggregation" HAVE_IPCALC=false ;; "grepcidr") disabled_feat="IXP prefix detection" IXP_DETECTION=false ;; "ncat"|"aha") disabled_feat="ASN Lookup Server" UNABLE_TO_SERVE=true ;; esac disabled_features+="\n - ${disabled_feat}" fi done IpcalcVersionCheck if [ -n "$missing_tools" ]; then if [ "$JSON_OUTPUT" = false ]; then BoxHeader "! WARNING !" echo -e "\nThe following tools were not found on this system:" \ "${red} ${missing_tools}${default}" >&2 if [ -n "$disabled_features" ]; then echo -e "\nThe following features will be disabled:" \ "${yellow}${disabled_features}${default}" >&2 fi echo -e "\nPlease install the necessary prerequisite packages\nfor your system by following these instructions:" \ "\n\n>> ${blue}https://github.com/nitefood/asn#prerequisite-packages${default} <<\n" >&2 [[ "$HARD_FAIL" = true ]] && PrintErrorAndExit "Can not continue without (at least) the following tools: ${prerequisite_tools// /, }" read -srp "${lightgreybg}Press ENTER to continue...${default}" >&2 else [[ "$HARD_FAIL" = true ]] && PrintErrorAndExit "Can not continue without (at least) the following tools: ${prerequisite_tools// /, }" fi fi IQS_TOKEN="" ; json_IQS_TOKEN="false" IPINFO_TOKEN="" ; json_IPINFO_TOKEN="false" CLOUDFLARE_TOKEN="" ; json_CLOUDFLARE_TOKEN="false" IFS=$'\n' # Read tokens token from possible config files on disk for asn_config_file in $(tr ':' '\n' <<<"$IQS_TOKEN_FILES"); do if [ -r "$asn_config_file" ]; then IQS_TOKEN=$(tr -d ' \n\r\t' < "$asn_config_file") json_IQS_TOKEN="true" break fi done for asn_config_file in $(tr ':' '\n' <<<"$IPINFO_TOKEN_FILES"); do if [ -r "$asn_config_file" ]; then IPINFO_TOKEN=$(tr -d ' \n\r\t' < "$asn_config_file") json_IPINFO_TOKEN="true" break fi done for asn_config_file in $(tr ':' '\n' <<<"$CLOUDFLARE_TOKEN_FILES"); do if [ -r "$asn_config_file" ]; then CLOUDFLARE_TOKEN=$(tr -d ' \n\r\t' < "$asn_config_file") json_CLOUDFLARE_TOKEN="true" break fi done if [ "$JSON_OUTPUT" = false ]; then if [ -z "$IQS_TOKEN" ] || [ -z "$IPINFO_TOKEN" ] || [ -z "$CLOUDFLARE_TOKEN" ]; then # warn the user about the absence of external API token(s) if [ "$IS_HEADLESS" = true ]; then line="------------------------------------------------------------" echo -e "\n${line}\nWARNING: At least one external (free) API token is missing." \ "\nCheck https://github.com/nitefood/asn#api-tokens for\ninstructions on how to enable them." \ "\n${line}" >&2 else line="────────────────────────────────────────────────────" echo -e "\n${dim}${yellow}${line}" \ "\nWarning: ${dim}${yellow}At least one external API token is missing." 
\ "\nTo enable full script functionalities, please visit" \ "\n${blue}https://github.com/nitefood/asn#api-tokens" >&2 echo -e "${yellow}${line}${default}" >&2 fi fi if [ "$ASN_DEBUG" = true ]; then echo "" for token in "IQS_TOKEN" "IPINFO_TOKEN" "CLOUDFLARE_TOKEN"; do if [ -z "${!token}" ]; then DebugPrint "${dim}${white}$token: ${red}❌ NOT FOUND${white}" else DebugPrint "${dim}${white}$token: ${green}✓ OK${white}" fi done fi fi CoreutilsFixup IFS="$saveIFS" } DebugPrint(){ # Debug print helper function. will display debug string in terminal mode, or full client debug info in headless mode. Will append output to $ASN_LOGFILE logfile if [ "$ASN_DEBUG" = true ]; then # strip CRLFs dbgstring=$(echo -e "$1" | tr -d '\r\n') if [ "$IS_HEADLESS" = false ]; then # command line tool mode echo -e "${default}[$(date +'%F %T')] ${lightgreybg} DEBUG ${default} $dbgstring" >&2 echo -e "\n────────────────────────────────────────────────────────────────────────────────────────────────────────" \ "\n>>>> ${default}[$(date +'%F %T')] ${lightgreybg} DEBUG ${default} $dbgstring" >> "$ASN_LOGFILE" else # server mode if [ -z "$host" ]; then target='N/A' else target="$host" fi if [ -z "$reqid" ]; then requestid='N/A' else requestid="$reqid" fi echo -e "${default}[$(date +'%F %T')] ${lightgreybg} DEBUG ${default} $dbgstring [CLIENT: ${yellow}$NCAT_REMOTE_ADDR${default}, " \ "TARGET: ${magenta}$target${default}, REQID: ${blue}$requestid${default}]" >&2 \ echo -e "${default}[$(date +'%F %T')] ${lightgreybg} DEBUG ${default} $dbgstring [CLIENT: ${yellow}$NCAT_REMOTE_ADDR${default}, " \ "TARGET: ${magenta}$target${default}, REQID: ${blue}$requestid${default}]" >> "$ASN_LOGFILE" fi fi } StripAnsi() { # portable ANSI colors strip helper fn (for termbin sharing) - cheers https://unix.stackexchange.com/a/140255 # shellcheck disable=SC2001 echo -e "$1" | sed "s,$(printf '\033')\\[[0-9;]*[a-zA-Z],,g" } HandleNcatClientConnection() { # ╭─────────────────────────────────────╮ # │ MAIN NCAT CLIENT CONNECTION HANDLER │ # ╰─────────────────────────────────────╯ DisableColors # HTTP response headers http_ok='HTTP/1.0 200 OK\r\nContent-Type: text/html\r\n\r\n' http_ok_json='HTTP/1.0 200 OK\r\nContent-Type: application/json\r\n' http_ko='HTTP/1.0 400\r\nContent-Type: text/html\r\n\r\n

400 Bad Request

\nYour request could not be processed.' http_ko_json='HTTP/1.0 400\r\nContent-Type: application/json\r\n' # Javascript bookmarklet js_bookmarklet='javascript:(function(){var asnserver="'"${INTERNAL_ASNSERVER_ADDRESS}"'",target=window.location.hostname,' js_bookmarklet+='width=screen.width-screen.width/7,height=screen.height-screen.height/4,left=window.innerWidth/2-width/2,top=window.innerHeight/2-height/2;' js_bookmarklet+='window.open("http://"+asnserver+"/asn_lookup&"+target,"newWindow","width="+width+",height="+height+",top="+top+",left="+left)})();' # HTML bookmarklet page read -r -d '' html_bookmarklet_page <<- END_OF_BOOKMARKLET_HTML [ASN] Server configuration (client-side)
		


- ASN Server // Browser Integration - 1) Drag and drop the yellow link below to your bookmarks toolbar: ASN LOOKUP 2) Close this page and click the new bookmark while viewing any website. A lookup and trace for that host should start in a pop-up window.

ASN Lookup Server v${ASN_VERSION} running on $HOSTNAME
	END_OF_BOOKMARKLET_HTML

	# HTML opening tags
	# shellcheck disable=SC2016
	html_header=' [ASN] Lookup
	'

	# HTML closing tags
	html_footer='
		
		
	
	'

	CoreutilsFixup

	# a client just connected
	echo -e "[$(date +'%F %T')] ${bluebg}  INFO   ${default} Incoming connection by client ${yellow}$NCAT_REMOTE_ADDR${default}" >&2

	# read input from client (URL being accessed through the client browser)
	read -r line
	DebugPrint "RECEIVED new client request: '$line'"

	# handle 'asn_bookmarklet' command. This will show a web page for easy dragging & dropping of the bookmarklet onto the favorites toolbar
	if (echo -e "$line" | grep -Eq "^GET /asn_bookmarklet[?& ]"); then
		DebugPrint "SERVING bookmarklet page to client"
		echo -e "${http_ok}${html_bookmarklet_page}"

	# handle 'termbin_share' command. This will decode the input, send it to termbin.com and return the resulting paste URL to the client
	elif (echo -e "$line" | grep -Eq "^GET /termbin_share&"); then
		# send HTTP 200 OK header to client
		echo -e "${http_ok}"
		# decode input data
		input_data="$(echo -e "$line" | cut -d '&' -f 2- | cut -d ' ' -f 1 | base64 -d | gunzip)"
		termbin_url=""
		if [ -n "$input_data" ]; then
			# share on termbin.com and output the url to the client
			termbin_url="$(echo -e "$input_data" | timeout 10 ncat termbin.com 9999 | tr -d '\r\n\0')" # sanitize trailing characters in termbin output link
		fi
		# analyze the sharing attempt result
		if [ -n "$termbin_url" ]; then
			# sharing successful, output the link
			echo -e "${termbin_url}\n"
			# log the successful sharing attempt
			echo -e "[$(date +'%F %T')] ${greenbg} SHAREOK ${default} Successfully shared termbin link ${blue}${termbin_url}${default} with client ${yellow}$NCAT_REMOTE_ADDR${default}" >&2
			exit 0
		elif [ -z "$input_data" ]; then
			sharefailreason="invalid input data"
		elif [ -z "$termbin_url" ]; then
			sharefailreason="termbin.com error"
		else
			sharefailreason="unknown reason"
		fi
		# log the failed sharing attempt
		echo -e "[$(date +'%F %T')] ${redbg} SHAREKO ${default} Error sharing termbin link with client ${yellow}$NCAT_REMOTE_ADDR${default} ($sharefailreason)" >&2
		# close the client connection without any output
		exit 1

	elif (grep -Eq "^GET /asn_lookup(_json[p]?)?&" <<<"$line"); then
		OUTPUT_TYPE=""
		starttime=$(date +'%s')
		startdatetime=$(date +'%F %T')
		reportdatetime=$(date +'%F %T (%Z)')
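		# e.g. starttime='1722510000' (epoch seconds, later used to compute the report runtime),
		# startdatetime='2024-08-01 11:00:00' and reportdatetime='2024-08-01 11:00:00 (UTC)'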

		# detect output method requested by the user
		method=$(echo -e "$line" | cut -d '&' -f 1)
		if grep -q "jsonp" <<<"$method"; then
			# user requested pretty-print json output from the server
			OUTPUT_TYPE="jsonp"
		elif grep -q "json" <<<"$method"; then
			# user requested compact json output from the server
			OUTPUT_TYPE="json"
		fi
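		# i.e. the three endpoint flavours matched by the elif above map as follows:
		#   /asn_lookup&<target>        -> HTML report           (OUTPUT_TYPE empty)
		#   /asn_lookup_json&<target>   -> compact JSON          (OUTPUT_TYPE="json")
		#   /asn_lookup_jsonp&<target>  -> pretty-printed JSON   (OUTPUT_TYPE="jsonp")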
		# handle 'asn_lookup' command. This will start a lookup for the target.
		# 1) trim down target length to at most 100 characters
		# 2) extract target from client request (cut on ampersand first, space next, e.g.: 'GET /asn_lookup&google.com HTTP/1.1' should return google.com)
		# 3) convert any '%3A' to ':' (for IPv6 searches)
		# 4) convert any '%2F' to '/' (for URL searches)
		# 5) strip leading slash (/google.com -> google.com)
		# 6) sanitize by stripping any character except letters, numbers, slashes, dots, colons and dashes [/.:-]
		target=$(echo -e "$line" | cut -d '&' -f 2- | awk -F' ' '{print substr($1,0,100)}')
		host=$(echo -e "$target" | sed -e 's/%3[aA]/:/g' -e 's/%2[fF]/\//g' -e 's/^\///' -e 's/[^0-9a-zA-Z/\.:-]//g')

		# validate target by checking if it's an:
		# 1) IP address
		# 2) hostname (beginning with any alphanumeric character and including at least one dot '.')
		# 3) AS number (with or without case-insensitive 'AS' prefix)
		is_valid_target=$(echo -e "$host" | grep -Eo "$ipv4v6regex|^([0-9a-zA-Z])+.*\.|^([aA][sS])?[0-9]{1,6}$")
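		# e.g. '8.8.8.8', '2001:4860:4860::8888', 'www.example.com', 'AS32934' and '32934' all
		# pass this check, while an empty string or a bare dotless word (e.g. 'localhost') does not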

		# avoid invalid/malformed targets
		if [ -n "$is_valid_target" ]; then
			# this is a valid client request, set up request identifier
			reqid=$(date +'%N')
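			# reqid is the nanosecond field of the current time (e.g. '482913054'), unique
			# enough to tell concurrent client requests apart in the server log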

			echo -e "[$startdatetime] ${yellowbg} STARTED ${default} Lookup request by client ${yellow}$NCAT_REMOTE_ADDR${default} for target ${magenta}$host${default} (Request ID: ${blue}$reqid${default})" >&2

			if [ "$OUTPUT_TYPE" = "json" ] || [ "$OUTPUT_TYPE" = "jsonp" ]; then
				# send HTTP 200 OK response with JSON Content-Type
				echo -e "${http_ok_json}"
			else
				# send HTTP 200 OK response and output the HTML opening tags
				echo -e "${http_ok}${html_header}"
			fi
			countryname=$(docurl -m2 -s "https://restcountries.com/v3.1/alpha/$server_country" | jq -r '.[].name.common')
			if [ -n "$countryname" ] && [ "$countryname" != "null" ]; then
				title_tag="title='$countryname'"
			else
				title_tag=""
			fi
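			# e.g. for server_country='it' the restcountries.com lookup returns an array whose
			# first element contains '"name":{"common":"Italy", ...}', so title_tag becomes
			# "title='Italy'" (an HTML title attribute; it stays empty if the lookup fails)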

			if [ -z "$OUTPUT_TYPE" ]; then
				# default HTML mode, display ASN server info table
				echo -e "
					
ASN Lookup Server Report
Lookup Target Client IP Lookup Server ASN Lookup Server Hostname Date and time
$host $NCAT_REMOTE_ADDR $found_asn ($found_asname ) $HOSTNAME $reportdatetime

" fi # spawn ASN child and output results to the client DebugPrint "Spawning child" INTERNAL_CONNHANDLER_CHILD=true INTERNAL_ASNSERVER_CONNHANDLER=false INTERNAL_ASNSERVER_OUTPUT_TYPE="$OUTPUT_TYPE" TERM=dumb "$0" "$host" | { #* ╭──────────────────────────────────────────────╮ #* │ Main child output parsing loop command group │ #* ╰──────────────────────────────────────────────╯ if [ -n "$OUTPUT_TYPE" ]; then # JSON/JSONP mode while IFS= read -r outputline; do echo -e "$outputline" done else # HTML mode TextHeader() { headerwidth="150" padchar="_" padding="$(printf '%0.1s' ' '{1..500})" printf "%${headerwidth}s\n\n" "" | tr " " "${padchar}" printf '%*.*s %s %*.*s\n' 0 "$(((headerwidth-2-${#1})/2))" "$padding" "$1" 0 "$(((headerwidth-1-${#1})/2))" "$padding" printf "%${headerwidth}s\n" "" | tr " " "${padchar}" } # initialize textual trace log variable tracedata="" while IFS= read -r outputline; do # DebugPrint "[$reqid] CHILD OUTPUT: $outputline" "false" if echo -e "$outputline" | grep -Eq "^BOXHEADER "; then # child instance is displaying a BoxHeader, transform it into a message=$(echo -e "$outputline" | cut -d ' ' -f 2-) echo -e "
$message
\n" # append boxheader to tracefile # tracedata+="$(BoxHeader "$message")\n" tracedata+=$(TextHeader "[ $message ]") tracedata+="\n\n" if echo -e "$message" | grep -q "Trace to "; then echo -e "" fi elif grep -Eq "#COUNTRYCODE" <<<"$outputline"; then # child instance is indicating a country code for the current IP, map it to a SVG and display it on the HTML report # alternative URLs for the flag SVGs: # JSdelivr CDN: https://cdn.jsdelivr.net/npm/flag-icon-css/flags/4x3/$cc.svg # Github Pages: https://lipis.github.io/flag-icon-css/flags/4x3/$cc.svg # CloudflareJS: https://cdnjs.cloudflare.com/ajax/libs/flag-icon-css/3.5.0/flags/4x3/$cc.svg cc=$(echo -e "$outputline" | awk '{print $NF}') # lookup full country name countryname=$(docurl -m2 -s "https://restcountries.com/v3.1/alpha/$cc" | jq -r '.[].name.common') if [ -n "$countryname" ] && [ "$countryname" != "null" ]; then title_tag="title='$countryname'" else title_tag="" fi target_flag_img="" outputline=$(echo -e "$outputline" | sed -e 's/#COUNTRYCODE.*//') tracedata+="$outputline\n" outputline=$(echo -e "$outputline" | tr -d '\r\n' | aha -b -n) printf "%s%s\n" "$outputline" "$target_flag_img" elif grep -Eq "]*>/,""); print }' <<<"$outputline") tracedata+="$href_stripped\n" printf "%s\n" "$outputline" | aha -b -n | sed -e 's/<//g' -e 's/"/"/g' -e 's/&/&/g' elif grep -Eq "#PERFORMWHOIS" <<<"$outputline"; then # child instance is asking us to perform a whois lookup for the target domain # and show it to the user in the form of a tooltip targetdomain=$(echo -e "$outputline" | awk '{print $NF}') targetdomaindata=$(whois -H "$targetdomain" | grep -Ev "^\*") hostio_href="host.io" hostio_link=" $hostio_href🔗" read -r -d '' whoisbutton <<- END_OF_MODAL_HTML END_OF_MODAL_HTML outputline=$(echo -e "$outputline" | sed -e 's/#PERFORMWHOIS.*//') tracedata+="${outputline}\n" outputline+="($whoisbutton•$hostio_link )" printf "%s\n" "$outputline" | aha -b -n | sed -e 's/<//g' -e 's/"/"/g' -e 's/&/&/g' else tracedata+="$outputline\n" printf "%s\n" "$outputline" | aha -b -n fi printf "" # cheers https://stackoverflow.com/a/55471426 done #* Child asn instance finished, finish up the html # append text footer to tracedata (will only be displayed when sharing results on termbin) footermsg="Report generated by ASN Lookup Server v${ASN_VERSION} (https://github.com/nitefood/asn) on $reportdatetime" footermsglen=${#footermsg} footerhr=$(printf "%150s" "" | tr " " "_") tracedata+="$(printf "%s\n\n%*s\n" "$footerhr" $(((footermsglen+150)/2)) "$footermsg")" # print closing headers endtime=$(date +%s) runtime=$((endtime-starttime)) echo -e "
" \ "
" \ "" \ "Generated by ASN Lookup Server v${ASN_VERSION} running on $HOSTNAME in ${runtime}s (request ID: $reqid)" \ "" \ "

" \ "" \ "What is that?" \ "\n" \ "
" \ "" # create a hidden div with the gzipped+b64 encoded trace output for optional termbin sharing tracedata_encoded="$(StripAnsi "$tracedata" | gzip | base64 -w0)" echo -e "" echo -e "$html_footer" fi } # end the ncat client connection for this request ID childretval="$?" if [ "$childretval" -eq 0 ]; then DebugPrint "Child process completed successfully" [[ -z "$OUTPUT_TYPE" ]] && echo -e "" # log lookup request completed on server side echo -e "[$(date +"%F %T")] ${greenbg}COMPLETED${default} Lookup request by client ${yellow}$NCAT_REMOTE_ADDR${default} for target ${magenta}$host${default} (Request ID: ${blue}$reqid${default})" >&2 else DebugPrint "Child process failed with exit code $childretval (SIG$(kill -l $childretval))" echo -e "[$(date +"%F %T")] ${redbg} FAILED ${default} Lookup request by client ${yellow}$NCAT_REMOTE_ADDR${default} for target ${magenta}$host${default} failed with exit code $childretval (SIG$(kill -l $childretval)) (Request ID: ${blue}$reqid${default})" >&2 fi if [ "$childretval" -eq 141 ]; then DebugPrint "Client has gone away?" fi else # send HTTP code 400 Bad Request to client if [ "$JSON_OUTPUT" = false ]; then echo -e "$http_ko" else echo -e "$http_ko_json" userinput="$host" json_resultcount=0 final_json_output="" PrintErrorAndExit "bad request" fi # log lookup request ignored on server side echo -e "[$(date +"%F %T")] ${redbg} IGNORED ${default} Malformed data in request by client ${yellow}$NCAT_REMOTE_ADDR${default} for target ${magenta}$host${default}" >&2 fi else # ignore spurious browser requests (e.g. 'GET /favicon.ico') or scans. # Log the bad request on server side, sleep for 1 second, and drop the connection reqbytes=$(echo -e "$line" | awk -F$'\r\n' '{print substr($1,0,30)}') echo -e "[$(date +"%F %T")] ${redbg} IGNORED ${default} Ignored request by client ${yellow}$NCAT_REMOTE_ADDR${default}. Request data (first 30 bytes): ${red}${reqbytes}${default}" >&2 sleep 1 fi } #! ╭───────────────────────╮ #! │ Main asn script start │ #! 
╰───────────────────────╯ IFS=$'\n\t' # Color scheme green=$'\e[38;5;035m' magenta=$'\e[38;5;207m' yellow=$'\e[38;5;142m' white=$'\e[38;5;007m' blue=$'\e[38;5;038m' red=$'\e[38;5;203m' black=$'\e[38;5;016m' lightyellow=$'\e[38;5;220m' lightred=$'\e[38;5;167m' lightblue=$'\e[38;5;109m' lightgreybg=$'\e[48;5;252m'${black} bluebg=$'\e[48;5;038m'${black} redbg=$'\e[48;5;210m'${black} greenbg=$'\e[48;5;035m'${black} yellowbg=$'\e[48;5;142m'${black} dim=$'\e[2m' underline=$'\e[4m' default=$'\e[0m' # HTML color codes matching ANSI colors used by the script htmlwhite="#cccccc" htmlblack="#1e1e1e" htmllightgray="#d5d5d5" htmlred="#ff5f5f" htmldarkred="#b74d4d" htmlblue="#00afd7" htmlyellow="#afaf00" htmlgreen="#00af5f" htmldarkgreen="#058505" htmlmagenta="#ff5fff" [[ "$TERM" = "dumb" ]] && IS_HEADLESS=true || IS_HEADLESS=false ipv4v6regex='[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|'\ '([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|'\ '([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|'\ ':((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|'\ '(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|'\ '1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))' # cheers https://stackoverflow.com/a/17871737 # Get terminal width. If running headless in a child instance (or in server mode) set it to a "sane" value to display appropriate HTML spacing in reports if [ "$IS_HEADLESS" = true ]; then terminal_width=233 else terminal_width=$(tput cols) trap 'terminal_width=$(tput cols)' SIGWINCH fi # Check if this script instance was launched by the ASN server connection handler if [ "$INTERNAL_CONNHANDLER_CHILD" = true ]; then IS_ASN_CHILD=true else IS_ASN_CHILD=false fi # Check if this script instance was spawned by the ncat listener if [ "$INTERNAL_ASNSERVER_CONNHANDLER" = true ]; then IS_ASN_CONNHANDLER=true HandleNcatClientConnection exit 0 else IS_ASN_CONNHANDLER=false fi # External API tokens for ipqualityscore.com (IP reputation & threat analisys lookup), # ipinfo.io (IP geolocation lookup) and Cloudflare Radar (BGP hijacks and route leaks historical data) # Files will be parsed in the order they are declared (first path found takes precedence) IQS_TOKEN_FILES="$HOME/.asn/iqs_token:/etc/asn/iqs_token" IPINFO_TOKEN_FILES="$HOME/.asn/ipinfo_token:/etc/asn/ipinfo_token" CLOUDFLARE_TOKEN_FILES="$HOME/.asn/cloudflare_token:/etc/asn/cloudflare_token" # SIGINT trapping NO_ERROR_ON_INTERRUPT=false trap Ctrl_C INT # PeeringDB list of IXP prefixes peeringdb_dataset="" peeringdb_ipv6_dataset="" # Well known ports list [[ -r "/etc/services" ]] && WELL_KNOWN_PORTS=$(cat /etc/services) || WELL_KNOWN_PORTS="" # default options (configurable via $HOME/.asnrc) MTR_TRACING=true ADDITIONAL_INETNUM_LOOKUP=true DETAILED_TRACE=false MTR_ROUNDS=5 MAX_CONCURRENT_SHODAN_REQUESTS=10 SHODAN_SHOW_TOP_N=5 MONOCHROME_MODE=false ASN_DEBUG=false JSON_OUTPUT=false JSON_PRETTY=false DEFAULT_SERVER_BINDADDR_v4="127.0.0.1" DEFAULT_SERVER_BINDADDR_v6="::1" DEFAULT_SERVER_BINDPORT="49200" IQS_ALWAYS_QUERY=false IQS_CUSTOM_SETTINGS="" # e.g. 
"strictness=1&allow_public_access_points=false" - see https://www.ipqualityscore.com/documentation/proxy-detection/overview -> "Note About Front End IP Lookups" declare -A PEERINGDB_CACHED_DATASETS declare -A PEERINGDB_CACHED_IXP_DATA declare -A CAIDARANK_CACHED_AS_DATA # to build the CC->CountryName mapping cache: # COUNTRY_MAP_CACHE=$(docurl -m4 -s "https://restcountries.com/v3.1/all?fields=name,cca2" | jq -c 'map({(.cca2): .name.common}) | add | to_entries | sort_by(.key) | from_entries') COUNTRY_MAP_CACHE='{"AD":"Andorra","AE":"United Arab Emirates","AF":"Afghanistan","AG":"Antigua and Barbuda","AI":"Anguilla","AL":"Albania","AM":"Armenia","AO":"Angola", "AQ":"Antarctica","AR":"Argentina","AS":"American Samoa","AT":"Austria","AU":"Australia","AW":"Aruba","AX":"Åland Islands","AZ":"Azerbaijan","BA":"Bosnia and Herzegovina", "BB":"Barbados","BD":"Bangladesh","BE":"Belgium","BF":"Burkina Faso","BG":"Bulgaria","BH":"Bahrain","BI":"Burundi","BJ":"Benin","BL":"Saint Barthélemy","BM":"Bermuda", "BN":"Brunei","BO":"Bolivia","BQ":"Caribbean Netherlands","BR":"Brazil","BS":"Bahamas","BT":"Bhutan","BV":"Bouvet Island","BW":"Botswana","BY":"Belarus","BZ":"Belize","CA":"Canada", "CC":"Cocos (Keeling) Islands","CD":"DR Congo","CF":"Central African Republic","CG":"Republic of the Congo","CH":"Switzerland","CI":"Ivory Coast","CK":"Cook Islands","CL":"Chile", "CM":"Cameroon","CN":"China","CO":"Colombia","CR":"Costa Rica","CU":"Cuba","CV":"Cape Verde","CW":"Curaçao","CX":"Christmas Island","CY":"Cyprus","CZ":"Czechia","DE":"Germany", "DJ":"Djibouti","DK":"Denmark","DM":"Dominica","DO":"Dominican Republic","DZ":"Algeria","EC":"Ecuador","EE":"Estonia","EG":"Egypt","EH":"Western Sahara","ER":"Eritrea","ES":"Spain", "ET":"Ethiopia","FI":"Finland","FJ":"Fiji","FK":"Falkland Islands","FM":"Micronesia","FO":"Faroe Islands","FR":"France","GA":"Gabon","GB":"United Kingdom","GD":"Grenada", "GE":"Georgia","GF":"French Guiana","GG":"Guernsey","GH":"Ghana","GI":"Gibraltar","GL":"Greenland","GM":"Gambia","GN":"Guinea","GP":"Guadeloupe","GQ":"Equatorial Guinea", "GR":"Greece","GS":"South Georgia","GT":"Guatemala","GU":"Guam","GW":"Guinea-Bissau","GY":"Guyana","HK":"Hong Kong","HM":"Heard Island and McDonald Islands","HN":"Honduras", "HR":"Croatia","HT":"Haiti","HU":"Hungary","ID":"Indonesia","IE":"Ireland","IL":"Israel","IM":"Isle of Man","IN":"India","IO":"British Indian Ocean Territory","IQ":"Iraq", "IR":"Iran","IS":"Iceland","IT":"Italy","JE":"Jersey","JM":"Jamaica","JO":"Jordan","JP":"Japan","KE":"Kenya","KG":"Kyrgyzstan","KH":"Cambodia","KI":"Kiribati","KM":"Comoros", "KN":"Saint Kitts and Nevis","KP":"North Korea","KR":"South Korea","KW":"Kuwait","KY":"Cayman Islands","KZ":"Kazakhstan","LA":"Laos","LB":"Lebanon","LC":"Saint Lucia", "LI":"Liechtenstein","LK":"Sri Lanka","LR":"Liberia","LS":"Lesotho","LT":"Lithuania","LU":"Luxembourg","LV":"Latvia","LY":"Libya","MA":"Morocco","MC":"Monaco","MD":"Moldova", "ME":"Montenegro","MF":"Saint Martin","MG":"Madagascar","MH":"Marshall Islands","MK":"North Macedonia","ML":"Mali","MM":"Myanmar","MN":"Mongolia","MO":"Macau", "MP":"Northern Mariana Islands","MQ":"Martinique","MR":"Mauritania","MS":"Montserrat","MT":"Malta","MU":"Mauritius","MV":"Maldives","MW":"Malawi","MX":"Mexico","MY":"Malaysia", "MZ":"Mozambique","NA":"Namibia","NC":"New Caledonia","NE":"Niger","NF":"Norfolk Island","NG":"Nigeria","NI":"Nicaragua","NL":"Netherlands","NO":"Norway","NP":"Nepal","NR":"Nauru", "NU":"Niue","NZ":"New Zealand","OM":"Oman","PA":"Panama","PE":"Peru","PF":"French 
Polynesia","PG":"Papua New Guinea","PH":"Philippines","PK":"Pakistan","PL":"Poland", "PM":"Saint Pierre and Miquelon","PN":"Pitcairn Islands","PR":"Puerto Rico","PS":"Palestine","PT":"Portugal","PW":"Palau","PY":"Paraguay","QA":"Qatar","RE":"Réunion","RO":"Romania", "RS":"Serbia","RU":"Russia","RW":"Rwanda","SA":"Saudi Arabia","SB":"Solomon Islands","SC":"Seychelles","SD":"Sudan","SE":"Sweden","SG":"Singapore", "SH":"Saint Helena, Ascension and Tristan da Cunha","SI":"Slovenia","SJ":"Svalbard and Jan Mayen","SK":"Slovakia","SL":"Sierra Leone","SM":"San Marino","SN":"Senegal","SO":"Somalia", "SR":"Suriname","SS":"South Sudan","ST":"São Tomé and Príncipe","SV":"El Salvador","SX":"Sint Maarten","SY":"Syria","SZ":"Eswatini","TC":"Turks and Caicos Islands","TD":"Chad", "TF":"French Southern and Antarctic Lands","TG":"Togo","TH":"Thailand","TJ":"Tajikistan","TK":"Tokelau","TL":"Timor-Leste","TM":"Turkmenistan","TN":"Tunisia","TO":"Tonga", "TR":"Turkey","TT":"Trinidad and Tobago","TV":"Tuvalu","TW":"Taiwan","TZ":"Tanzania","UA":"Ukraine","UG":"Uganda","UM":"United States Minor Outlying Islands","US":"United States", "UY":"Uruguay","UZ":"Uzbekistan","VA":"Vatican City","VC":"Saint Vincent and the Grenadines","VE":"Venezuela","VG":"British Virgin Islands","VI":"United States Virgin Islands", "VN":"Vietnam","VU":"Vanuatu","WF":"Wallis and Futuna","WS":"Samoa","XK":"Kosovo","YE":"Yemen","YT":"Mayotte","ZA":"South Africa","ZM":"Zambia","ZW":"Zimbabwe"}' #* #* Parse command line options #* if [[ $# -lt 1 ]]; then PrintUsage exit 1 fi # read optional preferences from "$HOME/.asnrc" (only for non-headless runs) if [ "$IS_HEADLESS" = false ]; then rcfile="/${HOME}/.asnrc" if [ -r "$rcfile" ]; then # shellcheck disable=SC1090 . "$rcfile" fi fi status_json_output="ok" reason_json_output="success" json_request_time=$(date "+%Y-%m-%dT%H:%M:%S") starttime="$(date +%s)" final_json_output="" json_target_type="unknown" json_resultcount=0 # optspec contains: # - options followed by a colon: parameter is mandatory # - first colon: disable getopts' own error reporting # in this mode, getopts sets optchar to: # '?' -> unknown option # ':' -> missing mandatory parameter to the option optspec=":hvmljJsgn:t:d:o:a:c:u:" FORCE_ORGSEARCH=false SUGGEST_SEARCH=false SERVER_MODE=false RECON_MODE=false COUNTRY_BLOCK_MODE=false GEOLOCATE_ONLY_MODE=false BGP_UPSTREAM_MODE=false OPTIONS_PRESENT=false # will be set to true if getopts enters the loop (detects an option being passed) while getopts "$optspec" optchar; do { GetFullParamsFromCurrentPosition(){ # # Helper function that retrieves all the command line # parameters starting from position $OPTIND (current # option's argument as being parsed by getopts) # # 1) first param is set to current option's param (space-separated) # 2) then append (if any exist) the following command line params. # # this allows for invocations such as 'asn -o NAME WITH SPACES' # without having to quote NAME WITH SPACES # The function requires passing the original $@ as parameter # so as to not confuse it with the function's own $@. 
# # in the above example, $OPTARG="NAME", $OPTIND="3", ${@:$OPTIND}=array("WITH" "SPACES") # userinput="$OPTARG" for option in "${@:$OPTIND}"; do userinput+=" $option" done } userinput="" OPTIONS_PRESENT=true case "${optchar}" in "n") MTR_TRACING=false ADDITIONAL_INETNUM_LOOKUP=false GetFullParamsFromCurrentPosition "$@" ;; "t") MTR_TRACING=true GetFullParamsFromCurrentPosition "$@" ;; "d") MTR_TRACING=true DETAILED_TRACE=true GetFullParamsFromCurrentPosition "$@" ;; "o") FORCE_ORGSEARCH=true GetFullParamsFromCurrentPosition "$@" ;; "a") SUGGEST_SEARCH=true GetFullParamsFromCurrentPosition "$@" ;; "s") MTR_TRACING=false RECON_MODE=true GetFullParamsFromCurrentPosition "$@" ;; "c") COUNTRY_BLOCK_MODE=true GetFullParamsFromCurrentPosition "$@" ;; "g") GEOLOCATE_ONLY_MODE=true GetFullParamsFromCurrentPosition "$@" ;; "u") BGP_UPSTREAM_MODE=true GetFullParamsFromCurrentPosition "$@" ;; "m") MONOCHROME_MODE=true ;; "j") MTR_TRACING=false # json output for mtr traces not implemented yet JSON_OUTPUT=true # GetFullParamsFromCurrentPosition "$@" ;; "J") MTR_TRACING=false # json output for mtr traces not implemented yet JSON_OUTPUT=true JSON_PRETTY=true # GetFullParamsFromCurrentPosition "$@" ;; "l") SERVER_MODE=true argcount=1 for option in "${@:$OPTIND}"; do # one ore more parameters were passed to -l, check if # is an IP, PORT or "IP PORT" pair. # pass the rest of the command line as direct # ncat arguments. # Not passing everything directly (-l has the same # functionality in ncat) because asn should have more # sensible defaults (i.e. bind to 127.0.0.1 and not # 0.0.0.0 by default). # handle special case (if invocation was 'asn -l -v ' instead of 'asn -v -l ') [[ "$option" = "-v" ]] && { ASN_DEBUG=true; continue; } if [ "$argcount" -eq 1 ]; then listen_opt1="$option" [[ "${listen_opt1:0:1}" = "-" ]] && { listen_opt1=""; userinput+=" $option"; argcount=99999; } # next lines will be treated as ncat options} elif [ "$argcount" -eq 2 ]; then listen_opt2="$option" [[ "${listen_opt2:0:1}" = "-" ]] && { listen_opt2=""; userinput+=" $option"; argcount=99999; } # next lines will be treated as ncat options} else userinput+=" $option" fi (( argcount++ )) done # analyze the args ASN_SRV_BINDADDR="" ASN_SRV_BINDPORT="" for passedarg in $listen_opt1 $listen_opt2; do if grep -Eq ":|\." <<<"$passedarg"; then # it's an IP address ASN_SRV_BINDADDR="$passedarg" else # it's a port ASN_SRV_BINDPORT="$passedarg" fi done # fallback to default port if none was passed. # The rest is already in $userinput, ncat will use it for its own args [[ -z "$ASN_SRV_BINDPORT" ]] && ASN_SRV_BINDPORT="$DEFAULT_SERVER_BINDPORT" # trim the leading whitespace from ncat options userinput="${userinput#' '}" # '-l' must be the last option (since it includes optional args), bail the getopts loop break ;; "h") PrintUsage exit 0 ;; "v") ASN_DEBUG=true rm -f "$ASN_LOGFILE" 2>/dev/null ;; *) if [ "$OPTERR" = 1 ] && [ -t 0 ]; then [[ "$optchar" = "?" ]] && PrintUsage "Error: unknown option '-$OPTARG'" [[ "$optchar" = ":" ]] && PrintUsage "Error: option '-$OPTARG' requires an argument" exit 1 fi ;; esac } done if [ -t 0 ] || [ "$IS_HEADLESS" = true ]; then # input is from the terminal (or this is a headless instance) if [ "$OPTIONS_PRESENT" = false ]; then # shellcheck disable=SC2124 userinput="$@" elif [ -z "$userinput" ] && [ "$SERVER_MODE" = false ]; then # an option was passed, but it was not one that requires a param (e.g. 
-m for monochrome mode) # fetch the actual target GetFullParamsFromCurrentPosition "$@" fi else # script was invoked with input from stdin (e.g. "asn -s < iplist") # read IP list from stdin and trim blanks and comments userinput=$(cat | grep -Ev '^$|^[ \t]*#') fi # trim leading whitespace from userinput userinput=$(awk '{ sub(/^[ \t]+/, ""); print }' <<<"$userinput") # trim trailing newline from userinput userinput=$(echo -en "$userinput") # check if we still don't have a target if [ -z "$userinput" ] && [ "$SERVER_MODE" = false ]; then PrintUsage "Error: no target specified" fi [[ "$MONOCHROME_MODE" = true ]] && DisableColors # options consistency check: # enable JSON_OUTPUT if the user has JSON_PRETTY=true in the preferences file # also explicitly disable tracing if the user forgot to pass "-n" [[ "$JSON_PRETTY" = true ]] && JSON_OUTPUT=true [[ "$JSON_OUTPUT" = true ]] && MTR_TRACING=false #* Check prerequisite and optional tools CheckPrerequisites local_wanip="" HAVE_IPV6=false if [ "$SERVER_MODE" = true ]; then # user passed the "-l" switch [[ "$UNABLE_TO_SERVE" = true ]] && PrintErrorAndExit "ERROR: Can not start the listening server. Please install the necessary tools." AsnServerListener fi # handle output type for child instances # supported output types: json, jsonp if [ -n "$INTERNAL_ASNSERVER_OUTPUT_TYPE" ]; then MTR_TRACING=false case "${INTERNAL_ASNSERVER_OUTPUT_TYPE}" in "json") JSON_OUTPUT=true ;; "jsonp") JSON_OUTPUT=true JSON_PRETTY=true ;; esac fi if [ "$RECON_MODE" = true ]; then # user passed the "-s" switch final_json_output="[]" [[ "$HAVE_NMAP" = false ]] && PrintErrorAndExit "Nmap is required to use -s, but not available on this system" # launch Shodan scan ShodanRecon if [ "$JSON_OUTPUT" = true ]; then json_resultcount=$(jq '. | length' <<<"$shodan_json_output") final_json_output="$shodan_json_output" PrintJsonOutput else tput sgr0 fi exit 0 fi if [ "$GEOLOCATE_ONLY_MODE" = true ]; then # user passed the "-g" switch final_json_output="[]" # launch bulk geolocation lookup BulkGeolocate if [ "$JSON_OUTPUT" = true ]; then json_resultcount=$(jq '. | length' <<<"[$geolocation_json_output]") final_json_output="$geolocation_json_output" PrintJsonOutput else tput sgr0 fi exit 0 fi if [ "$COUNTRY_BLOCK_MODE" = true ]; then # user passed the "-c" switch # check if user passed a country code (.xx) if [ "${userinput::1}" == "." ]; then cc=$(echo "$userinput" | cut -d '.' -f 2-) restcountries_output=$(docurl -s "https://restcountries.com/v3.1/alpha/$cc") else # Urlencode the spaces in user's input countrystring=${userinput// /%20} # Look up the country code restcountries_output=$(docurl -s "https://restcountries.com/v3.1/name/${countrystring}") fi have_results=$(jq 'if type=="array" then true else false end' <<<"$restcountries_output") [[ "$have_results" = false ]] && PrintErrorAndExit "No countries matching '$userinput'" matches=$(jq -r '. 
| length' <<<"$restcountries_output") if [ "$matches" -gt 1 ]; then [[ "$JSON_OUTPUT" = true ]] && PrintErrorAndExit "Multiple countries found, please specify a valid country code" echo -e "\n${yellow}Multiple countries found, please specify a valid country code:${default}\n" for (( i=0; i<"$matches"; i++ )); do formalname=$(jq -r ".[$i] | (.name.official)" <<<"$restcountries_output") commonname=$(jq -r ".[$i] | (.name.common)" <<<"$restcountries_output") cc=$(jq -r ".[$i] | (.cca2)" <<<"$restcountries_output") printf "%-55s : ${green}%s${default} (aka: %s)\n" "$formalname" "$cc" "$commonname" done echo "" tput sgr0 exit 0 fi cc=$(jq -r 'select(.[].cca2 != null) | .[].cca2' <<<"$restcountries_output" | tr '[:upper:]' '[:lower:]') [[ -z "$cc" ]] && PrintErrorAndExit "No country matching '$userinput' found" countryname=$(jq -r '.[].name.common' <<<"$restcountries_output") population=$(jq -r '.[].population' <<<"$restcountries_output") country_ipv4_blocks=$(docurl -s "https://raw.githubusercontent.com/herrbischoff/country-ip-blocks/master/ipv4/$cc.cidr") country_ipv6_blocks=$(docurl -s "https://raw.githubusercontent.com/herrbischoff/country-ip-blocks/master/ipv6/$cc.cidr") country_ipv4_blocks_count=$(wc -l <<<"$country_ipv4_blocks") country_ipv6_blocks_count=$(wc -l <<<"$country_ipv6_blocks") if [ "$country_ipv4_blocks" = "404: Not Found" ]; then country_ipv4_blocks="" country_ipv4_blocks_count=0 fi if [ "$country_ipv6_blocks" = "404: Not Found" ]; then country_ipv6_blocks="" country_ipv6_blocks_count=0 fi # calculate total number of IPs allocated to the country json_country_ipv4_ip_count=0 cidrlist=$(echo -e "$country_ipv4_blocks" | sed -e 's/^.*\///g' | sort | uniq -c | sort -nr) for cidr in $cidrlist; do cidr_total_ips=$(awk '{printf "%d", (2**(32-$2) * $1)}' <<<"$cidr") json_country_ipv4_ip_count=$(( json_country_ipv4_ip_count + cidr_total_ips )) done ips_per_capita=$(awk "BEGIN {printf \"%.2g\", $json_country_ipv4_ip_count/$population}") country_ipv4_ip_count=$(printf "%'d" "$json_country_ipv4_ip_count") population=$(printf "%'d" "$population") if [ "$JSON_OUTPUT" = true ]; then json_country_ipv4_blocks=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$country_ipv4_blocks") json_country_ipv6_blocks=$(jq -c --slurp --raw-input 'split("\n") | map(select(length > 0))' <<<"$country_ipv6_blocks") json_resultcount="1" json_target_type="country" final_json_output="{\"country_name\":\"$countryname\"" final_json_output+=",\"country_code\":\"$cc\"" final_json_output+=",\"ipv4_blocks\":$country_ipv4_blocks_count" final_json_output+=",\"ipv4_total_ips\":$json_country_ipv4_ip_count" final_json_output+=",\"ipv4_per_capita\":$ips_per_capita" final_json_output+=",\"ipv4\":${json_country_ipv4_blocks}" final_json_output+=",\"ipv6_blocks\":$country_ipv6_blocks_count" final_json_output+=",\"ipv6\":${json_country_ipv6_blocks}" final_json_output+="}" PrintJsonOutput else BoxHeader "CIDR blocks allocated to $countryname" BoxHeader "IPv4" echo -e "$country_ipv4_blocks" | pr -4 -T -W$((terminal_width/2)) BoxHeader "IPv6" echo -e "$country_ipv6_blocks" | pr -4 -T -W$((terminal_width/2)) BoxHeader "IP Statistics for $countryname (.$cc)" echo -e "\n- ${green}$country_ipv4_blocks_count${default} IPv4 blocks found\n- ${yellow}$country_ipv6_blocks_count${default} IPv6 blocks found" echo -e "- Population: ${population}\n- Total IPv4 addresses: ${blue}$country_ipv4_ip_count${default}${default} (~${blue}$ips_per_capita${default} IPs per person)" BoxHeader "CIDR distribution (IPv4)" for cidr in 
$cidrlist; do cidrcount=$(awk '{print $1}' <<<"$cidr") cidrsize=$(awk '{print $2}' <<<"$cidr") [[ "$cidrcount" = "1" ]] && s="" || s="s" printf "%10s x ${magenta}/%s${default} %s\n" "$cidrcount" "$cidrsize" "block$s" done echo "" fi exit 0 fi BoxHeader "ASN lookup for $userinput" DebugPrint "ASN v$ASN_VERSION started with target: '$userinput'" if [ "$FORCE_ORGSEARCH" = true ]; then # user passed the "-o" switch ORG_FILTER=false declare -a orgfilters_array declare -a excl_orgfilters_array SearchByOrg "$userinput" exit 0 fi if [ "$SUGGEST_SEARCH" = true ]; then # user passed the "-s" switch RIPESuggestASN "$userinput" exit 0 fi input=$(echo "$userinput" | sed 's/\/.*//g' | grep -Eo "$ipv4v6regex") if [ -z "$input" ]; then # Input is not an IP Address if [ "$BGP_UPSTREAM_MODE" = true ]; then echo -e "\n${red} Error: the ${blue}'-u'${red} option requires an IPv4 or IPv6 address as input${default}\n" exit 1 fi # Check if it is a number (ASN) asn=$(echo "$userinput" | sed 's/[a|A][s|S]//g' | grep -E "^[0-9]*$") if [ -z "$asn" ]; then # Input is not an ASN either. See if it's a hostname (includes at least one dot) if echo "$userinput" | grep -q "\."; then # filter the input in case it's a URL, extracting relevant hostname/IP data userinput=$(awk -F[/:] '{gsub(".*//", ""); gsub(".*:.*@", ""); print $1}' <<<"$userinput") # run the IP regex again in case input is an IP(v6) URL (e.g. https://1.2.3.4/) and skip resolution if an IP is found ip=$(echo "$userinput" | grep -Eo "$ipv4v6regex") if [ -z "$ip" ]; then if [ "$IS_ASN_CHILD" = true ] && [ "$JSON_OUTPUT" = false ]; then # this instance is an ASN server child (not in JSON mode) targetdomain=$(rev <<<"$userinput" | cut -d '.' -f 1,2 | rev) echo -e "\n${blue}- Resolving \"$userinput\"... #PERFORMWHOIS $targetdomain\n" elif [[ "$JSON_OUTPUT" = false ]]; then # normal output echo -e -n "\n${blue}- Resolving \"$userinput\"... " fi json_target_type="hostname" ip=$(ResolveHostnameToIPList "$userinput") if [ -z "$ip" ]; then resolver_error="${red}Error: unable to resolve hostname${default}" if [ "$IS_ASN_CHILD" = true ] && [ "$JSON_OUTPUT" = false ]; then echo -e "$resolver_error\n" elif [ "$IS_ASN_CHILD" = false ] && [ "$JSON_OUTPUT" = false ]; then # normal output echo -e "${resolver_error}\n\n(Hint: if you wanted to search by organization, try the ${blue}'-o'${default} switch)\n" >&2 else # json output (both ASN child and normal instance) PrintErrorAndExit "unable to resolve hostname" fi exit 1 fi fi numips=$(echo "$ip" | wc -l) [[ $numips = 1 ]] && s="" || s="es" [[ "$JSON_OUTPUT" = false ]] && echo -e "${blue}$numips IP address$s found:${default}\n" # grab the longest IP to properly size output padding longest=0 for singleip in $ip; do [[ ${#singleip} -gt $longest ]] && longest=${#singleip} done (( longest++ )) # output actual results ip_to_trace="" WhatIsMyIP if [ "$IS_ASN_CHILD" = true ] && [ "$numips" -gt 1 ] && [ "$JSON_OUTPUT" = false ]; then # Running headless as an ASN server connhandler child, not in JSON mode, and more than 1 IP detected. # Speed up the operation and only process the first appropriate IP (v4/v6) if [ "$HAVE_IPV6" = true ] && grep -q ':' <<<"$ip"; then ip_to_trace=$(echo "$ip" | grep -m1 ':') else ip_to_trace=$(echo "$ip" | grep -v -m1 ':') fi LookupASNAndRouteFromIP "$ip_to_trace" WhoisIP "$ip_to_trace" PrintReputationAndShodanData "$ip_to_trace" echo -e "\n${bluebg} INFO ${default} multiple IP support is ${red}disabled${default} for remote traces. 
				LookupASNAndRouteFromIP "$ip_to_trace"
				WhoisIP "$ip_to_trace"
				PrintReputationAndShodanData "$ip_to_trace"
				echo -e "\n${bluebg} INFO ${default} multiple IP support is ${red}disabled${default} for remote traces.\nThe remaining $((numips-1)) IPs have been ignored:"
				count=1
				for singleip in $(echo "$ip" | grep -v "$ip_to_trace"); do
					ignoredip="$singleip"
					if [[ count -lt $((numips-1)) ]]; then
						echo -e " ├ ${white}${ignoredip}${default}"
					else
						echo -e " └ ${white}${ignoredip}${default}"
					fi
					((count++))
				done
				echo ""
				TraceASPath "$ip_to_trace"
			else
				for singleip in $ip; do
					if [ -n "$ip_to_trace" ] && [ "$JSON_OUTPUT" = true ]; then
						# add a comma to separate json output in case we hit multiple dns lookup results
						final_json_output+=","
					fi
					LookupASNAndRouteFromIP "$singleip"
					WhoisIP "$singleip"
					PrintReputationAndShodanData "$singleip"
					# save the first IP from the dns lookup result
					[[ -z "$ip_to_trace" ]] && ip_to_trace="$singleip"
				done
				[[ "$JSON_OUTPUT" = true ]] && PrintJsonOutput
				# Check if AS path tracing is requested
				if [ "$MTR_TRACING" = true ]; then
					# In case of multiple IPs (DNS RR), trace the first one.
					# Additionally, if we're on an IPv6 connection, default to
					# tracing to the first resolved IPv6 address (if any)
					if [ "$HAVE_IPV6" = true ]; then
						first_ipv6=$(echo "$ip" | grep -m1 ':')
						[[ -n "$first_ipv6" ]] && ip_to_trace="$first_ipv6"
					fi
					TraceASPath "$ip_to_trace"
				fi
			fi
			if [ "$JSON_OUTPUT" = false ]; then
				tput sgr0
				echo ""
			fi
			exit 0
		else
			# not an IP, not an ASN, not a hostname. Consider it an Organization name
			ORG_FILTER=false
			declare -a orgfilters_array
			declare -a excl_orgfilters_array
			SearchByOrg "$userinput"
		fi
	else
		# Input is an ASN
		json_target_type="asn"
		WhoisASN "$asn"
		if [ -z "$found_asname" ]; then
			PrintErrorAndExit "Error: no data found for AS${asn}"
		fi
		GetCAIDARank "$asn"
		target_asname="$found_asname"
		if [ "$JSON_OUTPUT" = true ]; then
			# JSON output
			GetIXPresence "$asn"
			QueryRipestat "$asn"
			GetCloudflareHijacksAndLeaks "$asn"
			final_json_output+="{"
			final_json_output+="\"asn\":\"${asn}\""
			final_json_output+=",\"asname\":\"${target_asname//\"/\\\"}\""
			final_json_output+=",\"asrank\":${caida_asrank//\"/\\\"}"
			final_json_output+=",\"org\":\"${found_org//\"/\\\"}\""
			final_json_output+=",\"holder\":\"${found_holder//\"/\\\"}\""
			final_json_output+=",\"abuse_contacts\":${json_abuse_contacts}"
			final_json_output+=",\"registration_date\":\"${found_createdate}\""
			final_json_output+=",\"ixp_presence\":$json_ixps"
			# peer count
			if [ -n "$ripestat_routing_data" ]; then
				final_json_output+=",\"prefix_count_v4\":${ripestat_ipv4}"
				final_json_output+=",\"prefix_count_v6\":${ripestat_ipv6}"
				final_json_output+=",\"bgp_peer_count\":${ripestat_bgp}"
			fi
			# BGP incident (hijacks/leaks) summary
			if [ "$cf_hijack_query_success" = true ]; then
				final_json_output+=",\"bgp_hijack_incidents\":{\"total\":${cf_hijacks_count}"
				final_json_output+=",\"as_hijacker\":${cf_hijacks_as_hijacker_count}"
				final_json_output+=",\"as_victim\":${cf_hijacks_as_victim_count}"
				final_json_output+="}"
			fi
			if [ "$cf_leak_query_success" = true ]; then
				final_json_output+=",\"bgp_leak_incidents\":{\"total\":${cf_leaks_count}"
				final_json_output+="}"
			fi
			# peer list
			if [ -n "$ripestat_neighbours_data" ]; then
				final_json_output+=",\"bgp_peers\":{"
				final_json_output+="\"upstream\":$json_upstream_peers"
				final_json_output+=",\"downstream\":$json_downstream_peers"
				final_json_output+=",\"uncertain\":$json_uncertain_peers"
				final_json_output+="}"
			fi
			# announced prefixes
			final_json_output+=",\"announced_prefixes\":$json_ripe_prefixes"
			final_json_output+=",\"inetnums\":{"
			final_json_output+="\"v4\":$json_ipv4_aggregated_inetnums"
			final_json_output+=",\"v6\":$json_ipv6_aggregated_inetnums}"
			final_json_output+=",\"inetnums_announced_by_other_as\":{"
			final_json_output+="\"v4\":[$json_ipv4_other_inetnums]"
			final_json_output+=",\"v6\":[$json_ipv6_other_inetnums]}"
			final_json_output+="}"
			PrintJsonOutput
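			# Abridged sketch (illustrative values, abridged arrays; some keys only appear
			# when the corresponding query succeeds) of the JSON document assembled above
			# for an AS target:
			#   { "asn":"64496", "asname":"EXAMPLE-AS, ZZ", "asrank":12345,
			#     "org":"...", "holder":"...", "abuse_contacts":[...],
			#     "registration_date":"...", "ixp_presence":[...],
			#     "prefix_count_v4":..., "prefix_count_v6":..., "bgp_peer_count":...,
			#     "bgp_hijack_incidents":{...}, "bgp_leak_incidents":{...},
			#     "bgp_peers":{"upstream":[...],"downstream":[...],"uncertain":[...]},
			#     "announced_prefixes":[...],
			#     "inetnums":{"v4":[...],"v6":[...]},
			#     "inetnums_announced_by_other_as":{"v4":[...],"v6":[...]} }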
final_json_output+="\"v4\":[$json_ipv4_other_inetnums]" final_json_output+=",\"v6\":[$json_ipv6_other_inetnums]}" final_json_output+="}" PrintJsonOutput else # normal output echo -en "\n${bluebg} AS Number ──>${default} ${red}${asn}" if [ "$IS_ASN_CHILD" = true ]; then ripestat_link="RIPEStat" he_link="HE.NET" bgpview_link="BGPView" bgptools_link="BGPTools" peeringdb_link="PeeringDB" echo -en " ($ripestat_link🔗 • $he_link🔗 • $bgpview_link🔗 • $bgptools_link🔗 • $peeringdb_link🔗)" fi echo "" echo -en "${bluebg} AS Name ──>${default} ${green}${target_asname}" if [ "$IS_ASN_CHILD" = true ]; then # signal to the parent connhandler the correct country flag to display for this ASN flag_icon_cc=$(echo "${target_asname##*,}" | tr -d ' ' | tr '[:upper:]' '[:lower:]') [[ -n "$flag_icon_cc" ]] && echo -n " #COUNTRYCODE $flag_icon_cc" fi printf "\n" echo -e "${bluebg} Organization ──>${default} ${yellow}${found_holder} (${found_org})" echo -e "${bluebg} CAIDA AS Rank ──>${default} ${caida_asrank_recap}${default}" echo -e "${bluebg} Abuse contact ──>${default} ${blue}${found_abuse_contact}" echo -e "${bluebg} AS Reg. date ──>${default} ${white}${found_createdate}" echo -e "${bluebg} RIR (Region) ──>${default} ${white}${caida_rir}" echo -en "${bluebg} Peering @IXPs ──>${default} " GetIXPresence "$asn" echo "" BoxHeader "BGP informations for AS${asn} (${target_asname})" GetCloudflareHijacksAndLeaks "$asn" echo "" echo -e "${bluebg} BGP Neighbors ────>${default} ${green}${caida_degree_total}${default} ${dim}(${default}${caida_degree_provider}${dim} Transits • ${default}${caida_degree_peer}${dim} Peers • ${default}${caida_degree_customer}${dim} Customers)${default}" echo -e "${bluebg} Customer cone ────>${default} ${green}${caida_customercone} ${default}${dim}(# of ASNs observed in the customer cone for this AS)${default}" echo -e "${bluebg} BGP Hijacks (past 1y) ────>${default} $cf_hijacks_text" echo -e "${bluebg} BGP Route leaks (past 1y) ──>${default} $cf_leaks_text" radar_href="https://radar.cloudflare.com/routing/as$asn?dateRange=52w" [[ "$IS_ASN_CHILD" = true ]] && radar_href="View on Cloudflare Radar🔗" echo -e "${bluebg} In-depth BGP incident info ─>${default} ${dim}${blue}➜ ${radar_href}${default}" echo "" BoxHeader "Prefix informations for AS${asn} (${target_asname})" echo "" QueryRipestat "${asn}" if [ -n "$ripestat_routing_data" ]; then echo -e "${bluebg} IPv4 Prefixes ────>${default} ${green}${ripestat_ipv4}" echo -e "${bluebg} IPv6 Prefixes ────>${default} ${yellow}${ripestat_ipv6}" fi if [ -n "$ripestat_neighbours_data" ]; then [[ -n "$upstream_peers" ]] && upstream_peers=$(echo -e "$upstream_peers") || upstream_peers="${redbg} NONE ${default}" [[ -n "$downstream_peers" ]] && downstream_peers=$(echo -e "$downstream_peers") || downstream_peers="${redbg} NONE ${default}" [[ -n "$uncertain_peers" ]] && uncertain_peers=$(echo -e "$uncertain_peers") || uncertain_peers="${redbg} NONE ${default}" echo "" BoxHeader "Peering informations for AS${asn} (${target_asname})" echo -e "\n${green}──────────────── Upstream Peers ────────────────${default}\n\n${upstream_peers}" echo -e "\n${yellow}─────────────── Downstream Peers ───────────────${default}\n\n${downstream_peers}" echo -e "\n${white}─────────────── Uncertain Peers ───────────────${default}\n\n${uncertain_peers}" fi echo "" BoxHeader "Aggregated IP resources for AS${asn} (${target_asname})" echo -e "\n${green}───── IPv4 ─────${default}" [[ -n "$ipv4_inetnums" ]] && echo -e "${green}${ipv4_inetnums}${default}" || echo -e "\n${redbg} NONE ${default}" 
echo -e "\n${yellow}───── IPv6 ─────${default}" [[ -n "$ipv6_inetnums" ]] && echo -e "${yellow}${ipv6_inetnums}${default}" || echo -e "\n${redbg} NONE ${default}" tput sgr0 echo "" fi exit 0 fi else # Input is an IP address grep -q ":" <<<"$input" && json_target_type="ipv6" || json_target_type="ipv4" # Perform IP lookup LookupASNAndRouteFromIP "$input" (( longest=${#input}+1 )) if [ "$BGP_UPSTREAM_MODE" = true ]; then BGPUpstreamLookup "$input" [[ "$JSON_OUTPUT" = true ]] && PrintJsonOutput exit 0 fi WhoisIP "$input" PrintReputationAndShodanData "$input" [[ "$JSON_OUTPUT" = true ]] && PrintJsonOutput # Perform AS path tracing if requested [[ "$MTR_TRACING" = true ]] && TraceASPath "$input" if [ "$JSON_OUTPUT" = false ]; then tput sgr0 echo "" fi exit 0 fi asn-0.78.0/asn.1000066400000000000000000000123401466111623400132360ustar00rootroot00000000000000.TH ASN 1 "July 2024" "0.77.0" "User Commands" .SH NAME asn \- ASN / RPKI validity / BGP stats / IPv4v6 / Prefix / ASPath / Organization / IP reputation lookup tool .SH SYNOPSIS .B asn [\fIOPTIONS\fR] [\fITARGET\fR] .br .B asn [\fB-v\fR] \fB-l\fR [\fISERVER OPTIONS\fR] .SH DESCRIPTION ASN is a tool for performing various lookup operations related to ASN (Autonomous System Number), RPKI validity, BGP statistics, IPv4/v6 addresses, prefixes, ASPaths, organizations, and IP reputations. .SH OPTIONS .TP \fB-t\fR .B (enable trace) .br Enable AS path trace to the .B TARGET (this is the default behavior) .TP \fB-n\fR .B (no trace|no additional INETNUM lookups) .br Disable tracing the AS path to the .B TARGET (for IP targets) or .br Disable additional (unannounced / announced by other AS) INETNUM lookups for the .B TARGET (for AS targets) .TP \fB-d\fR .B (detailed) Output detailed hop info during the AS path trace to the .B TARGET .br This option also enables RPKI validation/BGP hijacking detection for every hop .TP \fB-a\fR .B (ASN Suggest) .br Lookup AS names and numbers matching .B TARGET .TP \fB-u\fR .B (Transit/Upstream lookup) .br Inspect BGP updates and ASPATHs for the .B TARGET address/prefix and identify possible transit/upstream autonomous systems .TP \fB-c\fR .B (Country CIDR) .br Lookup all IPv4/v6 CIDR blocks allocated to the .B TARGET country .TP \fB-g\fR .B (Bulk Geolocate) .br Geolocate all IPv4/v6 addresses passed as .B TARGET .br This mode supports multiple targets, stdin input and IP extraction from input, e.g. .br .B asn -g < /var/log/apache2/error.log or .B echo 1.1.1.1 2.2.2.2 | asn -g .TP \fB-s\fR .B (Shodan scan) .br Query Shodan's InternetDB for CVE/CPE/Tags/Ports/Hostnames data about TARGET .br This mode supports multiple targets and stdin input, e.g. .br .B asn -s < iplist or .B echo 1.1.1.0/24 google.com | asn -s .TP \fB-o\fR .B (organization search) .br Force .B TARGET to be treated as an Organization Name .TP \fB-m\fR .B (monochrome output) .br Disable colored output .TP \fB-v\fR .B (verbose) .br Enable (and log to $HOME/asndebug.log) debug messages (URLs being queried and variable names being assigned) .br API call response data (i.e. the JSON output) is logged to the logfile. .TP \fB-j\fR .B (compact JSON output) .br Set output to compact JSON mode (ideal for machine parsing) .TP \fB-J\fR .B (pretty-printed JSON output) .br Set output to pretty-printed JSON mode .TP \fB-h\fR .B (help) .br Show this help screen .TP \fB-l\fR .B (lookup server) .br Launch the script in server mode. See .B SERVER OPTIONS below .SH TARGET .TP \fB\fR Lookup matching ASN and BGP announcements/neighbours data. 
	WhoisIP "$input"
	PrintReputationAndShodanData "$input"
	[[ "$JSON_OUTPUT" = true ]] && PrintJsonOutput
	# Perform AS path tracing if requested
	[[ "$MTR_TRACING" = true ]] && TraceASPath "$input"
	if [ "$JSON_OUTPUT" = false ]; then
		tput sgr0
		echo ""
	fi
	exit 0
fi
asn-0.78.0/asn.1000066400000000000000000000123401466111623400132360ustar00rootroot00000000000000
.TH ASN 1 "July 2024" "0.77.0" "User Commands"
.SH NAME
asn \- ASN / RPKI validity / BGP stats / IPv4v6 / Prefix / ASPath / Organization / IP reputation lookup tool
.SH SYNOPSIS
.B asn [\fIOPTIONS\fR] [\fITARGET\fR]
.br
.B asn [\fB-v\fR] \fB-l\fR [\fISERVER OPTIONS\fR]
.SH DESCRIPTION
ASN is a tool for performing various lookup operations related to ASN (Autonomous System Number), RPKI validity, BGP statistics, IPv4/v6 addresses, prefixes, ASPaths, organizations, and IP reputations.
.SH OPTIONS
.TP
\fB-t\fR
.B (enable trace)
.br
Enable AS path trace to the
.B TARGET
(this is the default behavior)
.TP
\fB-n\fR
.B (no trace|no additional INETNUM lookups)
.br
Disable tracing the AS path to the
.B TARGET
(for IP targets) or
.br
Disable additional (unannounced / announced by other AS) INETNUM lookups for the
.B TARGET
(for AS targets)
.TP
\fB-d\fR
.B (detailed)
Output detailed hop info during the AS path trace to the
.B TARGET
.br
This option also enables RPKI validation/BGP hijacking detection for every hop
.TP
\fB-a\fR
.B (ASN Suggest)
.br
Lookup AS names and numbers matching
.B TARGET
.TP
\fB-u\fR
.B (Transit/Upstream lookup)
.br
Inspect BGP updates and ASPATHs for the
.B TARGET
address/prefix and identify possible transit/upstream autonomous systems
.TP
\fB-c\fR
.B (Country CIDR)
.br
Lookup all IPv4/v6 CIDR blocks allocated to the
.B TARGET
country
.TP
\fB-g\fR
.B (Bulk Geolocate)
.br
Geolocate all IPv4/v6 addresses passed as
.B TARGET
.br
This mode supports multiple targets, stdin input and IP extraction from input, e.g.
.br
.B asn -g < /var/log/apache2/error.log
or
.B echo 1.1.1.1 2.2.2.2 | asn -g
.TP
\fB-s\fR
.B (Shodan scan)
.br
Query Shodan's InternetDB for CVE/CPE/Tags/Ports/Hostnames data about TARGET
.br
This mode supports multiple targets and stdin input, e.g.
.br
.B asn -s < iplist
or
.B echo 1.1.1.0/24 google.com | asn -s
.TP
\fB-o\fR
.B (organization search)
.br
Force
.B TARGET
to be treated as an Organization Name
.TP
\fB-m\fR
.B (monochrome output)
.br
Disable colored output
.TP
\fB-v\fR
.B (verbose)
.br
Enable (and log to $HOME/asndebug.log) debug messages (URLs being queried and variable names being assigned)
.br
API call response data (i.e. the JSON output) is logged to the logfile.
.TP
\fB-j\fR
.B (compact JSON output)
.br
Set output to compact JSON mode (ideal for machine parsing)
.TP
\fB-J\fR
.B (pretty-printed JSON output)
.br
Set output to pretty-printed JSON mode
.TP
\fB-h\fR
.B (help)
.br
Show this help screen
.TP
\fB-l\fR
.B (lookup server)
.br
Launch the script in server mode. See
.B SERVER OPTIONS
below
.SH TARGET
.TP
\fB\fR
Lookup matching ASN and BGP announcements/neighbours data.
.TP
\fB\fR
Lookup matching route(4/6), IP reputation, and ASN data.
.TP
\fB\fR
Lookup matching ASN data.
.TP
\fB\fR
Lookup matching IP, route, and ASN data. Supports multiple IPs - e.g. DNS RR.
.TP
\fB\fR
Extract hostname/IP from the URL and lookup relative data. Supports any protocol prefix, non-standard ports, and prepended credentials.
.TP
\fB\fR
Search by company name and lookup network ranges exported by (or related to) the company.
.SH SERVER OPTIONS
.TP
\fB\&BIND_ADDRESS\fR
IP address (v4/v6) to bind the listening server to (e.g., 'asn -l 0.0.0.0').
.br
Default value:
.B 127.0.0.1
(IPv4) or ::1 (IPv6).
.TP
\fB\&BIND_PORT\fR
TCP Port to bind the listening server to (e.g., 'asn -l 12345').
.br
Default value:
.B 49200.
.TP
\fB\&BIND_ADDRESS BIND_PORT\fR
IP address and port to bind the listening server to (e.g., 'asn -l ::1 12345').
.TP
.B \fB-v\fR
.B (verbose)
.br
Enable verbose output and debug messages in server mode.
.TP
.B \fB--allow\fR host[,host,...]
Allow only given hosts to connect to the server.
.TP
.B \fB--allowfile\fR file
A file of hosts allowed to connect to the server.
.TP
.B \fB--deny\fR host[,host,...]
Deny given hosts from connecting to the server.
.TP
.B \fB--denyfile\fR file
A file of hosts denied from connecting to the server.
.TP
.B \fB-m\fR, \fB--max-conns\fR
The maximum number of simultaneous connections accepted by the server. 100 is the default.
.SH NOTES
Every option in server mode (after -l) is passed directly to the ncat listener. Refer to
.B man ncat
for more details on the available commands. Unless specified, the default IP:PORT values of 127.0.0.1:49200 (for IPv4) or [::1]:49200 (for IPv6) will be used (e.g., 'asn -l').
.SH EXAMPLES
Example server usage:
.PP
.B asn -l
(starts server on default IP(v4/v6):PORT)
.PP
.B asn -l 0.0.0.0 --allow 192.168.0.0/24,192.168.1.0/24,192.168.2.245
(binds to all available IPv4 interfaces on the default port, allowing only connections from the three specified subnets)
.PP
.B asn -l :: 2222 --allow 2001:DB8::/32
(binds to all available IPv6 interfaces on port 2222, allowing only connections from the specified prefix)
.PP
.B asn -v -l 0.0.0.0 --allowfile "~/goodips.txt" -m 5
(verbose mode, bind to all IPv4 interfaces, use an allowfile with allowed addresses, accept a maximum of 5 concurrent connections)
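.PP
Example lookup usage (illustrative):
.PP
.B asn -j 1.1.1.1
(looks up the address and prints a compact JSON report on stdout, suitable for piping to a JSON parser such as jq)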
.SH "SEE ALSO"
To file a bug report or feature request, please open an issue on the project homepage:
.B https://github.com/nitefood/asn
.SH "AUTHOR"
Project author: nitefood
.B
.PP
This manpage was created by Marcos Rodrigues de Carvalho (aka oday)
.B
asn-0.78.0/cloudshell_bootstrap.sh000077500000000000000000000043231466111623400171670ustar00rootroot00000000000000
#!/usr/bin/env bash

read -r -d '' banner <<- END_OF_BANNER
	#########################################################################################################"
	# Helper script to prepare the GCP environment (install prerequisite packages / install the ASN script) #
	# Project homepage: https://github.com/nitefood/asn #
	#########################################################################################################
END_OF_BANNER

green=$'\e[38;5;035m'
blue=$'\e[38;5;038m'
red=$'\e[38;5;203m'
black=$'\e[38;5;016m'
greenbg=$'\e[48;5;035m'${black}
dim=$'\e[2m'
default=$'\e[0m'

clear
sudo mkdir -p /etc/asn
echo -e "${dim}$banner${default}\n"
echo -en "Enter your IPQualityScore API token (or press Enter to skip): "
read -sr IQS_TOKEN
echo -en "\nEnter your ipinfo.io API token (or press Enter to skip): "
read -sr IPINFO_TOKEN
echo -en "\nEnter your Cloudflare API token (or press Enter to skip): "
read -sr CLOUDFLARE_TOKEN
if [ -n "$IQS_TOKEN" ]; then
	echo -en "\n\n- Enabling IPQualityScore API..."
	echo "$IQS_TOKEN" | sudo tee /etc/asn/iqs_token &>/dev/null
	echo "${green}OK${default}"
else
	echo -e "\n\n- IPQualityScore API ${red}DISABLED${default}"
fi
if [ -n "$IPINFO_TOKEN" ]; then
	echo -en "- Enabling ipinfo.io API..."
	echo "$IPINFO_TOKEN" | sudo tee /etc/asn/ipinfo_token &>/dev/null
	echo "${green}OK${default}"
else
	echo -e "- ipinfo.io API ${red}DISABLED${default}"
fi
if [ -n "$CLOUDFLARE_TOKEN" ]; then
	echo -en "- Enabling Cloudflare API..."
	echo "$CLOUDFLARE_TOKEN" | sudo tee /etc/asn/cloudflare_token &>/dev/null
	echo "${green}OK${default}"
else
	echo -e "- Cloudflare API ${red}DISABLED${default}"
fi
echo -en "- Installing prerequisite packages..."
sudo apt update &>/dev/null
sudo apt -y install curl whois bind9-host mtr-tiny jq ipcalc grepcidr nmap ncat aha &>/dev/null
echo -e "${green}OK${default}"
echo -en "- Installing the asn script..."
sudo install -m 755 asn /usr/bin
echo -e "${green}OK${default}"
echo -e "\n${greenbg} All done ${default}\n"
echo -e "Example usage:\n\tServer mode : ${blue}asn -l${default}\n\tASPath trace: ${blue}asn 1.1.1.1${default}\n\nFor a full feature list visit ${blue}https://github.com/nitefood/asn${default}\n\n"
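# Illustrative invocation (assumes the repository has been cloned and this script is run
# from its root, where the 'asn' file resides, on an apt-based Cloud Shell environment):
#   bash cloudshell_bootstrap.sh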