pax_global_header00006660000000000000000000000064150017110720014504gustar00rootroot0000000000000052 comment=7cba7f5d0b381134d92a0f11b642fdfb1c5f8cb1 openvas-scanner-23.17.0/000077500000000000000000000000001500171107200147605ustar00rootroot00000000000000openvas-scanner-23.17.0/.clang-format000066400000000000000000000025331500171107200173360ustar00rootroot00000000000000# clang-format configuration for Greenbone C code # # Minimum required clang-format version: 6.0 --- AlignAfterOpenBracket: Align AlignConsecutiveAssignments: 'false' AlignConsecutiveDeclarations: 'false' AlignEscapedNewlines: Left AlignOperands: 'true' AlignTrailingComments: 'true' AllowAllParametersOfDeclarationOnNextLine: 'false' AllowShortBlocksOnASingleLine: 'false' AllowShortCaseLabelsOnASingleLine: 'false' AllowShortFunctionsOnASingleLine: None AllowShortIfStatementsOnASingleLine: 'false' AllowShortLoopsOnASingleLine: 'false' AlwaysBreakAfterReturnType: All AlwaysBreakBeforeMultilineStrings: 'false' BinPackArguments: 'true' BinPackParameters: 'true' BreakBeforeBinaryOperators: NonAssignment BreakBeforeBraces: GNU BreakBeforeTernaryOperators: 'true' BreakStringLiterals: 'true' ColumnLimit: '80' ContinuationIndentWidth: '2' DerivePointerAlignment: 'false' IncludeBlocks: Regroup IndentCaseLabels: 'false' IndentWidth: '2' IndentWrappedFunctionNames: 'false' KeepEmptyLinesAtTheStartOfBlocks: 'false' Language: Cpp MaxEmptyLinesToKeep: '1' PointerAlignment: Right ReflowComments: 'true' SortIncludes: 'true' SpaceAfterCStyleCast: 'true' SpaceBeforeAssignmentOperators: 'true' SpaceBeforeParens: Always SpaceInEmptyParentheses: 'false' SpacesInCStyleCastParentheses: 'false' SpacesInParentheses: 'false' SpacesInSquareBrackets: 'false' UseTab: Never ... openvas-scanner-23.17.0/.codecov.yml000066400000000000000000000000151500171107200171770ustar00rootroot00000000000000comment: off openvas-scanner-23.17.0/.devcontainer/000077500000000000000000000000001500171107200175175ustar00rootroot00000000000000openvas-scanner-23.17.0/.devcontainer/Dockerfile000066400000000000000000000045721500171107200215210ustar00rootroot00000000000000FROM rust:latest ARG UID=1000 ARG GID=1000 RUN apt-get update && apt-get install -y \ sudo \ git \ rsync \ pipx \ redis-server \ clangd \ # Runtime dependencies, required for .devcontainer nmap \ snmp \ netdiag \ pnscan \ # net-tools is required by some nasl plugins. 
# nasl_pread: Failed to execute child process “netstat” (No such file or directory) net-tools # Add prepare-user-dirs.sh and execcute it COPY prepare-user-dirs.sh /prepare-user-dirs.sh COPY build-cmake-project.sh /usr/local/bin/build-cmake-project.sh RUN chmod +x /usr/local/bin/build-cmake-project.sh COPY build-openvas /usr/local/bin/build-openvas RUN chmod +x /usr/local/bin/build-openvas COPY build-gvm-libs /usr/local/bin/build-gvm-libs RUN chmod +x /usr/local/bin/build-gvm-libs COPY github-clone.sh /usr/local/bin/github-clone RUN chmod +x /usr/local/bin/github-clone RUN bash /prepare-user-dirs.sh && rm /prepare-user-dirs.sh USER user RUN python3 -m pipx install greenbone-feed-sync # installing gvm-libs and openvas-scanner RUN github-clone greenbone/gvm-libs RUN github-clone greenbone/openvas-scanner RUN github-clone greenbone/openvas-smb RUN sudo sh /workspaces/greenbone/gvm-libs/.github/install-dependencies.sh RUN sudo sh /workspaces/greenbone/openvas-smb/.github/install-openvas-smb-dependencies.sh RUN sudo sh /workspaces/greenbone/openvas-scanner/.github/install-openvas-dependencies.sh RUN build-gvm-libs RUN build-openvas # Currently we don't install scannerctl and openvasd as they don't have dependencies # that must be preloaded in order to function. # WORKDIR /workspaces/openvas/rust/scannerctl # RUN cargo install --path . # WORKDIR /workspaces/openvas/rust/openvasd # RUN cargo install --path . USER redis RUN sed 's/redis-openvas/redis/' /workspaces/greenbone/openvas-scanner/config/redis-openvas.conf | tee /etc/redis/redis.conf USER user # We clean up the cloned repositories as they are usually mounted into the container RUN rm -r /workspaces/greenbone # RUN sudo apt-get clean && sudo rm -rf /var/lib/apt/lists/* SHELL ["/bin/bash", "-c"] RUN rustup component add rust-analyzer rust-src ENV PATH="/home/user/.cargo/bin:${PATH}" ENV PATH="/home/user/.local/bin:${PATH}" RUN echo "alias start_redis='redis-server /etc/redis/redis.conf'" >> /home/user/.bashrc ENV start_redis="redis-server /etc/redis/redis.conf" WORKDIR /workspaces CMD ["/bin/bash"] openvas-scanner-23.17.0/.devcontainer/Makefile000066400000000000000000000052071500171107200211630ustar00rootroot00000000000000# TODO: # - add update script # - change install-nvim to adapt update script to also update neovim # Get the UID and GID of the user those will be used within the Dockerfile to share the same id between host and container. UID := $(shell id -u) GID := $(shell id -g) MF_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # if podman exists, use it instead of docker ifneq (,$(shell which podman)) CONTAINERR=podman else CONTAINERR=docker endif # disable docker hints, who needs that? export DOCKER_CLI_HINTS=false .PHONY: build command-exists = $(CONTAINERR) exec -it openvas-dev command -v $(1) >/dev/null 2>&1 && echo "exists" || echo "not exists" # @if [ "$$(basename $$SHELL)" = "fish" ]; then \ get-shell = $(basename $(notdir $(SHELL))) build: $(CONTAINERR) build \ --build-arg UID=$(UID) \ --build-arg GID=$(GID) \ -t openvas-dev:latest \ . start: $(CONTAINERR) start openvas-dev create: $(CONTAINERR) create -it \ --name openvas-dev \ -v $(HOME)/.ssh:/home/user/.ssh\ -v $(HOME)/.config:/home/user/.config\ -v $(HOME)/.gitconfig:/home/user/.gitconfig \ openvas-dev:latest is-running: $(CONTAINERR) ps -q --filter "name=openvas-dev" | grep -q . 
enforce-running: $(MAKE) is-running || $(MAKE) start || $(MAKE) create && $(MAKE) start install-fish: enforce-running $(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo apt update" $(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo apt install -y fish" # doesn't work because of attached tty on create there is no reinit of the shell #$(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo chsh -s /usr/bin/fish user" install-pyright: enforce-running $(CONTAINERR) exec -it openvas-dev /bin/bash -c "pipx install pyright" install-rg-fzf: enforce-running $(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo apt update" $(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo apt install -y ripgrep fzf" install-nvim: install-rg-fzf $(CONTAINERR) exec -it openvas-dev /bin/bash -c "sudo apt install -y ninja-build gettext cmake unzip curl build-essential nodejs" $(CONTAINERR) exec -it openvas-dev /bin/bash -c "github-clone neovim/neovim" $(CONTAINERR) exec -it openvas-dev /bin/bash -c "cd /workspaces/neovim/neovim && make CMAKE_BUILD_TYPE=RelWithDebInfo && sudo make install" enter: enforce-running @if $(call command-exists,fish); then \ $(MAKE) enter-fish; \ else \ $(MAKE) enter-bash; \ fi enter-bash: enforce-running $(CONTAINERR) exec -it openvas-dev /bin/bash # TODO: detect running shell and use that enter-fish: enforce-running $(CONTAINERR) exec -it openvas-dev /usr/bin/fish stop: -$(CONTAINERR) stop openvas-dev rm: stop $(CONTAINERR) rm openvas-dev openvas-scanner-23.17.0/.devcontainer/build-cmake-project.sh000066400000000000000000000004451500171107200236770ustar00rootroot00000000000000#/bin/sh [ -d "$1" ] && WORKD_DIR="$1" || ( echo "Usage: $0 " exit 1 ) cd $WORKD_DIR set -ex cmake -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=ON cmake --build build --target install LDCONFIG="ldconfig" if [ "$(id -u)" -ne 0 ]; then LDCONFIG="sudo ldconfig" fi $LDCONFIG openvas-scanner-23.17.0/.devcontainer/build-gvm-libs000066400000000000000000000003251500171107200222570ustar00rootroot00000000000000#!/bin/bash owner=${1:-greenbone} if [ -d "/workspaces/$owner" ]; then target_dir="/workspaces/$owner/gvm-libs" else target_dir="/workspaces/gvm-libs" fi /usr/local/bin/build-cmake-project.sh "$target_dir"openvas-scanner-23.17.0/.devcontainer/build-openvas000066400000000000000000000003441500171107200222130ustar00rootroot00000000000000#!/bin/bash owner=${1:-greenbone} if [ -d "/workspaces/$owner" ]; then target_dir="/workspaces/$owner/openvas-scanner" else target_dir="/workspaces/openvas-scanner" fi /usr/local/bin/build-cmake-project.sh "$target_dir"openvas-scanner-23.17.0/.devcontainer/devcontainer.json000066400000000000000000000000571500171107200230750ustar00rootroot00000000000000{ "build": { "dockerfile": "Dockerfile" }, } openvas-scanner-23.17.0/.devcontainer/github-clone.sh000077500000000000000000000016771500171107200224510ustar00rootroot00000000000000#!/bin/bash if [ -z "$1" ]; then echo "Error: Repository name is not provided." exit 1 fi IFS='/' read -r owner repo <<< "$1" parent_dir="/workspaces" if [ ! -d "$parent_dir" ]; then echo "Parent directory '$parent_dir' does not exist. Creating it." mkdir -p "$parent_dir" fi owner_dir="$parent_dir/$owner" if [ ! -d "$owner_dir" ]; then echo "Owner directory '$owner_dir' does not exist. Creating it." mkdir -p "$owner_dir" fi target_dir="/workspaces/$1" if [ -d "$target_dir" ]; then echo "Error: Target directory '$target_dir' already exists." 
exit 1 fi if ls $HOME/.ssh/id_* &>/dev/null; then if git clone git@github.com:$1.git "$target_dir"; then echo "Cloning with SSH URL successful." else echo "Warning: Cloning with SSH URL failed. Falling back to HTTPS URL." git clone https://github.com/$1.git "$target_dir" fi else git clone https://github.com/$1.git "$target_dir" fi openvas-scanner-23.17.0/.devcontainer/prepare-user-dirs.sh000066400000000000000000000026021500171107200234240ustar00rootroot00000000000000#!/bin/sh # This scripts creates the dirs defined in dirs and sets the rights to the given user and id. # This script creates a user with a $UID as well as a group with $GID # afterwards it creates set of directories, assigns ownership to a newly created user and group, and configures sudo permissions for the user. # This is done to allow cmake --build build --target install to work without permission issues. dirs=" /workspaces /run/gvm /var/log/gvm /etc/openvas /var/lib/openvas /usr/local/lib /usr/local/share/man/man1/ /usr/local/share/man/man8/ /usr/local/include/gvm /usr/local/include/openvas /usr/local/share/openvas /usr/local/bin /usr/local/sbin /var/lib/openvas /var/lib/notus /var/lib/gvm /var/lib/openvasd /etc/openvasd /run/redis " set -ex groupadd --gid "$GID" "developer" || true # for the case that the GID already existed when we tried to create developer # this can happen when we reuse staff from a mac os host group_name=$(getent group "$GID" | cut -d: -f1) useradd --uid "$UID" --gid "$group_name" --shell /bin/bash --groups redis --create-home user for dir in ${dirs[@]}; do if [ ! -d $dir ]; then mkdir -p $dir fi chown -R user:$group_name $dir done # allow user to run sudo without password since it is intented as development # container it is assumed that the user wants to install or manipulate the container echo "user ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/user openvas-scanner-23.17.0/.docker/000077500000000000000000000000001500171107200163055ustar00rootroot00000000000000openvas-scanner-23.17.0/.docker/openvas.conf000066400000000000000000000001001500171107200206160ustar00rootroot00000000000000table_driven_lsc = yes mqtt_server_uri = tcp://mqtt-broker:1883 openvas-scanner-23.17.0/.docker/prod-oldstable.Dockerfile000066400000000000000000000047671500171107200232270ustar00rootroot00000000000000ARG VERSION=edge # this allows to work on forked repository ARG REPOSITORY=greenbone/openvas-scanner ARG GVM_LIBS_VERSION=oldstable FROM greenbone/openvas-smb:oldstable-edge AS openvas-smb FROM registry.community.greenbone.net/community/gvm-libs:${GVM_LIBS_VERSION} AS build COPY . /source RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ bison \ build-essential \ clang \ clang-format \ clang-tools \ cmake \ curl \ lcov \ libcjson1 \ libgnutls28-dev \ libgpgme-dev \ libjson-glib-dev \ libksba-dev \ libpaho-mqtt-dev \ libpcap-dev \ libssh-gcrypt-dev \ libbsd-dev \ libgnutls30 \ libgssapi3-heimdal \ libkrb5-26-heimdal \ libasn1-8-heimdal \ libroken18-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4-gnutls-dev \ libcurl4 \ # libcgreen1-dev \ libhiredis-dev \ libkrb5-dev \ && rm -rf /var/lib/apt/lists/* RUN curl -L -o cgreen.tar.gz https://github.com/cgreen-devs/cgreen/archive/refs/tags/1.6.3.tar.gz -k RUN tar -xzf cgreen.tar.gz && cd cgreen-1.6.3 RUN make install RUN ldconfig COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ RUN cmake -DCMAKE_BUILD_TYPE=Release -DINSTALL_OLD_SYNC_SCRIPT=OFF -B/build /source RUN DESTDIR=/install cmake --build /build -- install # TODO: add rust? 
FROM registry.community.greenbone.net/community/gvm-libs:${GVM_LIBS_VERSION} RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ bison \ libglib2.0-0 \ libjson-glib-1.0-0 \ libksba8 \ nmap \ libcap2-bin \ snmp \ netdiag \ pnscan \ libbsd0 \ rsync \ # net-tools is required by some nasl plugins. # nasl_pread: Failed to execute child process “netstat” (No such file or directory) net-tools \ # for openvas-smb support python3-impacket \ libgnutls30 \ libgssapi3-heimdal \ libkrb5-26-heimdal \ libasn1-8-heimdal \ libroken18-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4 \ libcurl3-gnutls \ libhiredis0.14 \ zlib1g\ && rm -rf /var/lib/apt/lists/* COPY .docker/openvas.conf /etc/openvas/ COPY --from=build /install/ / COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ COPY --from=openvas-smb /usr/local/bin/ /usr/local/bin/ RUN ldconfig # allow openvas to access raw sockets and all kind of network related tasks RUN setcap cap_net_raw,cap_net_admin+eip /usr/local/sbin/openvas # allow nmap to send e.g. UDP or TCP SYN probes without root permissions ENV NMAP_PRIVILEGED=1 RUN setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nmap openvas-scanner-23.17.0/.docker/prod-testing.Dockerfile000066400000000000000000000054561500171107200227270ustar00rootroot00000000000000ARG VERSION=edge # this allows to work on forked repository ARG REPOSITORY=greenbone/openvas-scanner ARG GVM_LIBS_VERSION=testing-edge FROM greenbone/openvas-smb:testing-edge AS openvas-smb FROM rust AS rust FROM registry.community.greenbone.net/community/gvm-libs:${GVM_LIBS_VERSION} AS build COPY . /source RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ bison \ build-essential \ clang \ clang-format \ clang-tools \ cmake \ curl \ git \ lcov \ libgnutls28-dev \ libgpgme-dev \ libjson-glib-dev \ libksba-dev \ libpaho-mqtt-dev \ libpcap-dev \ libgcrypt-dev \ libssh-dev \ libbsd-dev \ libgnutls30t64 \ libgssapi3-heimdal \ libkrb5-26-heimdal \ libasn1-8-heimdal \ libroken19-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4 \ libcurl4-gnutls-dev \ libhiredis-dev \ && rm -rf /var/lib/apt/lists/* COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ RUN cmake -DCMAKE_BUILD_TYPE=Release -DINSTALL_OLD_SYNC_SCRIPT=OFF -B/build /source RUN DESTDIR=/install cmake --build /build -- install COPY --from=rust /usr/local/cargo/ /usr/local/cargo/ COPY --from=rust /usr/local/rustup/ /usr/local/rustup/ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH RUN apt update && apt install -y ca-certificates RUN cargo build --release RUN cp target/release/openvasd /install/usr/local/bin RUN cp target/release/scannerctl /install/usr/local/bin FROM registry.community.greenbone.net/community/gvm-libs:${GVM_LIBS_VERSION} RUN apt-get update RUN apt-get install --no-install-recommends --no-install-suggests -y \ bison \ libglib2.0-0t64 \ libjson-glib-1.0-0 \ libksba8 \ nmap \ libcap2-bin \ snmp \ # not available in debian:testing 2024-04-29 # netdiag \ pnscan \ libbsd0 \ rsync \ # net-tools is required by some nasl plugins. 
# nasl_pread: Failed to execute child process “netstat” (No such file or directory) net-tools \ # for openvas-smb support python3-impacket \ libgnutls30t64 \ libgssapi3-heimdal \ libkrb5-26-heimdal \ libasn1-8-heimdal \ libroken19-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4 \ libhiredis1.1.0 \ libcurl3t64-gnutls \ zlib1g RUN rm -rf /var/lib/apt/lists/* COPY .docker/openvas.conf /etc/openvas/ COPY --from=build /install/ / COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ COPY --from=openvas-smb /usr/local/bin/ /usr/local/bin/ RUN ldconfig # allow openvas to access raw sockets and all kind of network related tasks RUN setcap cap_net_raw,cap_net_admin+eip /usr/local/sbin/openvas # allow nmap to send e.g. UDP or TCP SYN probes without root permissions ENV NMAP_PRIVILEGED=1 RUN setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nmap openvas-scanner-23.17.0/.docker/prod.Dockerfile000066400000000000000000000044671500171107200212550ustar00rootroot00000000000000ARG VERSION=edge # this allows to override gvm-libs for e.g. smoketests ARG GVM_LIBS=registry.community.greenbone.net/community/gvm-libs FROM rust AS rust FROM greenbone/openvas-smb AS openvas-smb FROM ${GVM_LIBS}:${VERSION} AS build COPY . /source RUN sh /source/.github/install-openvas-dependencies.sh COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ RUN cmake -DCMAKE_BUILD_TYPE=Release -DINSTALL_OLD_SYNC_SCRIPT=OFF -B/build /source RUN DESTDIR=/install cmake --build /build -- install WORKDIR /source/rust COPY --from=rust /usr/local/cargo/ /usr/local/cargo/ COPY --from=rust /usr/local/rustup/ /usr/local/rustup/ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH RUN apt update && apt install -y ca-certificates RUN cargo build --release RUN cp target/release/openvasd /install/usr/local/bin RUN cp target/release/scannerctl /install/usr/local/bin # Do we want to copy feed verifier as well? # RUN cp release/feed-verifier /install/bin FROM ${GVM_LIBS}:${VERSION} RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ bison \ libglib2.0-0 \ libjson-glib-1.0-0 \ libksba8 \ nmap \ libcap2-bin \ snmp \ netdiag \ pnscan \ libbsd0 \ rsync \ # net-tools is required by some nasl plugins. # nasl_pread: Failed to execute child process “netstat” (No such file or directory) net-tools \ libgnutls30 \ libgssapi3-heimdal \ libkrb5-26-heimdal \ libasn1-8-heimdal \ libroken19-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4 \ libcurl3-gnutls \ zlib1g \ libhiredis0.14 \ libssh-4 \ && rm -rf /var/lib/apt/lists/* COPY .docker/openvas.conf /etc/openvas/ # must be pre built within the rust dir and moved to the bin dir # usually this image is created within in a ci ensuring that the # binary is available. COPY --from=build /install/ / COPY --from=openvas-smb /usr/local/lib/ /usr/local/lib/ COPY --from=openvas-smb /usr/local/bin/ /usr/local/bin/ RUN ldconfig # allow openvas to access raw sockets and all kind of network related tasks RUN setcap cap_net_raw,cap_net_admin+eip /usr/local/sbin/openvas # allow nmap to send e.g. 
UDP or TCP SYN probes without root permissions ENV NMAP_PRIVILEGED=1 RUN setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nmap CMD /usr/local/bin/openvasd openvas-scanner-23.17.0/.docker/railguards/000077500000000000000000000000001500171107200204425ustar00rootroot00000000000000openvas-scanner-23.17.0/.docker/railguards/README.md000066400000000000000000000005271500171107200217250ustar00rootroot00000000000000# Railguards As long as openvas is a distributed monolith in - greenbone/gvm-libs - greenbone/openvas-smb - greenbone/openvas-scanner we need to verify that the dependencies play nicely together on our target: - debian:stable **WARNING** The Dockerfiles within this folder are not meant to be used outside of this very specific test case. openvas-scanner-23.17.0/.docker/railguards/debian_stable.Dockerfile000066400000000000000000000020171500171107200252070ustar00rootroot00000000000000# This Dockerfile is not meant to be actually used, it is meant for testing # the integrity when building: # - gvm-libs # - openvas-smb # - openvas-scanner # # together from a main branch. # # If it builds without error everything is as expected. FROM debian:stable # CLONE gvm-libs # CLONE openvas-smb # Install dependencies # check ld COPY . /source RUN apt update && apt install -y git RUN bash /source/.devcontainer/github-clone.sh greenbone/gvm-libs RUN bash /source/.devcontainer/github-clone.sh greenbone/openvas-smb # tests implicitely if there are dependencies conflicts RUN sh /workspaces/greenbone/gvm-libs/.github/install-dependencies.sh RUN sh /workspaces/greenbone/openvas-smb/.github/install-openvas-smb-dependencies.sh RUN sh /source/.github/install-openvas-dependencies.sh # build everything RUN sh /source/.devcontainer/build-cmake-project.sh /workspaces/greenbone/gvm-libs RUN sh /source/.devcontainer/build-cmake-project.sh /workspaces/greenbone/openvas-smb RUN sh /source/.devcontainer/build-cmake-project.sh /source openvas-scanner-23.17.0/.dockerignore000066400000000000000000000000511500171107200174300ustar00rootroot00000000000000.vscode/ .mergify.yml build/ rust/target openvas-scanner-23.17.0/.github/000077500000000000000000000000001500171107200163205ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/CODEOWNERS000066400000000000000000000002611500171107200177120ustar00rootroot00000000000000# default reviewers * @greenbone/scanner-maintainers # devops .github/ @greenbone/scanner-maintainers .docker/ @greenbone/scanner-maintainers openvas-scanner-23.17.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001500171107200205035ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/ISSUE_TEMPLATE/bug-report.md000066400000000000000000000040061500171107200231130ustar00rootroot00000000000000--- name: Bug Report about: Report an issue with openvas scanner title: '' labels: bug assignees: '' --- ### Expected behavior ### Actual behavior ### Steps to reproduce 1. 2. 3. ### GVM versions **gsa:** (gsad --version) **gvm:** (gvmd --version) **openvas:** (openvas --version) **gvm-libs:** **openvas-smb:** **ospd-openvas:** (ospd-openvas --version) ### Environment **Operating system:** **Installation method / source:** (packages, source installation) ### Logfiles ``` ``` openvas-scanner-23.17.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000002571500171107200224770ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Greenbone Community Forum url: https://community.greenbone.net/c/gse about: Please ask and answer questions here. 
openvas-scanner-23.17.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000014071500171107200221230ustar00rootroot00000000000000**What**: **Why**: **How**: **Checklist**: - [ ] Tests - [ ] PR merge commit message adjusted openvas-scanner-23.17.0/.github/actions/000077500000000000000000000000001500171107200177605ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/actions/compile-aarch64/000077500000000000000000000000001500171107200226365ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/actions/compile-aarch64/action.yml000066400000000000000000000020711500171107200246360ustar00rootroot00000000000000name: "Compile Rust aarch64" runs: using: "composite" steps: - uses: ./.github/actions/setup-rust - run: cargo install cross || true shell: bash - run: CROSS_CONFIG=Cross.toml cross -v build --release --target aarch64-unknown-linux-gnu working-directory: rust shell: bash - name: "patch for debian stable" working-directory: rust run: | find . -type f -name "scannerctl" patchelf --replace-needed libpcap.so.1 libpcap.so.0.8 target/aarch64-unknown-linux-gnu/release/scannerctl patchelf --replace-needed libz.so libz.so.1 target/aarch64-unknown-linux-gnu/release/scannerctl shell: bash - run: mkdir -p assets/linux/arm64 shell: bash - run: mv rust/target/aarch64-unknown-linux-gnu/release/openvasd assets/linux/arm64/openvasd shell: bash - run: mv rust/target/aarch64-unknown-linux-gnu/release/scannerctl assets/linux/arm64/scannerctl shell: bash - run: mv rust/target/aarch64-unknown-linux-gnu/release/feed-verifier assets/linux/arm64/feed-verifier shell: bash openvas-scanner-23.17.0/.github/actions/compile-x86_64/000077500000000000000000000000001500171107200223445ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/actions/compile-x86_64/action.yml000066400000000000000000000020571500171107200243500ustar00rootroot00000000000000name: "Compile Rust x86_64" runs: using: "composite" steps: - uses: ./.github/actions/setup-rust - run: cargo install cross || true shell: bash - run: CROSS_CONFIG=Cross.toml cross build --release --target x86_64-unknown-linux-gnu working-directory: rust shell: bash - name: "patch for debian stable" working-directory: rust shell: bash run: | find . 
-type f -name "scannerctl" patchelf --replace-needed libpcap.so.1 libpcap.so.0.8 target/x86_64-unknown-linux-gnu/release/scannerctl patchelf --replace-needed libz.so libz.so.1 target/x86_64-unknown-linux-gnu/release/scannerctl - run: mkdir -p assets/linux/amd64 shell: bash - run: mv rust/target/x86_64-unknown-linux-gnu/release/openvasd assets/linux/amd64/openvasd shell: bash - run: mv rust/target/x86_64-unknown-linux-gnu/release/scannerctl assets/linux/amd64/scannerctl shell: bash - run: mv rust/target/x86_64-unknown-linux-gnu/release/feed-verifier assets/linux/amd64/feed-verifier shell: bash openvas-scanner-23.17.0/.github/actions/setup-rust/000077500000000000000000000000001500171107200221135ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/actions/setup-rust/action.yml000066400000000000000000000011751500171107200241170ustar00rootroot00000000000000name: "Setup Rust Environment" description: "Install necessary dependencies and set up Rust stable" runs: using: "composite" steps: - uses: actions/cache@v4 with: path: | ~/.cargo/bin/ ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ rust/target/ key: ${{ runner.os }}-cargo-${{ hashFiles('rust/Cargo.lock') }} - run: sudo apt update || true shell: bash - run: sudo apt-get install -y libpcap-dev patchelf shell: bash - run: rustup update stable && rustup default stable || rustup default stable shell: bash openvas-scanner-23.17.0/.github/check-c-formatting.sh000077500000000000000000000012541500171107200223260ustar00rootroot00000000000000#!/usr/bin/env sh # I wanted to get the merge base using {{ github.base_ref }}, however this is only # available for the event that opens the PR or edits it, not on pushes to the branch. # Comparing to main should be an OK alternative, since it will - at worst - do more # autoformatting than it otherwise would. [ -z "$1" ] && merge_base=main || merge_base="$1" git fetch origin $merge_base:refs/remotes/origin/$merge_base echo "$(clang-format --version)" (git diff --name-only "origin/$merge_base") | while read filename; do extension="${filename##*.}" if [ "$extension" = "c" ] || [ "$extension" = "h" ]; then clang-format -i -style=file "$filename" fi done openvas-scanner-23.17.0/.github/dependabot.yml000066400000000000000000000002731500171107200211520ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" groups: github-actions: patterns: - "*" openvas-scanner-23.17.0/.github/enhance_version.sh000066400000000000000000000011421500171107200220200ustar00rootroot00000000000000#!/bin/sh version="$1" type="$2" # Split version string into fields IFS='.' read -r field1 field2 field3 << EOF $version EOF # On major enhance major version, set minor and patch to 0 # On minor enhance minor version, set patch to 0 # On patch enhance patch version case "$type" in "major") field1=$(expr $field1 + 1) field2=0 field3=0 ;; "minor") field2=$(expr $field2 + 1) field3=0 ;; "patch") field3=$(expr $field3 + 1) ;; *) echo "Error: Invalid update type '$type'" >&2 return 1 ;; esac new_version="$field1.$field2.$field3" echo "$new_version" openvas-scanner-23.17.0/.github/install-openvas-dependencies.sh000077500000000000000000000017261500171107200244300ustar00rootroot00000000000000# This script installs openvas-dependencies assuming that gvm-libs is already installed. # Usually it is run within a gvm-libs image. 
#/bin/sh set -ex apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ bison \ build-essential \ clang \ clang-format \ clang-tools \ cmake \ curl \ git \ lcov \ pkg-config \ libssl-dev \ libgnutls28-dev \ libgpgme-dev \ libjson-glib-dev \ libksba-dev \ libpaho-mqtt-dev \ libpcap-dev \ libgcrypt-dev \ libssh-dev \ libbsd-dev \ libgnutls30 \ libgssapi3-heimdal \ krb5-multidev \ libasn1-8-heimdal \ libroken19-heimdal \ libhdb9-heimdal \ libpopt0 \ libcurl4 \ libcurl4-gnutls-dev \ libhiredis-dev \ && rm -rf /var/lib/apt/lists/* curl -L -o cgreen.tar.gz https://github.com/cgreen-devs/cgreen/archive/refs/tags/1.6.3.tar.gz -k tar -xzf cgreen.tar.gz && cd cgreen-1.6.3 make install ldconfig openvas-scanner-23.17.0/.github/prepare-feed.sh000066400000000000000000000013641500171107200212170ustar00rootroot00000000000000#!/bin/sh # This script prepares the feed used for integration tests. # We don't use the download action because it is not capapble of a fork based # workflow. [ -z $FEED_DIR ] && FEED_DIR="/var/lib/openvas/plugins" DOCKER_CMD=docker FEED_IMAGE="registry.community.greenbone.net/community/vulnerability-tests" set -e printf "Copying feed $FEED_IMAGE " FEED_VERSION=$($DOCKER_CMD run --rm $FEED_IMAGE sh -c 'ls /var/lib/openvas/' | sort -r | head -n 1) printf "(version: $FEED_VERSION) to $FEED_DIR\n" # instanciate container CFP="/var/lib/openvas/$FEED_VERSION/vt-data/nasl/" CID=$($DOCKER_CMD create $FEED_IMAGE) rm -rf $FEED_DIR mkdir -p $FEED_DIR $DOCKER_CMD cp $CID:$CFP $FEED_DIR mv $FEED_DIR/nasl/* $FEED_DIR rm -r $FEED_DIR/nasl $DOCKER_CMD rm $CID openvas-scanner-23.17.0/.github/sign-assets.sh000066400000000000000000000017651500171107200211250ustar00rootroot00000000000000#!/bin/bash set -e # use own gpg_home to not intefere with other settings tmp= trap 'rm -rf "$tmp"' EXIT INT TERM HUP tmp=$(mktemp -d) export GNUPGHOME="$tmp" # enable gpg to work in container environments: # https://d.sb/2016/11/gpg-inappropriate-ioctl-for-device-errors printf "use-agent\npinentry-mode loopback" > $GNUPGHOME/gpg.conf printf "allow-loopback-pinentry" > $GNUPGHOME/gpg-agent.conf echo RELOADAGENT | gpg-connect-agent # store password, we need it multiple times read -s password # store to file mv "$1" "$GNUPGHOME/private.pgp" # import and gather key id key_id=$(echo "$password" | \ gpg --import --batch --armor --passphrase-fd 0 $GNUPGHOME/private.pgp 2>&1 | \ grep "key [A-Z0-9]*:" | \ head -n 1 | \ sed 's/.*key \([A-Z0-9]*\):.*/\1/') echo "key_id: $key_id" # Create a signed ASC for each file in the assets directory for file in assets/*; do if [ -f "$file" ]; then echo $password | gpg --default-key $key_id --batch --passphrase-fd 0 --detach-sign -a "$file" fi done openvas-scanner-23.17.0/.github/workflows/000077500000000000000000000000001500171107200203555ustar00rootroot00000000000000openvas-scanner-23.17.0/.github/workflows/README.md000066400000000000000000000075261500171107200216460ustar00rootroot00000000000000# Continuous Integration Workflow Documentation This document outlines the Continuous Integration (CI) pipeline, detailing how to trigger releases and the specific roles of various jobs within the workflow. ## Release Trigger Process To initiate a release, navigate to `Actions -> CI` in the GitHub repository, and click on `Run workflow`. Choose from the following options: - `major`: For a major release with incompatible changes. - `minor`: For a minor release introducing new features. - `patch`: For a patch release focusing on bug fixes and minor improvements. 
- `no_release`: To run the pipeline without releasing, updating the edge image. ## Jobs Overview The CI pipeline incorporates multiple jobs, each with a specific function in the development lifecycle. ### 1. Initialization (`init`) If the initialization fails it will prevent further execution of `build`. - **Purpose**: Sets the release type based on the input or event that triggered the workflow. - **Workflow File**: `init.yaml` ### 2. Unit Tests (`unittests`) - **Purpose**: Executes unit tests to validate code changes. - **Workflow File**: `tests.yml` If the unit tests fails it will prevent further execution of `build`. ### 3. Build (`build`) - **Purpose**: Compiles and builds the project, preparing it for testing and deployment. - **Dependencies**: Requires successful completion of `unittests`. - **Workflow File**: `build.yml` If the build fails it will prevent further execution of `functional`. ### 4. Linting (`linting`) - **Purpose**: Ensures code quality and consistency through linting. - **Workflow File**: `linting.yml` If linting fails it will not prevent execution of the other steps, as it may be that newer versions of the used tooling finds new linting issues that are not affecting the binary as much. ### 5. Functional Testing (`functional`) - **Purpose**: Conducts functional tests on the build. - **Dependencies**: Needs a successful `build`. - **Workflow File**: `functional.yaml` If the functional tests fail it will prevent further execution of `containerization`. ### 6. Containerization - **Purpose**: Packages the build into Docker containers. - **Jobs**: - **Container**: Uses `push-container.yml`. - **Container Testing**: Uses `push-container-testing.yml`. - **Container Oldstable**: Uses `push-container-oldstable.yml`. - **Dependencies**: Depends on `build`, `init`, and `functional`. If the `containerization` fails the smoketests cannot be executed. ### 7. Smoke Tests (`smoketests`) - **Purpose**: Conducts tests on helm chart based on the previously pushed docker image. - **Conditions**: Excluded during pull request events. - **Dependencies**: Relies on `container`, `build`, and `init`. - **Workflow File**: `smoketest.yaml` If the smoketests fail the helm chart will not be updated and releases be prevented. ### 8. Helm Chart Deployment (`helm`) - **Purpose**: Deploys Helm chart, assuming `IMAGE_REGISTRY` is configured. - **Conditions**: Triggered if `IMAGE_REGISTRY` is set. - **Dependencies**: Depends on `smoketests`, `container`, `build`, and `init`. - **Workflow File**: `push-helm-chart.yml` ### 9. Release (`release`) - **Purpose**: Handles the release process for different version types. - **Conditions**: Activated based on the release type set in `init`. - **Dependencies**: Requires `smoketests`, `container`, `build`, and `init`. - **Workflow File**: `release.yml` ## Secrets and Authentication The CI workflow employs GitHub secrets for secure authentication and interaction with external services such as DockerHub. ### Utilized Secrets - **DOCKERHUB_USERNAME**: DockerHub username. - **DOCKERHUB_TOKEN**: Token for DockerHub with write access to the registry. - **GREENBONE_BOT_TOKEN**: Token for Helm chart registry and GitHub repository operations. - **GREENBONE_BOT**: Username for git commits. - **GREENBONE_BOT_MAIL**: Email address for git commits. 
openvas-scanner-23.17.0/.github/workflows/auto_label.yml000066400000000000000000000004351500171107200232110ustar00rootroot00000000000000name: Labeler on: pull_request: permissions: pull-requests: write contents: read jobs: label: runs-on: self-hosted-generic steps: - uses: greenbone/actions/pr-conventional-commit-labeler@main with: configuration-toml: release_tag.toml openvas-scanner-23.17.0/.github/workflows/build.yml000066400000000000000000000015311500171107200221770ustar00rootroot00000000000000name: "Build" on: [workflow_call] jobs: C: runs-on: self-hosted-generic container: image: registry.community.greenbone.net/community/gvm-libs:stable steps: - uses: actions/checkout@v4 - name: install dependencies run: | sh .github/install-openvas-dependencies.sh - name: build run: | cmake -Bbuild -DCMAKE_C_COMPILER=/usr/share/clang/scan-build-14/libexec/ccc-analyzer cmake --build build Rust: runs-on: self-hosted-generic steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-rust # we just check if it is compilable, the actual compilation is now done in: # - in the Dockefiles # - in functional.yml (x86_64) only # - in release.yml (aarch64, x86_64) - run: cargo check working-directory: rust openvas-scanner-23.17.0/.github/workflows/codeql.yml000066400000000000000000000017511500171107200223530ustar00rootroot00000000000000name: "CodeQL" on: push: branches: [ main ] pull_request: branches: [ main ] schedule: - cron: '30 5 * * 0' # 5:30h on Sundays jobs: analyze: name: Analyze runs-on: self-hosted-generic permissions: actions: read contents: read security-events: write container: image: registry.community.greenbone.net/community/gvm-libs:edge steps: - name: Checkout repository uses: actions/checkout@v4 - name: install dependencies run: | sh .github/install-openvas-dependencies.sh - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: 'c' # build between init and analyze ... - name: Configure and Compile openvas-scanner run: | mkdir build && cd build/ && cmake \ -DCMAKE_BUILD_TYPE=Release .. && make install working-directory: ${{ github.WORKSPACE }} - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 openvas-scanner-23.17.0/.github/workflows/control.yml000066400000000000000000000135531500171107200225670ustar00rootroot00000000000000name: CI on: workflow_dispatch: inputs: release: description: "Use 'major' for incompatible changes, 'minor' for new features, and 'patch' for fixes or 'no_release' to trigger the pipeline without doing a release." type: choice options: - "no_release" - "major" - "minor" - "patch" required: true default: "no_release" push: branches: [ main] tags: ["v*"] pull_request: types: - opened - synchronize - reopened - closed repository_dispatch: schedule: # rebuild image every sunday - cron: "0 0 * * 0" # Grants rights to push to the Github container registry. # The main workflow has to set the permissions. 
permissions: contents: read packages: write id-token: write pull-requests: write jobs: # sets the release kind when it wasn't triggered by an workflow dispatch # this prevents us from having to pass down all labels, event_name, etc # to init.yml adapt_release: runs-on: self-hosted-generic outputs: kind: ${{ steps.kind.outputs.kind}} steps: - name: "Debug" run: | echo "${{ github.event_name }}" echo "${{ github.event.pull_request.merged }}" echo "${{ github.event.pull_request.labels }}" - name: "set KIND = no_release" run: echo "KIND=no_release" >> $GITHUB_ENV - name: "override KIND = ${{ inputs.release }}" if: github.event_name == 'workflow_dispatch' run: echo "KIND=${{ inputs.release }}" >> $GITHUB_ENV - name: "override KIND = major" if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'major_release') run: echo "KIND=major" >> $GITHUB_ENV - name: "override KIND = minor" if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'minor_release') run: echo "KIND=minor" >> $GITHUB_ENV - name: "override KIND = patch" if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'patch_release') run: echo "KIND=patch" >> $GITHUB_ENV - id: kind run: | echo "kind=${{ env.KIND }}">> "$GITHUB_OUTPUT" init: needs: [adapt_release] uses: ./.github/workflows/init.yaml with: release: ${{ needs.adapt_release.outputs.kind }} unittests: name: unit-tests uses: ./.github/workflows/tests.yml build: uses: ./.github/workflows/build.yml linting: uses: ./.github/workflows/linting.yml functional: # needs: [unittests, build] uses: ./.github/workflows/functional.yaml container: needs: [build, init] uses: ./.github/workflows/push-container.yml secrets: dockerhub_user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub_token: ${{ secrets.DOCKERHUB_TOKEN}} cosign_key_opensight: ${{ secrets.COSIGN_KEY_OPENSIGHT }} cosign_password_opensight: ${{ secrets.COSIGN_KEY_PASSWORD_OPENSIGHT }} greenbone_bot_token: ${{ secrets.GREENBONE_BOT_TOKEN }} greenbone_registry: ${{ vars.GREENBONE_REGISTRY }} greenbone_registry_user: ${{ secrets.GREENBONE_REGISTRY_USER }} greenbone_registry_token: ${{ secrets.GREENBONE_REGISTRY_TOKEN }} greenbone_registry_replication_user: ${{ secrets.GREENBONE_REGISTRY_REPLICATION_USER }} greenbone_registry_replication_token: ${{ secrets.GREENBONE_REGISTRY_REPLICATION_TOKEN }} mattermost_webhook_url: ${{ secrets.MATTERMOST_WEBHOOK_URL }} with: is_latest_tag: ${{needs.init.outputs.docker_build_is_latest}} is_version_tag: ${{needs.init.outputs.docker_build_is_version }} container-testing: name: container needs: [init, build ] uses: ./.github/workflows/push-container-testing.yml secrets: dockerhub_user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub_token: ${{ secrets.DOCKERHUB_TOKEN}} with: is_latest_tag: ${{needs.init.outputs.docker_build_is_latest}} is_version_tag: ${{needs.init.outputs.docker_build_is_version}} container-oldstable: name: container needs: [init, build ] uses: ./.github/workflows/push-container-oldstable.yml secrets: dockerhub_user: ${{ secrets.DOCKERHUB_USERNAME }} dockerhub_token: ${{ secrets.DOCKERHUB_TOKEN}} with: is_latest_tag: ${{needs.init.outputs.docker_build_is_latest}} is_version_tag: ${{needs.init.outputs.docker_build_is_version}} release: permissions: contents: write # we release after container build so that we can release on a closed pr as we don't push the release container yet # instead it is pushed after the tag is created. 
# # For now we just don't use it as a dependency for releases which may is counter intuitive needs: [functional, init] if: ( needs.init.outputs.release_kind == 'major' || needs.init.outputs.release_kind == 'minor' || needs.init.outputs.release_kind == 'patch' ) uses: ./.github/workflows/release.yml with: new_version: ${{ needs.init.outputs.release_new_version }} latest_version: ${{ needs.init.outputs.release_latest_version }} release_kind: ${{ needs.init.outputs.release_kind }} release_ref: ${{ needs.init.outputs.release_ref }} project: ${{ needs.init.outputs.release_project }} repository: ${{ github.repository }} secrets: token: ${{ secrets.GREENBONE_BOT_TOKEN }} name: ${{ secrets.GREENBONE_BOT }} email: ${{ secrets.GREENBONE_BOT_MAIL }} gpg_key: ${{ secrets.GPG_KEY }} gpg_pass: ${{ secrets.GPG_PASSPHRASE }} helm: if: github.event_name != 'pull_request' && vars.IMAGE_REGISTRY != '' needs: [container, init] permissions: packages: write uses: ./.github/workflows/push-helm-chart.yml secrets: user: ${{ secrets.GREENBONE_BOT }} token: ${{ secrets.GITHUB_TOKEN }} with: registry: ${{ vars.IMAGE_REGISTRY }} openvas-scanner-23.17.0/.github/workflows/dependency-review.yml000066400000000000000000000004021500171107200245110ustar00rootroot00000000000000name: 'Dependency Review' on: [pull_request] permissions: contents: read pull-requests: write jobs: dependency-review: runs-on: self-hosted-generic steps: - name: 'Dependency Review' uses: greenbone/actions/dependency-review@v3 openvas-scanner-23.17.0/.github/workflows/functional.yaml000066400000000000000000000157171500171107200234160ustar00rootroot00000000000000name: functional on: workflow_call: # smoke test definition. # It depends on build.yml that is controlled via control.yml # jobs: # Tests that gvm-libs, openvas-smb and openvas dependencies work together and # that openvas is buildable and integrates openvas-smb when available distributed-monolith-railguard: runs-on: self-hosted-generic strategy: fail-fast: false matrix: system: - debian_stable steps: - uses: actions/checkout@v4 - run: docker build -t test -f .docker/railguards/${{matrix.system}}.Dockerfile . - run: docker run --rm test ldd /usr/local/sbin/openvas - run: docker run --rm test ldd /usr/local/sbin/openvas | grep libopenvas_wmiclient - run: docker rmi test || true build-rs: runs-on: self-hosted-generic steps: - uses: actions/checkout@v4 - uses: ./.github/actions/compile-x86_64 - name: archive uses: actions/upload-artifact@v4 with: name: rs-binaries path: assets/* retention-days: 1 build-image: runs-on: self-hosted-generic steps: - uses: actions/checkout@v4 - run: | docker build -t registry.community.greenbone.net/community/openvas-scanner:edge -f .docker/prod.Dockerfile . 
docker pull registry.community.greenbone.net/community/vulnerability-tests:community docker pull registry.community.greenbone.net/community/notus-data:community docker save -o ${{ runner.temp }}/vtc.tar registry.community.greenbone.net/community/vulnerability-tests:community docker save -o ${{ runner.temp }}/nc.tar registry.community.greenbone.net/community/notus-data:community docker save -o ${{ runner.temp }}/openvas.tar registry.community.greenbone.net/community/openvas-scanner:edge - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ovimage path: ${{ runner.temp }}/*.tar smoketest: # currently we cannot use internal runner as they cannot start k3s until it # is fixed we need to build the image in another job running on # self-hosted-generic export it as a tar and import it here to work around # the unreliability of registry.community.greenbone.net/community # when called too often runs-on: ubuntu-latest needs: [build-image] steps: - name: Start a local k8s cluster uses: jupyterhub/action-k3s-helm@v4 with: k3s-channel: latest metrics-enabled: false docker-enabled: true - uses: actions/checkout@v4 - name: Download artifact uses: actions/download-artifact@v4 with: name: ovimage path: ${{ runner.temp }} - name: publish current docker image into k3s run: | docker load --input ${{ runner.temp }}/openvas.tar docker load --input ${{ runner.temp }}/nc.tar docker load --input ${{ runner.temp }}/vtc.tar docker image ls -a - name: 'do not, I repeat, do NOT pull that' run: | # repository: registry.community.greenbone.net/community/openvas-scanner # pullPolicy: Always # tag: "edge" echo "openvas:" >> st.yaml echo " repository: registry.community.greenbone.net/community/openvas-scanner" >> st.yaml echo " tag: edge" >> st.yaml echo " pullPolicy: Never" >> st.yaml echo "vulnerabilitytests:" >> st.yaml echo " repository: registry.community.greenbone.net/community/vulnerability-tests" >> st.yaml echo " tag: community" >> st.yaml echo " pullPolicy: Never" >> st.yaml echo "notusdata:" >> st.yaml echo " repository: registry.community.greenbone.net/community/notus-data" >> st.yaml echo " tag: community" >> st.yaml echo " pullPolicy: Never" >> st.yaml cat st.yaml - name: deploy openvasd run: | cd rust/examples/tls/self-signed make delete deploy cd - helm uninstall openvasd --namespace openvasd|| true helm install --namespace openvasd --create-namespace openvasd charts/openvasd/ \ --values charts/openvasd/values.yaml \ --values charts/openvasd/mtls-wo-ingress.yaml \ --values st.yaml kubectl rollout status --watch --timeout 600s deployment/openvasd --namespace openvasd echo "OPENVASD_SERVER=https://$(kubectl get svc -n openvasd | awk 'FNR == 2 {print $(3)}')" >> $GITHUB_ENV - run: kubectl get pods -n openvasd -o=wide - run: kubectl describe pod openvasd -n openvasd - name: smoketest working-directory: rust/crates/smoketest env: SCAN_CONFIG: configs/simple_scan_ssh_only.json CLIENT_KEY: ../../examples/tls/self-signed/client.rsa CLIENT_CERT: ../../examples/tls/self-signed/client.pem run: | make build run || ls -las ../../ # - uses: actions/cache@v4 # with: # path: /tmp/openvas.tar # key: openvas-cache-${{ github.run_id }} tests: runs-on: self-hosted-generic needs: [build-rs] services: redis: image: redis options: >- --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 container: image: registry.community.greenbone.net/community/gvm-libs:stable options: --privileged steps: - uses: actions/checkout@v4 - name: install dependencies run: | sh 
.github/install-openvas-dependencies.sh - name: install openvas run: | cmake -Bbuild -DCMAKE_BUILD_TYPE=Release cmake --build build -- install - uses: actions/download-artifact@v4 with: name: rs-binaries path: assets - name: prepare setup run: | apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \ docker.io FEED_DIR="feed/" sh .github/prepare-feed.sh install -m 755 assets/linux/amd64/feed-verifier /usr/local/bin/ install -m 755 assets/linux/amd64/scannerctl /usr/local/bin/ echo "db_address = tcp://redis:6379" >> /etc/openvas/openvas.conf # TODO export as env variable mv ./feed/* "$(openvas -s | grep plugins_folder | sed 's/plugins_folder = //')/" - run: openvas -s - name: verify feed syntax run: scannerctl syntax --quiet "$(openvas -s | grep plugins_folder | sed 's/plugins_folder = //')/" - name: verify feed update run: feed-verifier || (cat /var/log/gvm/openvas.log && false) - name: verify nasl tests run: | mkdir -p /etc/openvas cd nasl/tests make check - uses: actions/setup-go@v5 with: go-version: '>=1.16.0' - name: verify lint run: | make build ./run -e openvas-nasl-lint working-directory: smoketest_lint openvas-scanner-23.17.0/.github/workflows/helm-release-on-tag.yml000066400000000000000000000017741500171107200246370ustar00rootroot00000000000000name: helm-chart release on: push: tags: ["v*"] jobs: release-helm-chart: name: Release helm chart runs-on: self-hosted-generic strategy: fail-fast: false matrix: chart: - openvasd steps: - name: Checkout uses: actions/checkout@v4 - name: Upload to github registry uses: greenbone/actions/helm-build-push@v3 with: chart-name: ${{ matrix.chart }} registry: ${{ vars.IMAGE_REGISTRY }} registry-subpath: helm-charts/ registry-user: ${{ secrets.GREENBONE_BOT }} registry-token: ${{ secrets.GREENBONE_BOT_PACKAGES_WRITE_TOKEN }} - name: Trigger product helm chart upgrade uses: greenbone/actions/trigger-workflow@v3 with: token: ${{ secrets.GREENBONE_BOT_TOKEN }} repository: "greenbone/product-helm-chart" workflow: product-chart-upgrade.yml inputs: '{"chart": "${{ matrix.chart }}", "tag": "${{ github.ref_name }}"}' openvas-scanner-23.17.0/.github/workflows/init.yaml000066400000000000000000000144511500171107200222110ustar00rootroot00000000000000name: Initialize on: workflow_call: inputs: release: type: string ref_type: type: string required: false default: ${{ github.ref_type }} ref_name: type: string required: false default: ${{ github.ref_name }} base_ref: type: string required: false default: ${{ github.base_ref }} outputs: docker_build_is_latest: description: "Is used to set the tags latest and stable" value: ${{ jobs.init.outputs.is_latest_tag }} docker_build_is_version: description: "Is used to set the version tags: major, major-minor, major-minor-patch" value: ${{ jobs.init.outputs.is_version_tag }} docker_test_tag: description: "Is used to smoke test the latest push image" value: ${{ jobs.init.outputs.docker_tag }} release_new_version: description: "new_version, is a release information" value: ${{ jobs.init.outputs.new_version }} release_latest_version: description: "latest_version, is a release information" value: ${{ jobs.init.outputs.latest_version }} release_kind: description: "release_kind, is a release information" value: ${{ jobs.init.outputs.release_kind }} release_ref: description: "release_ref, is a release information" value: ${{ jobs.init.outputs.release_ref }} release_project: description: "The name of the project to be released" value: ${{ jobs.init.outputs.project }} jobs: init: runs-on: 
self-hosted-generic outputs: is_latest_tag: ${{ steps.version.outputs.is_latest_tag }} is_version_tag: ${{ steps.version.outputs.is_version_tag }} docker_tag: ${{ steps.version.outputs.docker_tag }} new_version: ${{ steps.version.outputs.new_version }} latest_version: ${{ steps.version.outputs.latest_version }} release_kind: ${{ steps.version.outputs.release_kind }} release_ref: ${{ steps.version.outputs.release_ref }} project: ${{ steps.version.outputs.project}} steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: "set IS_VERSION_TAG" run: | echo "IS_VERSION_TAG=${{ inputs.ref_type == 'tag' && startsWith(inputs.ref_name, 'v') }}" >> $GITHUB_ENV # set defaults echo "IS_LATEST_TAG=false" >> $GITHUB_ENV - name: "set IS_LATEST_TAG" if: ( env.IS_VERSION_TAG == 'true' ) run: | # find the latest version that is not ourself export LATEST_VERSION=$(git tag -l | grep -v '${{ inputs.ref_name }}' | sort -r --version-sort | head -n 1) # get major minor patch versions IFS='.' read -r latest_major latest_minor latest_patch << EOF $LATEST_VERSION EOF IFS='.' read -r tag_major tag_minor tag_patch << EOF ${{ inputs.ref_name }} EOF # remove leading v latest_major=$(echo $latest_major | cut -c2-) tag_major=$(echo $tag_major | cut -c2-) echo "$tag_major >= $latest_major" if [[ $tag_major -ge $latest_major && ($tag_minor -ne 0 || $tag_patch -ne 0) ]]; then # set this tag to latest and stable echo "IS_LATEST_TAG=true" >> $GITHUB_ENV fi echo "VERSION=$tag_major.$tag_minor.$tag_patch" >> $GITHUB_ENV - name: "set DOCKER_TAG = edge" if: ( env.IS_VERSION_TAG != 'true' ) run: | echo "DOCKER_TAG=edge" >> $GITHUB_ENV - name: "set DOCKER_TAG = ${{ env.VERSION }}" if: ( env.IS_VERSION_TAG == 'true' ) run: | echo "DOCKER_TAG=$VERSION" >> $GITHUB_ENV - name: "set KIND = ${{ inputs.release }}" run: echo "KIND=${{ inputs.release }}" >> $GITHUB_ENV - name: "set RELEASE_REF = ${{ inputs.ref_name }}" if: inputs.base_ref == '' run: echo "RELEASE_REF=${{ inputs.ref_name }}" >> $GITHUB_ENV - name: "set RELEASE_REF = ${{ inputs.base_ref }}" if: inputs.base_ref != '' run: echo "RELEASE_REF=${{ inputs.base_ref }}" >> $GITHUB_ENV - name: RELEASE_REF != NULL run: ([ -n "${{ env.RELEASE_REF }}" ]) - name: "LATEST_VERSION" if: inputs.release != 'no_release' run: | git tag if [[ "${{ env.RELEASE_REF }}" = "main" ]]; then echo "LATEST_VERSION=$(git tag | grep "^v" | sed 's/^v//' | sort --version-sort | tail -n 1)" >> $GITHUB_ENV else echo "LATEST_VERSION=$(git tag | grep "^v${{ env.RELEASE_REF }}" | sed 's/^v//' | sort --version-sort | tail -n 1)" >> $GITHUB_ENV fi - name: "default LATEST_VERSION" run: | # default to 0.1.0 when there is no previous tag and on main branch if ([[ -z "${{ env.LATEST_VERSION }}" ]] && [[ "${{ env.RELEASE_REF }}" = "main" ]]); then echo "LATEST_VERSION=0.1.0" >> $GITHUB_ENV fi - name: LATEST_VERSION != NULL if: env.KIND != 'no_release' run: ([ -n "${{ env.LATEST_VERSION }}" ]) - name: RELEASE_KIND != NULL run: ([ -n "${{ env.KIND }}" ]) - name: "NEW_VERSION" if: env.KIND != 'no_release' run: | echo "NEW_VERSION=$(sh .github/enhance_version.sh ${{ env.LATEST_VERSION }} ${{ env.KIND }})" >> $GITHUB_ENV # if version is set set docker-tag to latest # if not and branch is main then set to edge # if not and branch is not main then set to unknown - name: NEW_VERSION != NULL if: env.KIND != 'no_release' run: ([ -n "${{ env.NEW_VERSION }}" ]) - name: set output id: version run: | # release echo "latest_version=${{ env.LATEST_VERSION }}">> "$GITHUB_OUTPUT" echo "new_version=${{ env.NEW_VERSION 
}}">> "$GITHUB_OUTPUT" echo "release_kind=${{ env.KIND }}">> "$GITHUB_OUTPUT" echo "release_ref=${{ env.RELEASE_REF }}">> "$GITHUB_OUTPUT" echo "project=$(echo "${{ github.repository }}" | sed 's/.*\///' )" >> "$GITHUB_OUTPUT" # docker echo "is_latest_tag=${{ env.IS_LATEST_TAG }}">> "$GITHUB_OUTPUT" echo "is_version_tag=${{ env.IS_VERSION_TAG }}">> "$GITHUB_OUTPUT" echo "docker_tag=${{ env.DOCKER_TAG }}">> "$GITHUB_OUTPUT" openvas-scanner-23.17.0/.github/workflows/linting.yml000066400000000000000000000023161500171107200225460ustar00rootroot00000000000000name: "Linting" on: [workflow_call] jobs: C: runs-on: self-hosted-generic steps: - name: Check out openvas-scanner uses: actions/checkout@v4 - name: Formatting run: sh .github/check-c-formatting.sh ${{ github.base_ref }} Rust: runs-on: self-hosted-generic defaults: run: working-directory: rust steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-rust - run: cargo clippy --all-targets -- -D warnings - run: cargo fmt --check Rust-Typos: runs-on: self-hosted-generic defaults: run: working-directory: rust steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-rust - run: cargo install typos-cli || true - run: typos Rust-Audit: runs-on: self-hosted-generic defaults: run: working-directory: rust steps: - uses: actions/checkout@v4 - run: cargo install cargo-audit || true - run: cargo audit License-Headers: runs-on: self-hosted-generic steps: - name: Check out openvas-scanner uses: actions/checkout@v4 - name: Check license headers run: bash ./check_license_headers.bash openvas-scanner-23.17.0/.github/workflows/push-container-oldstable.yml000066400000000000000000000031521500171107200260070ustar00rootroot00000000000000name: Container on: workflow_call: inputs: is_latest_tag: required: true type: string is_version_tag: required: true type: string secrets: dockerhub_user: required: true dockerhub_token: required: true jobs: debian_oldstable: name: ghcr:debian:oldstable runs-on: "self-hosted-generic" steps: - name: Checkout repository uses: actions/checkout@v4 - name: Container build and push 3rd gen id: build-and-push uses: greenbone/actions/container-build-push-generic@v3 with: build-docker-file: .docker/prod.Dockerfile build-args: | REPOSITORY=${{ github.repository }} cosign-key: ${{ secrets.cosign_key_opensight }} cosign-key-password: ${{ secrets.cosign_password_opensight }} # The tlog function does not currently support an ed25519 key. cosign-tlog-upload: "false" image-labels: | org.opencontainers.image.vendor=Greenbone org.opencontainers.image.base.name=greenbone/gvm-libs image-tags: | type=raw,value=oldstable,enable=${{ inputs.is_latest_tag }} type=raw,value=oldstable-edge,enable=${{ github.ref_name == 'main' }} type=raw,value=oldstable-{{branch}}-{{sha}},enable=${{ github.ref_type == 'branch' && github.event_name == 'push' && github.ref_name != 'main' }} type=ref,event=pr registry: ${{ vars.IMAGE_REGISTRY }} registry-username: ${{ github.actor }} registry-password: ${{ secrets.GITHUB_TOKEN }} openvas-scanner-23.17.0/.github/workflows/push-container-testing.yml000066400000000000000000000032241500171107200255130ustar00rootroot00000000000000name: Container on: workflow_call: inputs: is_latest_tag: required: true type: string is_version_tag: required: true type: string secrets: dockerhub_user: required: true dockerhub_token: required: true jobs: # TODO: do we need to push or is building enough? 
debian_testing: name: ghcr:debian:testing runs-on: "self-hosted-generic" steps: - name: Checkout repository uses: actions/checkout@v4 - name: Container build and push 3rd gen id: build-and-push uses: greenbone/actions/container-build-push-generic@v3 with: build-docker-file: .docker/prod.Dockerfile build-args: | REPOSITORY=${{ github.repository }} cosign-key: ${{ secrets.cosign_key_opensight }} cosign-key-password: ${{ secrets.cosign_password_opensight }} # The tlog function does not currently support an ed25519 key. cosign-tlog-upload: "false" image-labels: | org.opencontainers.image.vendor=Greenbone org.opencontainers.image.base.name=greenbone/gvm-libs image-tags: | type=raw,value=testing,enable=${{ inputs.is_latest_tag }} type=raw,value=testing-edge,enable=${{ github.ref_name == 'main' }} type=raw,value=testing-{{branch}}-{{sha}},enable=${{ github.ref_type == 'branch' && github.event_name == 'push' && github.ref_name != 'main' }} type=ref,event=pr registry: ${{ vars.IMAGE_REGISTRY }} registry-username: ${{ github.actor }} registry-password: ${{ secrets.GITHUB_TOKEN }} openvas-scanner-23.17.0/.github/workflows/push-container.yml000066400000000000000000000136701500171107200240460ustar00rootroot00000000000000name: Container on: workflow_call: inputs: is_latest_tag: required: true type: string is_version_tag: required: true type: string secrets: dockerhub_user: required: true dockerhub_token: required: true cosign_key_opensight: required: true cosign_password_opensight: required: true greenbone_bot_token: required: true greenbone_registry: required: true greenbone_registry_user: required: true greenbone_registry_token: required: true greenbone_registry_replication_user: required: false greenbone_registry_replication_token: required: false mattermost_webhook_url: required: true # Grants rights to push to the Github container registry. permissions: contents: read packages: write id-token: write pull-requests: write jobs: debian_stable_arm64: name: ghcr:debian:stable:arm64 runs-on: self-hosted-generic-arm64 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Container build and push 3rd gen id: build-and-push uses: greenbone/actions/container-build-push-generic@v3 with: image-platforms: linux/arm64 build-docker-file: .docker/prod.Dockerfile build-args: | REPOSITORY=${{ github.repository }} cosign-key: ${{ secrets.cosign_key_opensight }} cosign-key-password: ${{ secrets.cosign_password_opensight }} # The tlog function does not currently support an ed25519 key. 
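# Note: with the tlog upload disabled, the signature is not recorded in a public transparency log (Rekor); the image is still signed with the cosign key configured below.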
cosign-tlog-upload: "false" image-labels: | org.opencontainers.image.vendor=Greenbone org.opencontainers.image.base.name=greenbone/gvm-libs image-tags: | # when IS_LATEST_TAG is set create a stable and a latest tag type=raw,value=latest,enable=${{ inputs.is_latest_tag }} type=raw,value=stable,enable=${{ inputs.is_latest_tag }} # if a version tag is set then create version tags type=semver,pattern={{version}},enable=${{ inputs.is_version_tag }} type=semver,pattern={{major}}.{{minor}},enable=${{ inputs.is_version_tag }} type=semver,pattern={{major}},enable=${{ inputs.is_version_tag }} # if on main or a branch TODO calculate upfront type=raw,value=edge,enable=${{ github.ref_name == 'main' }} type=raw,value={{branch}}-{{sha}},enable=${{ github.ref_type == 'branch' && github.event_name == 'push' && github.ref_name != 'main' }} # use pr-$PR_ID for pull requests (will not be uploaded) type=ref,event=pr registry: ${{ vars.IMAGE_REGISTRY }} registry-username: ${{ github.actor }} registry-password: ${{ secrets.GITHUB_TOKEN }} debian_stable: name: ghcr:debian:stable runs-on: "self-hosted-generic" steps: - name: Checkout repository uses: actions/checkout@v4 - name: Container build and push 3rd gen id: build-and-push uses: greenbone/actions/container-build-push-generic@v3 with: build-docker-file: .docker/prod.Dockerfile build-args: | REPOSITORY=${{ github.repository }} cosign-key: ${{ secrets.cosign_key_opensight }} cosign-key-password: ${{ secrets.cosign_password_opensight }} # The tlog function does not currently support an ed25519 key. cosign-tlog-upload: "false" image-labels: | org.opencontainers.image.vendor=Greenbone org.opencontainers.image.base.name=greenbone/gvm-libs image-tags: | # when IS_LATEST_TAG is set create a stable and a latest tag type=raw,value=latest,enable=${{ inputs.is_latest_tag }} type=raw,value=stable,enable=${{ inputs.is_latest_tag }} # if a version tag is set then create version tags type=semver,pattern={{version}},enable=${{ inputs.is_version_tag }} type=semver,pattern={{major}}.{{minor}},enable=${{ inputs.is_version_tag }} type=semver,pattern={{major}},enable=${{ inputs.is_version_tag }} # if on main or a branch TODO calculate upfront type=raw,value=edge,enable=${{ github.ref_name == 'main' }} type=raw,value={{branch}}-{{sha}},enable=${{ github.ref_type == 'branch' && github.event_name == 'push' && github.ref_name != 'main' }} # use pr-$PR_ID for pull requests (will not be uploaded) type=ref,event=pr registry: ${{ vars.IMAGE_REGISTRY }} registry-username: ${{ github.actor }} registry-password: ${{ secrets.GITHUB_TOKEN }} scout-user: ${{ secrets.dockerhub_user }} scout-password: ${{ secrets.dockerhub_token }} greenbone_reg_debian_stable: name: greenbone-reg:debian:stable uses: greenbone/workflows/.github/workflows/container-build-push-2nd-gen.yml@main with: image-url: community/openvas-scanner image-labels: | org.opencontainers.image.vendor=Greenbone org.opencontainers.image.base.name=greenbone/gvm-libs service: openvas-scanner secrets: COSIGN_KEY_OPENSIGHT: ${{ secrets.cosign_key_opensight }} COSIGN_KEY_PASSWORD_OPENSIGHT: ${{ secrets.cosign_password_opensight }} DOCKERHUB_USERNAME: ${{ secrets.dockerhub_user }} DOCKERHUB_TOKEN: ${{ secrets.dockerhub_token }} GREENBONE_BOT_TOKEN: ${{ secrets.greenbone_bot_token }} GREENBONE_REGISTRY: ${{ secrets.greenbone_registry }} GREENBONE_REGISTRY_USER: ${{ secrets.greenbone_registry_user }} GREENBONE_REGISTRY_TOKEN: ${{ secrets.greenbone_registry_token }} GREENBONE_REGISTRY_REPLICATION_USER: ${{
secrets.greenbone_registry_replication_user }} GREENBONE_REGISTRY_REPLICATION_TOKEN: ${{ secrets.greenbone_registry_replication_token }} MATTERMOST_WEBHOOK_URL: ${{ secrets.mattermost_webhook_url }} openvas-scanner-23.17.0/.github/workflows/push-helm-chart.yml000066400000000000000000000010631500171107200241010ustar00rootroot00000000000000name: "Helm Push" on: workflow_call: inputs: registry: required: true type: string secrets: user: required: true token: required: true jobs: helm: runs-on: self-hosted-generic steps: - uses: actions/checkout@v4 - uses: greenbone/actions/helm-build-push@v3 with: chart-name: openvasd registry: ${{ inputs.registry }} registry-subpath: helm-charts/ registry-user: ${{ secrets.user }} registry-token: ${{ secrets.token }} openvas-scanner-23.17.0/.github/workflows/release.yml000066400000000000000000000137061500171107200225270ustar00rootroot00000000000000name: "release" on: workflow_call: inputs: new_version: required: true type: string latest_version: required: true type: string release_kind: required: true type: string release_ref: required: true type: string project: required: true type: string repository: required: true type: string secrets: token: required: true name: required: true email: required: true gpg_key: required: true gpg_pass: required: true # This job first determines the target branch of the closed pull request. If the target branch is "main", # then the latest release tag is used. If no release tag exists, it is set to 0.1.0. If it is a release # branch (e.g. v22), then the latest tag within that major version is used. # # For a patch release, the latest tag is enhanced with 0.0.1, leaving the major and minor versions as # they are. # # For a minor release, the latest tag is enhanced with 0.1.0, and the patch version is set to 0. # # For a major release, a branch is created for the latest major release found by tag, and the version # is enhanced with $latest_tag + 1.0.0, increasing the major version by 1 and setting the minor and # patch versions to 0. # # Major version releases are only valid on the "main" branch. # # Once the version is found and enhanced, each CMakeLists file is updated to the new # version, and a commit is created in the found branch. 
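# A worked example of the bump logic above (illustrative only): assuming the latest tag is v22.4.5, a patch release yields 22.4.6, a minor release yields 22.5.0, and a major release yields 23.0.0 while a v22 working branch is created from v22.4.5.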
jobs: release: name: release runs-on: "self-hosted-generic" env: RELEASE_KIND: ${{inputs.release_kind}} RELEASE_REF: ${{inputs.release_ref}} LATEST_VERSION: ${{inputs.latest_version}} NEW_VERSION: ${{inputs.new_version}} PROJECT: ${{inputs.project}} REPOSITORY: ${{inputs.repository}} steps: - uses: actions/checkout@v4 with: token: ${{ secrets.token }} fetch-depth: '0' - name: set git credentials run: | git config --global user.email "${{ secrets.email }}" git config --global user.name "${{ secrets.name }}" - name: "create working branch for previous major release (${{ env.LATEST_VERSION }})" if: ( env.RELEASE_KIND == 'major' ) run: | # checkout latest version git checkout "v${{ env.LATEST_VERSION }}" # get just the major version of latest version export BRANCH_NAME=$(echo "${{ env.LATEST_VERSION }}" | sed 's/^\([0-9]*\).*/v\1/') git checkout -b "$BRANCH_NAME" && git push origin "$BRANCH_NAME" || true # create binaries - uses: greenbone/actions/setup-pontos@v3 - uses: ./.github/actions/compile-x86_64 - uses: ./.github/actions/compile-aarch64 - run: mv assets/linux/arm64/openvasd assets/openvasd-aarch64-unknown-linux-gnu - run: mv assets/linux/amd64/openvasd assets/openvasd-x86_64-unknown-linux-gnu - run: mv assets/linux/arm64/scannerctl assets/scannerctl-aarch64-unknown-linux-gnu - run: mv assets/linux/amd64/scannerctl assets/scannerctl-x86_64-unknown-linux-gnu - run: rm -rf assets/linux - run: ls -las assets/ # create branch of version - name: prepare project version ${{ env.RELEASE_REF }} ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }} run: | # jump back for the case that we switched to a tag git checkout "${{ env.RELEASE_REF }}" # ignore failure on setting version pontos-version update ${{ env.NEW_VERSION }} || true # set app version on chart awk '{sub(/appVersion: "[0-9]+\.[0-9]+\.[0-9]+"/,"appVersion: \"${{ env.NEW_VERSION }}\""); print}' charts/openvasd/Chart.yaml | tee /tmp/Chart.yaml mv /tmp/Chart.yaml charts/openvasd/Chart.yaml # as soon as pontos-version release is available and it supports cargo do # cd rust # pontos-version update ${{ env.NEW_VERSION }} # but since we don't upload cargo modules to registry the version doesn't matter as of now. if git diff --exit-code --quiet; then echo "There are no modified files, skipping." else git add CMakeLists.txt git add charts/openvasd/Chart.yaml git commit -m "Automated commit: change version from ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }}" git fetch --all git rebase origin/${{ env.RELEASE_REF}} git push origin ${{ env.RELEASE_REF }} fi - name: release ${{ env.PROJECT }} ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }} run: | pontos-changelog \ --current-version ${{ env.LATEST_VERSION }} \ --next-version ${{ env.NEW_VERSION }} \ --config changelog.toml \ --repository $REPOSITORY \ --versioning-scheme semver \ -o /tmp/changelog.md || true # we would rather have empty release notes than no release if [ ! 
-f "/tmp/changelog.md" ]; then touch /tmp/changelog.md fi echo "${{ secrets.token }}" | gh auth login --with-token export nrn="v${{ env.NEW_VERSION }}" gh release create "$nrn" -F /tmp/changelog.md - name: "sign ${{ env.PROJECT }}" run: | export nrn="v${{ env.NEW_VERSION }}" export filename="$PROJECT-$nrn" curl -sfSL --retry 3 --retry-connrefused --retry-delay 2 -o assets/$filename.zip https://github.com/${{ github.repository }}/archive/refs/tags/$nrn.zip curl -sfSL --retry 3 --retry-connrefused --retry-delay 2 -o assets/$filename.tar.gz https://github.com/${{ github.repository }}/archive/refs/tags/$nrn.tar.gz echo -e "${{ secrets.gpg_key }}" > private.pgp echo ${{ secrets.gpg_pass }} | bash .github/sign-assets.sh private.pgp rm assets/$filename.zip rm assets/$filename.tar.gz gh release upload $nrn assets/* openvas-scanner-23.17.0/.github/workflows/sbom-upload.yml000066400000000000000000000004231500171107200233210ustar00rootroot00000000000000name: SBOM upload on: workflow_dispatch: push: branches: ["main"] jobs: SBOM-upload: runs-on: self-hosted-generic permissions: id-token: write contents: write steps: - name: 'SBOM upload' uses: greenbone/actions/sbom-upload@v3 openvas-scanner-23.17.0/.github/workflows/tests.yml000066400000000000000000000015261500171107200222460ustar00rootroot00000000000000name: "Unit-Tests" on: [workflow_call] jobs: C: runs-on: self-hosted-generic container: image: registry.community.greenbone.net/community/gvm-libs:stable steps: - uses: actions/checkout@v4 - name: install dependencies run: | sh .github/install-openvas-dependencies.sh - name: unit-tests run: | cmake -Bbuild -DCMAKE_BUILD_TYPE=Release CTEST_OUTPUT_ON_FAILURE=1 cmake --build build -- tests test Rust: runs-on: self-hosted-generic defaults: run: working-directory: rust steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-rust - name: unit-tests run: cargo test --lib --tests --workspace - name: experimental unit-tests run: cargo test --lib --tests --workspace --features experimental openvas-scanner-23.17.0/.gitignore000066400000000000000000000002231500171107200167450ustar00rootroot00000000000000build/ nasl/nasl_grammar.output nasl/nasl_grammar.tab.c nasl/nasl_grammar.tab.h .cache/ testsuiterun.nasl .vscode .venv/ *.bak assets/ *.rsa *.pem openvas-scanner-23.17.0/.mergify.yml000066400000000000000000000035121500171107200172240ustar00rootroot00000000000000pull_request_rules: # backports from main branch - name: backport main patches to stable branch conditions: - base=main - label=backport-to-stable actions: backport: branches: - stable - name: backport main patches to oldstable branch conditions: - base=main - label=backport-to-oldstable actions: backport: branches: - oldstable - name: backport main patches to middleware branch conditions: - base=main - label=backport-to-middleware actions: backport: branches: - middleware # backports from upcoming release branch - name: backport stable patches to main branch conditions: - base=stable - label=backport-to-main actions: backport: branches: - main - name: backport stable patches to oldstable branch conditions: - base=stable - label=backport-to-oldstable actions: backport: branches: - oldstable - name: backport stable patches to middleware branch conditions: - base=stable - label=backport-to-middleware actions: backport: branches: - middleware # backports from current release branch - name: backport oldstable patches to main branch conditions: - base=oldstable - label=backport-to-main actions: backport: branches: - main - name: backport oldstable 
patches to stable branch conditions: - base=oldstable - label=backport-to-stable actions: backport: branches: - stable - name: backport oldstable patches to middleware branch conditions: - base=oldstable - label=backport-to-middleware actions: backport: branches: - middleware openvas-scanner-23.17.0/.pontos-header-ignore000066400000000000000000000007201500171107200210110ustar00rootroot00000000000000README.txt md4.* md5.* iconv.h time.c smb.h smb_signing.* smb_crypt* nasl.h nasl_var.* nasl_tree.* nasl_text_utils.* nasl_socket.h nasl_scanner_glue.* nasl_raw.h nasl_packet_forgery* nasl_misc_funcs.* nasl_lex_ctxt.* nasl_init.* nasl_http.* nasl_host.* nasl_func.* nasl_debug.* nasl_crypto* nasl_cmd_exec.* nasl_buildin_synscan.c nasl_buildin_openvas_tcp_scanner.c hmacmd5.* exec.h charset.c charcnv.c capture_packet.* byteorder.h arc4.c tests/keys/ownertrust.txt openvas-scanner-23.17.0/CHANGELOG.md000066400000000000000000000631071500171107200166000ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [22.4] (unreleased) ### Added - Add support for volatile keys. [#682](https://github.com/greenbone/openvas/pull/682) - Extend nasl lint to check the syntax of arguments for the script_xref() function. [#714](https://github.com/greenbone/openvas/pull/714) - Recheck the alive status of a host after a specified number of NVT timeouts. [#729](https://github.com/greenbone/openvas/pull/729) - Integrate sentry for debugging purposes [#742](https://github.com/greenbone/openvas/pull/742) - Add support for non-interactive shell to nasl_ssh_shell_open(). [#744](https://github.com/greenbone/openvas/pull/744) [#757](https://github.com/greenbone/openvas/pull/757) - Add message type validation for proto_post_wrapped. [#805](https://github.com/greenbone/openvas/pull/805) - Add nasl function sftp_enabled_check() to check if the sftp subsystem is enabled on the target. - [#853](https://github.com/greenbone/openvas/pull/853) - [#862](https://github.com/greenbone/openvas/pull/862) - Add `find_all` to eregmatch() nasl function. Backport PR #875. [#876](https://github.com/greenbone/openvas/pull/876) - Add nasl functions for checking ssl/tls secure renegotiation and performing re-handshake. [#889](https://github.com/greenbone/openvas/pull/889) ### Changed - The function script_bugtraq_id is now skipped and the linter warns about it. [#724](https://github.com/greenbone/openvas/pull/724) - Refactor dead host status sending. [#807](https://github.com/greenbone/openvas/pull/807) - Refactor openvas.c. [#810](https://github.com/greenbone/openvas/pull/810) [#811](https://github.com/greenbone/openvas/pull/811) - Handle script timeout as script preference with ID 0 [#844](https://github.com/greenbone/gvm-libs/pull/844) ### Fixed - Use fchmod to change file permissions instead of setting them on open, to prevent race conditions [#854](https://github.com/greenbone/openvas-scanner/pull/854) - Several minor potential security risks in different files, spotted by Code QL [#854](https://github.com/greenbone/openvas-scanner/pull/854) - Fix plugins upload. Backport #878 [#880](https://github.com/greenbone/openvas/pull/880) - Fix error message when NVTI cache init failed.
Backport #885 [#887](https://github.com/greenbone/openvas/pull/887) - Fix segmentation fault when freeing hosts and alive hosts [#888](https://github.com/greenbone/openvas/pull/888) ### Removed - Remove handling of source_iface related preferences. [#730](https://github.com/greenbone/openvas/pull/730) [22.4]: https://github.com/greenbone/openvas-scanner/compare/stable...main ## [21.4.3] (unreleased) ### Added - Add nasl function sftp_enabled_check() to check if the sftp subsystem is enabled on the target. - [#853](https://github.com/greenbone/openvas/pull/853) - [#862](https://github.com/greenbone/openvas/pull/862) ### Changed - Changed defaults for installation locations [#826](https://github.com/greenbone/openvas-scanner/pull/826) - SYSCONFDIR is /etc by default now - LOCALSTATEDIR is /var by default now - OPENVAS_RUN_DIR is /run/ospd by default now - OPENVAS_FEED_LOCK_PATH is /var/lib/openvas/feed-update.lock by default now ### Deprecated ### Removed ### Fixed - Fix interrupted scan when the process table is full. [#832](https://github.com/greenbone/openvas-scanner/pull/832) - Use fchmod to change file permissions instead of setting them on open, to prevent race conditions [#854](https://github.com/greenbone/openvas-scanner/pull/854) - Fix plugins upload [#878](https://github.com/greenbone/openvas/pull/878) - Fix error message when NVTI cache init failed [#885](https://github.com/greenbone/openvas/pull/885) - Fix potential segfault. [#884](https://github.com/greenbone/openvas/pull/884) [21.4.3]: https://github.com/greenbone/openvas-scanner/compare/v21.4.2...stable ## [21.4.2] - 2021-08-03 ### Fixed - Fix clang-analyzer warnings. [#791](https://github.com/greenbone/openvas/pull/791) [#795](https://github.com/greenbone/openvas/pull/795) [21.4.2]: https://github.com/greenbone/openvas-scanner/compare/v21.4.1...v21.4.2 ## [21.4.1] - 2021-06-23 ### Added - Improve nasl linter to catch more cases of undeclared variables. [#728](https://github.com/greenbone/openvas-scanner/pull/728) - Add deprecation warning for source_iface related settings which will be removed with the 22.4 release. [#732](https://github.com/greenbone/openvas-scanner/pull/732) - New credentials for SSH to get su privileges. Backport of [#744](https://github.com/greenbone/openvas-scanner/pull/744). [#753](https://github.com/greenbone/openvas-scanner/pull/753) ### Changed - Update default log config [#711](https://github.com/greenbone/openvas-scanner/pull/711) ### Fixed - Use host from the original hosts list when boreas is enabled. [#725](https://github.com/greenbone/openvas/pull/725) - Initialize the kb to store results for openvas-nasl [#735](https://github.com/greenbone/openvas/pull/735) - Fix unittest. Mock kb_lnk_reset. [#748](https://github.com/greenbone/openvas/pull/748) ### Removed [21.4.1]: https://github.com/greenbone/openvas/compare/v21.4.1...stable ## [21.4.0] - 2021-04-15 ### Added - Add scanner-only option to enable tls debugging. [#558](https://github.com/greenbone/openvas/pull/558) - Extend nasl lint to detect if a function parameter is used twice. [#585](https://github.com/greenbone/openvas/pull/585) - Add option to specify if a host can be scanned through its IPv4 and IPv6 addresses in parallel. [#604](https://github.com/greenbone/openvas/pull/604) [#645](https://github.com/greenbone/openvas/pull/645) - Add insert_tcp_options and insert_tcp_v6_options nasl functions. [#618](https://github.com/greenbone/openvas/pull/618) - Add get_tcp_option and extend dump_tcp_packet nasl functions.
[#621](https://github.com/greenbone/openvas/pull/621) - Add new scanner-only option for spawning NASL functions with a different owner. [#634](https://github.com/greenbone/openvas/pull/634) - Add debug logs for allow_simultaneous_ips=no. [#685](https://github.com/greenbone/openvas/pull/685) - Add min_free_mem and max_sysload scanner-only options. [#690](https://github.com/greenbone/openvas/pull/690) ### Changed - Store results in main_kb instead of host_kb. [#550](https://github.com/greenbone/openvas/pull/550) - Also use the internal function name in some nasl log messages. [#611](https://github.com/greenbone/openvas/pull/611) - Move more scanner preferences to gvm-libs to make them available for openvas-nasl. [#614](https://github.com/greenbone/openvas/pull/614) ### Removed - Use the nvticache name from gvm-libs, defined in nvticache.h. [#578](https://github.com/greenbone/openvas/pull/578) [21.4.0]: https://github.com/greenbone/openvas/compare/oldstable...v21.4.0 ## [20.8.4] (unreleased) ### Added ### Changed - Changed defaults for installation locations [#826](https://github.com/greenbone/openvas-scanner/pull/826) - SYSCONFDIR is /etc by default now - LOCALSTATEDIR is /var by default now - OPENVAS_RUN_DIR is /run/ospd by default now - OPENVAS_FEED_LOCK_PATH is /var/lib/openvas/feed-update.lock by default now ### Deprecated ### Removed ### Fixed [20.8.4]: https://github.com/greenbone/openvas-scanner/compare/v20.8.3...oldstable ## [20.8.3] - 2021-08-03 ### Fixed - Fix clang-analyzer warnings. [#791](https://github.com/greenbone/openvas/pull/791) [#795](https://github.com/greenbone/openvas/pull/795) [20.8.3]: https://github.com/greenbone/openvas/compare/v20.8.2...v20.8.3 ## [20.8.2] - 2021-06-23 ### Added - Check for wrong names or values in the script_xrefs params. [#650](https://github.com/greenbone/openvas/pull/650) [#653](https://github.com/greenbone/openvas/pull/653) - Log a message if the scanner did not launch all plugins against a host. [#700](https://github.com/greenbone/openvas/pull/700) [#734](https://github.com/greenbone/openvas/pull/734) ### Changed - Replace bogus data with a better message and the vendor. [#665](https://github.com/greenbone/openvas/pull/665) - Improve the log message for failed WMI connects or missing WMI support. [#670](https://github.com/greenbone/openvas/pull/670) - Don't use g_error. Use g_warning instead and let the scanner continue. [#710](https://github.com/greenbone/openvas/pull/710) - Update COPYING file. [#750](https://github.com/greenbone/openvas/pull/750) - Set file permissions when syncing community feed [#769](https://github.com/greenbone/openvas-scanner/pull/769) ### Fixed - Fix issues discovered with the clang compiler. [#654](https://github.com/greenbone/openvas/pull/654) - Fix gcc-9 and gcc-10 warnings. [#655](https://github.com/greenbone/openvas/pull/655) - Fix double free in nasl_cert_query. [#658](https://github.com/greenbone/openvas/pull/658) - Fix message to the client if there is an iface problem. [#695](https://github.com/greenbone/openvas/pull/695) - Fix SIGSEGV when no best route is found. [#702](https://github.com/greenbone/openvas/pull/702) - Fix host count when reverse_lookup_only is enabled. [#715](https://github.com/greenbone/openvas/pull/715) - Use host from the original hosts list when boreas is enabled. Backport of [PR #727](https://github.com/greenbone/openvas/pull/727). [#725](https://github.com/greenbone/openvas/pull/725) - The function description of nasl_ssh_shell_read() has been fixed.
[#755](https://github.com/greenbone/openvas/pull/755) ### Removed - Remove code from the openvas daemon era. Do not flushall redis. [#689](https://github.com/greenbone/openvas/pull/689) - Remove deprecated option logfile. [#713](https://github.com/greenbone/openvas/pull/713) [20.8.2]: https://github.com/greenbone/openvas/compare/v20.8.1...v20.8.2 ## [20.8.1] - 2021-02-01 ### Added - Extend nasl lint to detect if a function parameter is used twice. [#590](https://github.com/greenbone/openvas/pull/590) - Add support for TLSv1.3. [#588](https://github.com/greenbone/openvas/pull/588) [#598](https://github.com/greenbone/openvas/pull/598) - Add alternative for supporting snmp during scans. [#594](https://github.com/greenbone/openvas/pull/594) - Add resolve_hostname_to_multiple_ips() NASL function. [#596](https://github.com/greenbone/openvas/pull/596) - Add dump_icmp_packet() and dump_icmp_v6_packet() nasl functions. [#609](https://github.com/greenbone/openvas/pull/609) - Send message to the client with hosts count. [#606](https://github.com/greenbone/openvas/pull/606) - Use nasl_perror on invalid input and add more documentation. [#608](https://github.com/greenbone/openvas/pull/608) - Add timeout argument to ssh_connect() nasl function to set the connection timeout. [#631](https://github.com/greenbone/openvas/pull/631) ### Changed - Downgrade wmi queries log level for common errors. [#602](https://github.com/greenbone/openvas/pull/602) [#607](https://github.com/greenbone/openvas/pull/607) - Rename some nasl functions and function parameters for consistency and fix a byte order issue in get_ipv6_element. [#613](https://github.com/greenbone/openvas/pull/613) - Change log level from debug to message to show max_host and max_scan during scan start. [#626](https://github.com/greenbone/openvas/pull/626) - Changed the redis-openvas.conf, so that it is compliant with the 5.0+ version(s) of redis. [#668](https://github.com/greenbone/openvas/pull/668) ### Fixed - Fork vhosts before creating the socket. [#576](https://github.com/greenbone/openvas/pull/576) - Check if another forked child has already added the same vhost. [#581](https://github.com/greenbone/openvas/pull/581) - Send duplicated hosts as dead hosts to ospd, to adjust scan progress calculation. [#586](https://github.com/greenbone/openvas/pull/586) - Only send the signal if the pid is a positive value. [#593](https://github.com/greenbone/openvas/pull/593) - When routes with the same mask are found, the route with the better metric is chosen. [#593](https://github.com/greenbone/openvas/pull/593) [#639](https://github.com/greenbone/openvas/pull/639) - Fix malformed target. [#625](https://github.com/greenbone/openvas/pull/625) - Fix snmp result. Only return the value and do not stop at the first \n. [#627](https://github.com/greenbone/openvas/pull/627) - Fix masking of IPv6 addresses. [#635](https://github.com/greenbone/openvas/pull/635) - Fix technique switch for getting the appropriate interface to use for the IPv6 dst addr. [#636](https://github.com/greenbone/openvas/pull/636) - Fix host count. Set to -1 when the target string is invalid. [#646](https://github.com/greenbone/openvas/pull/646) [20.8.1]: https://github.com/greenbone/openvas/compare/v20.8.0...v20.8.1 ## [20.8.0] - 2020-08-11 ### Added - Make greenbone-nvt-sync create a lock file during feed sync. [#458](https://github.com/greenbone/openvas/pull/458) [#459](https://github.com/greenbone/openvas/pull/459) - Extend script_get_preference() to get the value by id.
[#470](https://github.com/greenbone/openvas/pull/470) - Add extended environmental variables info to greenbone-nvt-sync help text. [#488](https://github.com/greenbone/openvas/pull/488) - Extend nasl functions which generate results with an optional "uri" parameter [#526](https://github.com/greenbone/openvas/pull/526) - Add nasl function to get the host kb index. [#530](https://github.com/greenbone/openvas/pull/530) - Print the filter used by pcap in the error message. [#537](https://github.com/greenbone/openvas/pull/537) [#540](https://github.com/greenbone/openvas/pull/540) ### Changed - The logging of the NASL internal regexp functions was extended to include the pattern in case of a failed regcomp(). [#397](https://github.com/greenbone/openvas/pull/397) - Add config for gpg keyring path (OPENVAS_GPG_BASE_DIR) [#407](https://github.com/greenbone/openvas/pull/407) - Use __func__ instead of __FUNCTION__ [#419](https://github.com/greenbone/openvas/pull/419) - Use pcap_findalldevs() instead of the deprecated function pcap_lookupdev() [#422](https://github.com/greenbone/openvas/pull/422) [#430](https://github.com/greenbone/openvas/pull/430) - Add port-range option for openvas-nasl [#431](https://github.com/greenbone/openvas/pull/431) - Add test_alive_hosts_only feature. [#456](https://github.com/greenbone/openvas/pull/456) - Don't reload the plugins when starting a new scan. [#458](https://github.com/greenbone/openvas/pull/458) - Drop http feed sync. [#478](https://github.com/greenbone/openvas/pull/478) - Add aligned summary to log at scan end. [#496](https://github.com/greenbone/openvas/pull/496) - Unify log messages about start/end of scan and of hosts. [#500](https://github.com/greenbone/openvas/pull/500) - Use flock to lock the feed lock file. [#507](https://github.com/greenbone/openvas/pull/507) - Move alive detection module (Boreas) into gvm-libs [#519](https://github.com/greenbone/openvas/pull/519) - Allow setting all legal types of icmp v6 in the icmp header in openvas-nasl. [#542](https://github.com/greenbone/openvas/pull/542) - The output of the NASL dump_* packet forgery functions was made consistent. [#555](https://github.com/greenbone/openvas/pull/555) - Make drop_privileges setting a scanner-only preference. [#557](https://github.com/greenbone/openvas/pull/557) - Feed lock path is now configurable. [#574](https://github.com/greenbone/openvas/pull/574) ### Fixed - Improve signal handling when updating the vhosts list. [#425](https://github.com/greenbone/openvas/pull/425) - Wait for all children instead of waiting for just one at a time. [#428](https://github.com/greenbone/openvas/pull/428) - Don't detect MongoDB as an HTTP service. [#447](https://github.com/greenbone/openvas/pull/447) - Set status finished and send a message if the port list is invalid. [#453](https://github.com/greenbone/openvas/pull/453) - Fix format-truncation warning in GCC 8.2 and later. [#461](https://github.com/greenbone/openvas/pull/461) - Clean the new kb when the scan was stopped and the host has not been started. [#494](https://github.com/greenbone/openvas/pull/494) - Prevent child deadlock. [#491](https://github.com/greenbone/openvas/pull/491) - Memleak fixes for kb_item_get_str(). [#502](https://github.com/greenbone/openvas/pull/502) - Fix denied hosts. [#510](https://github.com/greenbone/openvas/pull/510) - Fix openvas-nasl. Add kb key/value for all vhosts. [#533](https://github.com/greenbone/openvas/pull/533) - Wait for the last plugin to finish before changing to another category.
[#534](https://github.com/greenbone/openvas/pull/534) - Corrected function parameter names in nasl_perror calls. [#539](https://github.com/greenbone/openvas/pull/539) - Various updates to the nasl_perror() error texts. [#539](https://github.com/greenbone/openvas/pull/542) - Fix icmp checksum calculation in openvas-nasl. [#543](https://github.com/greenbone/openvas/pull/543) - Fix ipv6 flow label in nasl_packet_forgery_v6() for openvas-nasl. [#545](https://github.com/greenbone/openvas/pull/545) - Fix name of NASL internal IPPROTO_IP variable. [#552](https://github.com/greenbone/openvas/pull/552) - Fix byte ordering and wrong PROTO identifier in dump_ipv6_packet() for openvas-nasl. [#549](https://github.com/greenbone/openvas/pull/549) - Fix size calculation which led to an alloc error in get_tcp_element() of openvas-nasl. [#546](https://github.com/greenbone/openvas/pull/546) - Fix filtering out of default 'radio' type preferences [#560](https://github.com/greenbone/openvas/pull/560) - Allow group access to lockfile and fix empty timestamp [#562](https://github.com/greenbone/openvas/pull/562) - Do not simply abort when the log file is not writable but print an error message and shut down gracefully instead. [#661](https://github.com/greenbone/openvas/pull/661) ### Removed - Removed "network scan" mode. This includes removal of NASL API methods "scan_phase()" and "network_targets()". Sending a "network_mode=yes" in a scanner configuration will have no effect anymore. [#493](https://github.com/greenbone/openvas/pull/493) [20.8.0]: https://github.com/greenbone/openvas/compare/openvas-7.0...v20.8.1 ## [7.0.1] ### Added - Display gvm-libs version in `openvas --version` output [#436](https://github.com/greenbone/openvas/pull/436) ### Changed - Improve handling of invalid or existent IDs of NVT preferences. [#416](https://github.com/greenbone/openvas/pull/416) - Perform a scan even if there are missing plugins. [#439](https://github.com/greenbone/openvas/pull/439) ### Fixed - Do not store in memory an empty file received as nvt preference. [#409](https://github.com/greenbone/openvas/pull/409) - Fix stop scan. [#414](https://github.com/greenbone/openvas/pull/414) - Fix hanging scans. [#423](https://github.com/greenbone/openvas/pull/423) - Improve signal handling when updating the vhosts list. [#426](https://github.com/greenbone/openvas/pull/426) - Wait for all children instead of waiting for just one at a time. [#429](https://github.com/greenbone/openvas/pull/429) - Release redis connection. [#452](https://github.com/greenbone/openvas/pull/452) [7.0.1]: https://github.com/greenbone/openvas/compare/v7.0.0...openvas-7.0 ## [7.0.0] (2019-10-11) ### Added - An ID has been added to NVT preferences. [#282](https://github.com/greenbone/openvas/pull/282) - A new NVT cross references data handling has been added. [#317](https://github.com/greenbone/openvas/pull/317) - Add option --scan-stop. [#352](https://github.com/greenbone/openvas/pull/352) - Add support to open an rc4 stream cipher, the function to encrypt stream data using the cipher handle, and the function to close a handler. [#354](https://github.com/greenbone/openvas/pull/354) - Add one single config for redis to config/redis-openvas.conf. [#370](https://github.com/greenbone/openvas/pull/370) ### Changed - Vendor version is now an option in the config file. [#363](https://github.com/greenbone/openvas/pull/363) - The NVT preference format has been changed. [#275](https://github.com/greenbone/openvas/pull/275) - Redis supported versions must be 3.2 or higher.
[#287](https://github.com/greenbone/openvas/pull/287) - Log directory is now configurable. [#316](https://github.com/greenbone/openvas/pull/316) - The greenbone-nvt-sync script is not allowed to run as root. [#323](https://github.com/greenbone/openvas/pull/323) - OpenVAS Scanner has been renamed to OpenVAS (Open Vulnerability Assessment Scanner). [#337](https://github.com/greenbone/openvas/pull/337) [#343](https://github.com/greenbone/openvas/pull/343) - Retry until a host finishes and frees a db before running a new host scan, in case there is no free redis db. Therefore an infinite loop has been added around the call to kb_new(). [#340](https://github.com/greenbone/openvas/pull/340) - Use new nvti_add_tag() instead of plug_set_tag() and remove plug_set_tag(). [#385](https://github.com/greenbone/openvas/pull/385) - Remove dead code about tags regarding former openvas settings "result_prepend_tags" and "result_append_tags". [#386](https://github.com/greenbone/openvas/pull/386) - Check cache/feed errors during plugin scheduling. [#358](https://github.com/greenbone/openvas/pull/358) - Use API for accessing NVTI elements. [#365](https://github.com/greenbone/openvas/pull/365) - Allow send_packet() and send_v6packet() to send packets to broadcast/multicast. [#388](https://github.com/greenbone/openvas/pull/388) ### Fixed - An issue with stuck scans where only a single plugin is running and is beyond its timeout has been addressed. [#289](https://github.com/greenbone/openvas/pull/289) - Fix a type mismatch. Use correct format specifier for size_t. [#299](https://github.com/greenbone/openvas/pull/299) - An issue which caused falling back into a default port in get_ssh_port() has been fixed. [#342](https://github.com/greenbone/openvas/pull/342) - An issue which could have caused a truncated string in register_service() has been fixed. [#373](https://github.com/greenbone/openvas/pull/373) - Reset redis connection after the host scan finished. This avoids leaving open fds, which cause ulimit problems. [#384](https://github.com/greenbone/openvas/pull/384) - Fix mis-identification of Sphinx Search service. [#387](https://github.com/greenbone/openvas/pull/387) - Set a key in redis when the scan finishes and fix stop scan using the right pid. [#390](https://github.com/greenbone/openvas/pull/390) - Fix detection of finger service. [#391](https://github.com/greenbone/openvas/pull/391) - Wait for zombie processes in case of timed out nvts. [#379](https://github.com/greenbone/openvas/pull/379) - Fix handling of file type nvt preferences. [#399](https://github.com/greenbone/openvas/pull/399) ### Removed - Unused be_nice scan preferences have been removed. [#313](https://github.com/greenbone/openvas/pull/313) - OTP has been entirely removed in favor of using the ospd-openvas interface. [#333](https://github.com/greenbone/openvas/pull/333) [#351](https://github.com/greenbone/openvas/pull/351) [#337](https://github.com/greenbone/openvas/pull/337) [#389](https://github.com/greenbone/openvas/pull/389) - Daemon mode has been entirely removed. [#337](https://github.com/greenbone/openvas/pull/337) [#341](https://github.com/greenbone/openvas/pull/341) [7.0.0]: https://github.com/greenbone/openvas/compare/v6.0.1...v7.0.0 ## [6.0.2] (unreleased) ### Changed - The call to wmiexec.py has been replaced with impacket-wmiexec, because the symlink has been added in Debian Stretch with python-impacket 0.9.15-1.
[6.0.2]: https://github.com/greenbone/openvas/compare/v6.0.1...openvas-scanner-6.0 ## [6.0.1] (2019-07-17) ### Added ### Changed - Use lowercase for values added from add_host_name(). [#306](https://github.com/greenbone/openvas/pull/306) - Do not launch the scan if the nvticache is corrupted. [#309](https://github.com/greenbone/openvas/pull/310) - Separate each scan plugin process into its own process group. [#325](https://github.com/greenbone/openvas/pull/325) ### Fixed - An issue which caused the scanner to crash when a plugin is missing during a scan has been addressed. [#296](https://github.com/greenbone/openvas/pull/296) - An issue which caused a scan to hang has been addressed. [#301](https://github.com/greenbone/openvas/pull/301) - Issues in the building process have been addressed. [#308](https://github.com/greenbone/openvas/pull/308) - An issue which caused resuming a task not to work was addressed. [#312](https://github.com/greenbone/openvas/pull/312) - An issue which caused possible null IP values in OTP results / HOST_END has been addressed. [#321](https://github.com/greenbone/openvas/pull/321) - An issue which caused the scanner to finish instantly without any result has been addressed. [#330](https://github.com/greenbone/openvas/pull/330) ### Removed - Currently unused advanced_log related code has been removed. [#327](https://github.com/greenbone/openvas/pull/327) [6.0.1]: https://github.com/greenbone/openvas/compare/v6.0.0...openvas-scanner-6.0 ## [6.0.0] (2019-04-05) ### Added - A function to get the currently running script filename has been added. ### Changed - The nasl debugging mechanism has been improved, replacing preprocessor directives with the g_debug facility. - Code related to redis queries was improved. - OpenVAS reload has been improved. - Documentation has been improved. ### Fixed - An issue related to the log facility and greenbone-nvt-sync has been fixed. - An issue which caused nasl-lint to fail in case of unneeded nested functions has been addressed. - An issue which caused returning erroneous values by get_plugin_preference() has been addressed. - An issue which caused stuck scans where only a single plugin is running and is beyond its timeout has been addressed. - Issues reported by static code analysis have been addressed. - Issues in the building process have been addressed. - Several code improvements and clean-ups have been done. ### Removed - Unused internal_send/recv() functions have been removed. [6.0.0]: https://github.com/greenbone/openvas/compare/v6.0+beta2...v6.0.0 openvas-scanner-23.17.0/CMakeLists.txt000066400000000000000000000164041500171107200175250ustar00rootroot00000000000000# SPDX-FileCopyrightText: 2023 Greenbone AG # # SPDX-License-Identifier: GPL-2.0-or-later cmake_minimum_required (VERSION 3.0) message ("-- Configuring the Scanner...") # VERSION: Always include major, minor and patch level.
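# For example (illustrative only): a release bump to 23.18.0 would change the project() call below to `project (openvas VERSION 23.18.0 LANGUAGES C)`, which populates PROJECT_VERSION_MAJOR, PROJECT_VERSION_MINOR and PROJECT_VERSION_PATCH.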
project (openvas VERSION 23.17.0 LANGUAGES C) if (POLICY CMP0005) cmake_policy (SET CMP0005 NEW) endif (POLICY CMP0005) SET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) if (NOT CMAKE_BUILD_TYPE) set (CMAKE_BUILD_TYPE Debug) endif (NOT CMAKE_BUILD_TYPE) OPTION (ENABLE_COVERAGE "Enable support for coverage analysis" OFF) ## Retrieve git revision (at configure time) include (GetGit) if (NOT CMAKE_BUILD_TYPE MATCHES "Release") if (EXISTS "${CMAKE_SOURCE_DIR}/.git/") if (GIT_FOUND) Git_GET_REVISION(${CMAKE_SOURCE_DIR} ProjectRevision) set (GIT_REVISION "~git-${ProjectRevision}") endif (GIT_FOUND) endif (EXISTS "${CMAKE_SOURCE_DIR}/.git/") endif (NOT CMAKE_BUILD_TYPE MATCHES "Release") ## make format message (STATUS "Looking for clang-format...") find_program (CLANG_FORMAT clang-format) if (CLANG_FORMAT) message (STATUS "Looking for clang-format... ${CLANG_FORMAT}") add_custom_target(format COMMAND ${CLANG_FORMAT} "-i" "./src/*.c" "./misc/*.c" "./nasl/*.c" "./src/*.h" "./misc/*.h" "./nasl/*.h" WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}") else (CLANG_FORMAT) message (STATUS "clang-format not found...") endif (CLANG_FORMAT) # Set dev version if this is a development version and not a full release, # unset (put value 0 or delete line) before a full release and reset after. set (PROJECT_DEV_VERSION 0) # If PROJECT_DEV_VERSION is set, the version string will be set to: # "major.minor.patch~dev${PROJECT_DEV_VERSION}${GIT_REVISION}" # If PROJECT_DEV_VERSION is NOT set, the version string will be set to: # "major.minor.patch${GIT_REVISION}" # For CMAKE_BUILD_TYPE "Release" the git revision will be empty. if (PROJECT_DEV_VERSION) set (PROJECT_VERSION_SUFFIX "~dev${PROJECT_DEV_VERSION}") endif (PROJECT_DEV_VERSION) set (PROJECT_VERSION_STRING "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}${PROJECT_VERSION_SUFFIX}${GIT_REVISION}") ## CPack configuration set (CPACK_CMAKE_GENERATOR "Unix Makefiles") set (CPACK_GENERATOR "TGZ") set (CPACK_INSTALL_CMAKE_PROJECTS ".;openvas;ALL;/") set (CPACK_MODULE_PATH "") set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/COPYING") set (CPACK_RESOURCE_FILE_README "${CMAKE_SOURCE_DIR}/README.md") set (CPACK_RESOURCE_FILE_WELCOME "${CMAKE_SOURCE_DIR}/README.md") set (CPACK_SOURCE_GENERATOR "TGZ") set (CPACK_SOURCE_TOPLEVEL_TAG "") set (CPACK_SYSTEM_NAME "") set (CPACK_TOPLEVEL_TAG "") set (CPACK_PACKAGE_VERSION "${PROJECT_VERSION_STRING}${PROJECT_VERSION_GIT}") set (CPACK_PACKAGE_FILE_NAME "${PROJECT_NAME}-${CPACK_PACKAGE_VERSION}") set (CPACK_SOURCE_PACKAGE_FILE_NAME "${PROJECT_NAME}-${CPACK_PACKAGE_VERSION}") set (CPACK_PACKAGE_VENDOR "The OpenVAS Project") set (CPACK_SOURCE_IGNORE_FILES "${CMAKE_BINARY_DIR}" "/.git/" "swp$" "nasl/nasl_grammar.tab.c" "nasl/nasl_grammar.tab.h" "nasl/nasl_grammar.output" ) include (CPack) ## Variables if (SYSCONF_INSTALL_DIR) set (SYSCONFDIR "${SYSCONF_INSTALL_DIR}") endif (SYSCONF_INSTALL_DIR) if (NOT SYSCONFDIR) set (SYSCONFDIR "/etc") endif (NOT SYSCONFDIR) if (NOT EXEC_PREFIX) set (EXEC_PREFIX "${CMAKE_INSTALL_PREFIX}") endif (NOT EXEC_PREFIX) if (NOT BINDIR) set (BINDIR "${EXEC_PREFIX}/bin") endif (NOT BINDIR) if (NOT SBINDIR) set (SBINDIR "${EXEC_PREFIX}/sbin") endif (NOT SBINDIR) if (NOT LIBDIR) set (_DEFAULT_LIBRARY_INSTALL_DIR lib) if (EXISTS "${EXEC_PREFIX}/lib32/" AND CMAKE_SIZEOF_VOID_P EQUAL 4) set (_DEFAULT_LIBRARY_INSTALL_DIR lib32) endif (EXISTS "${EXEC_PREFIX}/lib32/" AND CMAKE_SIZEOF_VOID_P EQUAL 4) if (EXISTS "${CMAKE_INSTALL_PREFIX}/lib64/" AND CMAKE_SIZEOF_VOID_P EQUAL 8) set 
(_DEFAULT_LIBRARY_INSTALL_DIR lib64) endif (EXISTS "${CMAKE_INSTALL_PREFIX}/lib64/" AND CMAKE_SIZEOF_VOID_P EQUAL 8) set( LIBRARY_INSTALL_DIR "${_DEFAULT_LIBRARY_INSTALL_DIR}") set (LIBDIR "${EXEC_PREFIX}/${LIBRARY_INSTALL_DIR}") message ("Set LIBDIR to ${LIBDIR}") endif (NOT LIBDIR) if (NOT LOCALSTATEDIR) set (LOCALSTATEDIR "/var") endif (NOT LOCALSTATEDIR) if (NOT DATADIR) set (DATADIR "${CMAKE_INSTALL_PREFIX}/share") endif (NOT DATADIR) if (NOT OPENVAS_RUN_DIR) set (OPENVAS_RUN_DIR "/run/ospd") endif (NOT OPENVAS_RUN_DIR) if (NOT DEFINED BUILD_WITH_NETSNMP) set (BUILD_WITH_NETSNMP TRUE) endif (NOT DEFINED BUILD_WITH_NETSNMP) set (OPENVAS_DATA_DIR "${DATADIR}/openvas") set (OPENVAS_STATE_DIR "${LOCALSTATEDIR}/lib/openvas") if (NOT OPENVAS_FEED_LOCK_PATH) set (OPENVAS_FEED_LOCK_PATH "${OPENVAS_STATE_DIR}/feed-update.lock") endif (NOT OPENVAS_FEED_LOCK_PATH) add_definitions (-DOPENVAS_FEED_LOCK_PATH="${OPENVAS_FEED_LOCK_PATH}") if (NOT GVM_LOG_DIR) set (GVM_LOG_DIR "${LOCALSTATEDIR}/log/gvm") endif (NOT GVM_LOG_DIR) set (OPENVAS_SYSCONF_DIR "${SYSCONFDIR}/openvas") set (GVM_SYSCONF_DIR "${SYSCONFDIR}/gvm") if (NOT OPENVAS_NVT_DIR) set (OPENVAS_NVT_DIR "${OPENVAS_STATE_DIR}/plugins") endif (NOT OPENVAS_NVT_DIR) if (NOT GVM_ACCESS_KEY_DIR) set (GVM_ACCESS_KEY_DIR "${GVM_SYSCONF_DIR}") endif (NOT GVM_ACCESS_KEY_DIR) if (NOT OPENVAS_GPG_BASE_DIR) set (OPENVAS_GPG_BASE_DIR "${OPENVAS_SYSCONF_DIR}") endif (NOT OPENVAS_GPG_BASE_DIR) set (OPENVAS_LIB_INSTALL_DIR "${LIBDIR}") set (OPENVAS_CONF "${OPENVAS_SYSCONF_DIR}/openvas.conf") set (NVT_TIMEOUT "320") set (SCANNER_NVT_TIMEOUT "36000") message ("-- Install prefix: ${CMAKE_INSTALL_PREFIX}") ## Version set (OPENVAS_VERSION "${PROJECT_VERSION_STRING}") # Configure Doxyfile with version number configure_file (doc/man/openvas.8.in doc/man/openvas.8 @ONLY) configure_file (VERSION.in VERSION @ONLY) configure_file (src/openvas_log_conf.cmake_in src/openvas_log.conf) ## Testing enable_testing () add_custom_target (tests DEPENDS attack-test pcap-test ipc-openvas-test lsc-test) ## Program if (ENABLE_COVERAGE) set (COVERAGE_FLAGS "--coverage") endif (ENABLE_COVERAGE) set (HARDENING_FLAGS "-Wformat -Wformat-security -D_FORTIFY_SOURCE=2 -fstack-protector") set (LINKER_HARDENING_FLAGS "-Wl,-z,relro -Wl,-z,now") # The "-D_FILE_OFFSET_BITS=64 -DLARGEFILE_SOURCE=1" is necessary for GPGME! set (GPGME_C_FLAGS "-D_FILE_OFFSET_BITS=64 -DLARGEFILE_SOURCE=1") set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${HARDENING_FLAGS}") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COVERAGE_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GPGME_C_FLAGS} \ -std=c11 \ -Wall \ -Wextra \ -Werror \ -Wpedantic \ -Wmissing-prototypes \ -Wshadow \ -Wsequence-point \ -D_BSD_SOURCE \ -D_ISOC11_SOURCE \ -D_SVID_SOURCE \ -D_DEFAULT_SOURCE") if (NOT SKIP_SRC) add_subdirectory (misc) add_subdirectory (nasl) add_subdirectory (src) endif (NOT SKIP_SRC) ## Documentation add_subdirectory (doc) ## Tests enable_testing () ## End openvas-scanner-23.17.0/COPYING000066400000000000000000000431221500171107200160150ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.

openvas-scanner-23.17.0/INSTALL.md

INSTALLATION INSTRUCTIONS FOR OPENVAS
=====================================

Please note: The reference system used by most of the developers is Debian Stable. The build might fail on any other system. Also, it is necessary to install the dependent development packages.

Prerequisites for openvas
-------------------------

Prerequisites:
* a C compiler (e.g. gcc)
* cmake >= 3.0
* libgvm_base, libgvm_util >= 22.4
* glib-2.0 >= 2.42
* gio-2.0
* json-glib-1.0 >= 1.4.4
* bison
* flex
* libgcrypt >= 1.6
* pkg-config
* libpcap
* libgpgme >= 1.1.2
* redis >= 5.0.3
* libssh >= 0.6.0
* libksba >= 1.0.7
* libgnutls >= 3.6.4
* libcurl4-gnutls-dev
* libbsd
* krb5-multidev

Prerequisites for building documentation:
* Doxygen
* pandoc (optional, for building manual and man pages for NASL built-in functions)

Prerequisites for building tests:
* Cgreen (optional, for building tests)

Recommended to have WMI support:
* openvas-smb >= 1.0.1

Recommended for extended Windows support (e.g. automatically start the remote registry service):
* impacket-wmiexec of python-impacket >= 0.9.15 found within your PATH

Recommended to have improved SNMP support:
* netsnmp libraries or alternatively the snmpget binary
* snmp client

Recommended for port scanning and service detection based on nmap:
* nmap

Recommended for port scanning based on pnscan:
* pnscan

Install prerequisites on Debian GNU/Linux 'Bullseye' 11:

    apt-get install gcc pkg-config libssh-gcrypt-dev libgnutls28-dev \
    libglib2.0-dev libjson-glib-dev libpcap-dev libgpgme-dev bison libksba-dev \
    libsnmp-dev libgcrypt20-dev redis-server libbsd-dev libcurl4-gnutls-dev \
    krb5-multidev

Compiling openvas
-----------------

If you have installed required libraries to a non-standard location, remember to set the `PKG_CONFIG_PATH` environment variable to the location of your pkg-config files before configuring:

    export PKG_CONFIG_PATH=/your/location/lib/pkgconfig:$PKG_CONFIG_PATH

Create a build directory and change into it with:

    mkdir build
    cd build

Then configure the build with:

    cmake -DCMAKE_INSTALL_PREFIX=/path/to/your/installation ..

Or (if you want to use the default installation path /usr/local):

    cmake ..

This only needs to be done once. Other cmake variables need to be adjusted as well if you want to have all files in `CMAKE_INSTALL_PREFIX` or in the default installation path `/usr/local`. They can be added to the `cmake` call with `-DVARIABLE=VALUE`.
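For example, a configure call that overrides some of the variables listed below might look like this (a sketch only; the prefix and paths are illustrative, not required values):

    cmake -DCMAKE_INSTALL_PREFIX=/opt/openvas \
      -DSYSCONFDIR=/opt/openvas/etc \
      -DLOCALSTATEDIR=/opt/openvas/var ..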
| Variable               | Default                              |
| ---------------------- | :---------------------------------- |
| SYSCONFDIR             | `/etc`                               |
| LOCALSTATEDIR          | `/var`                               |
| OPENVAS_FEED_LOCK_PATH | `/var/lib/openvas/feed-update.lock`  |
| OPENVAS_RUN_DIR        | `/run/ospd`                          |

Thereafter, the following commands are useful:

    make                # build the scanner
    make doxygen        # build the documentation
    make doxygen-full   # build more developer-oriented documentation
    make doxygen-xml    # build the documentation (XML)
    make manual         # build a HTML manual
    make nasl-man       # build man pages for NASL built-in functions
    make tests          # build tests
    make install        # install the build
    make rebuild_cache  # rebuild the cmake cache

Please note that you may have to execute `make install` as root, especially if you have specified a prefix for which your user does not have full permissions.

To clean up the build environment, simply remove the contents of the `build` directory you created above.

Configuration Options
---------------------

During compilation, the build process uses a set of compiler options which enable very strict error checking and asks the compiler to abort should it detect any errors in the code. This is to ensure a maximum of code quality and security.

Some (especially newer) compilers can be stricter than others when it comes to error checking. While this is a good thing and the developers aim to address all compiler warnings, it may lead the build process to abort on your system. Should you notice error messages causing your build process to abort, do not hesitate to contact the developers by creating a [new issue report](https://github.com/greenbone/openvas/issues/new). Don't forget to include the name and version of your compiler and distribution in your message.

Setting up openvas
------------------

Setting up openvas requires the following steps:

1. (optional) You may decide to change the default scanner preferences by setting them in the file `/etc/openvas/openvas.conf`, or `<SYSCONFDIR>/openvas/openvas.conf` when `SYSCONFDIR` was set via the `cmake` call. If that file does not exist (default), then the default settings are used. You can view them with `openvas -s`. The output of that command is a valid configuration file. The man page (`man openvas`) provides details about the available settings, among them options to restrict the scanner's access regarding scan targets and interfaces.

2. In order to run vulnerability scans, you will need a collection of Network Vulnerability Tests (NVTs) that can be run by openvas. Initially, your NVT collection will be empty. It is recommended that you synchronize with an NVT feed service before starting openvas for the first time. Use [greenbone-feed-sync](https://github.com/greenbone/greenbone-feed-sync/) to synchronize the feed. It can be used to synchronize with the community feed as well as with the enterprise feed; see its documentation for more information. NVT feeds are updated on a regular basis. Be sure to update your NVT collection regularly to detect the latest threats.

3. The scanner needs a running Redis server to temporarily store information gathered on the scanned hosts. Redis 3.2 and newer are supported. See `doc/redis_config.txt` for how to set up and run a Redis server. The easiest and most reliable way to start redis under Ubuntu and Debian is to use systemd.
```bash
sudo cp config/redis-openvas.conf /etc/redis/
sudo chown redis:redis /etc/redis/redis-openvas.conf
echo "db_address = /run/redis-openvas/redis.sock" | sudo tee /etc/openvas/openvas.conf
# Or append to <SYSCONFDIR>/openvas/openvas.conf when SYSCONFDIR was set via the cmake call.
sudo systemctl start redis-server@openvas.service
```

4. The Greenbone Vulnerability Management service (`gvmd`) acts as OSP client to connect to and control scanners. openvas does not act as an OSP service - you need the `OSPD-OpenVAS` module for this. The actual user interfaces (for example GSA or GVM-Tools) will only interact with `gvmd` and/or `ospd-openvas`, not with the scanner. You can launch openvas to upload the plugins into redis using the following command:

       openvas -u

   but `ospd-openvas` will do the update automatically.

5. Please note that although you can run `openvas` as a user without elevated privileges, it is recommended that you start `openvas` as `root` since a number of Network Vulnerability Tests (NVTs) require root privileges to perform certain operations like packet forgery. If you run `openvas` as a user without permission to perform these operations, your scan results are likely to be incomplete. As `openvas` will be launched from an `ospd-openvas` process with sudo, the following configuration is required in the sudoers file:

       sudo visudo

   Add this line to allow the user running `ospd-openvas` to launch `openvas` with root permissions:

       <user> ALL = NOPASSWD: <install prefix>/sbin/openvas

   If you set an install prefix, you have to update the path in the sudoers file too:

       Defaults secure_path=<existing secure paths>:<install prefix>/sbin

Logging Configuration
---------------------

If you encounter problems, by default the scanner writes logs to the file

    /var/log/gvm/openvas.log

When `LOCALSTATEDIR` was set via the `cmake` call, the scanner writes logs to the file

    <LOCALSTATEDIR>/log/gvm/openvas.log

It may contain useful information. The exact location of this file may differ depending on your distribution and installation method. Please have this file ready when contacting the GVM developers via the Greenbone Community Portal or submitting bug reports at <https://github.com/greenbone/openvas-scanner/issues> as it may help to pinpoint the source of your issue.

Logging is configured via the file at the default location

    /etc/openvas/openvas_log.conf

When `SYSCONFDIR` was set via the `cmake` call, the file is located at

    <SYSCONFDIR>/openvas/openvas_log.conf

The configuration is divided into domains like this one:

    [sd main]
    prepend=%t %p
    prepend_time_format=%Y-%m-%d %Hh%M.%S %Z
    file=/var/log/gvm/openvas.log
    level=128

The `level` field controls the amount of logging that is written. The value of `level` can be

    4   Errors.
    8   Critical situation.
    16  Warnings.
    32  Messages.
    64  Information.
    128 Debug. (Lots of output.)

Enabling any level includes all the levels above it. So enabling Information will include Warnings, Critical situations and Errors. To get absolutely all logging, set the level to 128 for all domains in the configuration file.

Logging to `syslog` can be enabled in each domain like:

    [sd main]
    prepend=%t %p
    prepend_time_format=%Y-%m-%d %Hh%M.%S %Z
    file=syslog
    syslog_facility=daemon
    level=128

Static code analysis with the Clang Static Analyzer
---------------------------------------------------

If you want to use the Clang Static Analyzer (https://clang-analyzer.llvm.org/) to do a static code analysis, you can do so by prefixing the configuration and build commands with `scan-build`:

    scan-build cmake ..
    scan-build make

The tool will provide a hint on how to launch a web browser with the results.
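For example, a complete analysis run from a fresh directory might look like this (a sketch; the directory name is arbitrary):

    mkdir build-analysis
    cd build-analysis
    scan-build cmake ..
    scan-build make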
It is recommended to do this analysis in a separate, empty build directory, as in the sketch above, and to empty that directory before each `scan-build` call.

openvas-scanner-23.17.0/README.md

![Greenbone Logo](https://www.greenbone.net/wp-content/uploads/gb_new-logo_horizontal_rgb_small.png)

# OpenVAS Scanner

[![GitHub releases](https://img.shields.io/github/release/greenbone/openvas-scanner.svg)](https://github.com/greenbone/openvas-scanner/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/greenbone/openvas-scanner.svg)](https://hub.docker.com/r/greenbone/openvas-scanner/) [![Docker Image Size](https://img.shields.io/docker/image-size/greenbone/openvas-scanner.svg?maxAge=2592000)](https://hub.docker.com/r/greenbone/openvas-scanner/) [![CI](https://github.com/greenbone/openvas-scanner/actions/workflows/control.yml/badge.svg?branch=main)](https://github.com/greenbone/openvas-scanner/actions/workflows/control.yml?query=branch%3Amain)

This is the OpenVAS Scanner of the Greenbone Community Edition. It is used for the Greenbone Enterprise appliances and is a full-featured scan engine that executes a continuously updated and extended feed of Vulnerability Tests (VTs).

## Releases

All [release files](https://github.com/greenbone/openvas/releases) are signed with the [Greenbone Community Feed integrity key](https://community.greenbone.net/t/gcf-managing-the-digital-signatures/101). This gpg key can be downloaded at https://www.greenbone.net/GBCommunitySigningKey.asc and the fingerprint is `8AE4 BE42 9B60 A59B 311C 2E73 9823 FAA6 0ED1 E580`.

## Installation

This module can be configured, built and installed with the following commands:

    cmake .
    make install

For detailed installation requirements and instructions, please see the file [INSTALL.md](INSTALL.md). The file also contains instructions for setting up `openvas` and for making the scanner available to other GVM modules.

If you are not familiar or comfortable building from source code, we recommend that you use the Greenbone Enterprise TRIAL, a prepared virtual machine with a readily available setup. Information regarding the virtual machine is available at .

## Rust Implementation

This repository also contains a [Rust project](rust/README.md) aiming to replace the current scanner stack (openvas-scanner, ospd-openvas, notus-scanner). It simplifies the use of the scanner and centralizes everything needed for scanning. Currently it uses the openvas-scanner as its scan engine.

## Docker, [Greenbone Community Containers](https://greenbone.github.io/docs/latest/22.4/container/)

If you want to use the docker files provided in this repository, you can pull them from the [Greenbone registry](registry.community.greenbone.net/community/openvas-scanner:stable). You can also build them locally using:

```
docker build -t <image-name> -f .docker/prod.Dockerfile .
```

For more information about building docker images, see the [official documentation](https://docs.docker.com/engine/reference/commandline/build/).

We also provide a [fully containerized solution](https://greenbone.github.io/docs/latest/22.4/container/) for the Greenbone Community Edition.

> Please beware: The Greenbone Community Containers are currently under development.

## Support

For any questions about the usage of `openvas`, please use the [Greenbone Community Portal](https://community.greenbone.net/). If you found a problem with the software, please [create an issue](https://github.com/greenbone/openvas-scanner/issues) on GitHub.
If you are a Greenbone customer, you may alternatively or additionally forward your issue to the Greenbone Support Portal.

## Maintainer

This project is maintained by [Greenbone AG](https://www.greenbone.net/).

## Contributing

Your contributions are highly appreciated. Please [create a pull request](https://github.com/greenbone/openvas-scanner/pulls) on GitHub. Remember to commit the contribution agreement as explained in the [RELICENSE](https://github.com/greenbone/openvas-scanner/tree/main/RELICENSE) folder with your first PR. Bigger changes should be discussed with the development team via the [issues section at GitHub](https://github.com/greenbone/openvas-scanner/issues) first.

## License

This repository consists of two scanner implementations, one in the programming language C and one in the programming language Rust.

This module, except for the Rust implementation in the directory rust/, is licensed under the GNU General Public License v2.0 only. Single files, however, are licensed either under the GNU General Public License v2.0 only or under GNU General Public License v2.0 or later; please see the [license-details.md](license-details.md) file for details.

The Rust implementation in the directory rust/ is licensed under the GNU General Public License v2.0 or later with OpenSSL exception. Single files, however, are additionally licensed under MIT.

openvas-scanner-23.17.0/RELICENSE/
openvas-scanner-23.17.0/RELICENSE/GFoti.md

Thank you for your interest in the project openvas-scanner managed by Greenbone. In order for you to make Contributions now or in the future to this project, You agree to license your Contributions under the MIT-0 license (see below). Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without any attribution. .................. MIT No Attribution (MIT-0) Copyright Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ......................... Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without attribution. However, we will include your name as a Contributor in our CREDITS list as long as your Contribution is used in the project openvas-scanner. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is submitted by you to Greenbone for inclusion in the project openvas-scanner.
Greenbone requires that each Contribution you submit now or in the future to comply with the following commitments documented in the Developer Certificate of Origin (DCO) [https://developercertificate.org/]: ........ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. .....openvas-scanner-23.17.0/RELICENSE/NickKotsakidis.md000066400000000000000000000066371500171107200215410ustar00rootroot00000000000000Thank you for your interest in the project openvas-scanner managed by Greenbone. In order for you to make Contributions now or in the future to this project, You agree to license your Contributions under the MIT-0 license (see below). Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without any attribution. .................. MIT No Attribution (MIT-0) Copyright Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ......................... Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without attribution. However, we will include your name as a Contributor in our CREDITS list as long as your Contribution is used in the project openvas-scanner. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is submitted by you to Greenbone for inclusion in the project openvas-scanner. 
Greenbone requires that each Contribution you submit now or in the future to comply with the following commitments documented in the Developer Certificate of Origin (DCO) [https://developercertificate.org/]: ........ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. .....openvas-scanner-23.17.0/RELICENSE/README.md000066400000000000000000000020461500171107200175520ustar00rootroot00000000000000# openvas-scanner contribution agreement We want to relicense our code to `GPL-3.0-or-later` or `AGPL-3.0-or-later` in the future. Although most of the new code is licensed under `GPL-2.0-or-later` we want to make sure that no problems accure when relicensing. Your contributions are licensed under `MIT-0` and instantly relicensed to our currently used license. This means either `GPL-2.0-or-later` or `GPL-2.0`. Rust code is currently licensed with `GPL-2.0-or-later` per default. C code depends on the location and changes. Please read and commit the /template/template.txt as [Name].md in this folder with your first PR. Having a valid `git.user.name` and `git.user.email` is sufficient. Example usage: ``` # check with e.g. `git config --list` if you have a valid `user.name` and `user.email` set. $ git config --list user.email=Jane.Doe@example.com user.name=jane Doe .... # Commit the template $ cd {path_to_openvas-scanner}/openvas-scanner/RELICENSE $ cp ./template/template.txt JDoe.md $ git add JDoe.md $ git commit ``` Happy hacking!openvas-scanner-23.17.0/RELICENSE/RuffaloLavoisier.md000066400000000000000000000066371500171107200221030ustar00rootroot00000000000000Thank you for your interest in the project openvas-scanner managed by Greenbone. In order for you to make Contributions now or in the future to this project, You agree to license your Contributions under the MIT-0 license (see below). Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without any attribution. .................. 
MIT No Attribution (MIT-0) Copyright Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ......................... Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without attribution. However, we will include your name as a Contributor in our CREDITS list as long as your Contribution is used in the project openvas-scanner. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is submitted by you to Greenbone for inclusion in the project openvas-scanner. Greenbone requires that each Contribution you submit now or in the future to comply with the following commitments documented in the Developer Certificate of Origin (DCO) [https://developercertificate.org/]: ........ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. .....openvas-scanner-23.17.0/RELICENSE/sepehrdaddev.md000066400000000000000000000066371500171107200212650ustar00rootroot00000000000000Thank you for your interest in the project openvas-scanner managed by Greenbone. In order for you to make Contributions now or in the future to this project, You agree to license your Contributions under the MIT-0 license (see below). Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without any attribution. .................. 
MIT No Attribution (MIT-0) Copyright Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ......................... Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without attribution. However, we will include your name as a Contributor in our CREDITS list as long as your Contribution is used in the project openvas-scanner. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is submitted by you to Greenbone for inclusion in the project openvas-scanner. Greenbone requires that each Contribution you submit now or in the future to comply with the following commitments documented in the Developer Certificate of Origin (DCO) [https://developercertificate.org/]: ........ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. .....openvas-scanner-23.17.0/RELICENSE/template/000077500000000000000000000000001500171107200201045ustar00rootroot00000000000000openvas-scanner-23.17.0/RELICENSE/template/template.txt000066400000000000000000000066371500171107200224740ustar00rootroot00000000000000Thank you for your interest in the project openvas-scanner managed by Greenbone. In order for you to make Contributions now or in the future to this project, You agree to license your Contributions under the MIT-0 license (see below). 
Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without any attribution. .................. MIT No Attribution (MIT-0) Copyright Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ......................... Please note that You remain the copyright owner, and anyone receives the right to use your Contribution in proprietary and open source software without attribution. However, we will include your name as a Contributor in our CREDITS list as long as your Contribution is used in the project openvas-scanner. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is submitted by you to Greenbone for inclusion in the project openvas-scanner. Greenbone requires that each Contribution you submit now or in the future to comply with the following commitments documented in the Developer Certificate of Origin (DCO) [https://developercertificate.org/]: ........ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. 
.....openvas-scanner-23.17.0/VERSION.in000066400000000000000000000000271500171107200164340ustar00rootroot00000000000000@CPACK_PACKAGE_VERSION@openvas-scanner-23.17.0/changelog.toml000066400000000000000000000003461500171107200176070ustar00rootroot00000000000000commit_types = [ { message = "^add", group = "Added"}, { message = "^remove", group = "Removed"}, { message = "^change", group = "Changed"}, { message = "^fix", group = "Bug Fixes"}, ] changelog_dir = "changelog" openvas-scanner-23.17.0/charts/000077500000000000000000000000001500171107200162445ustar00rootroot00000000000000openvas-scanner-23.17.0/charts/.gitignore000066400000000000000000000000141500171107200202270ustar00rootroot00000000000000*.rsa *.pem openvas-scanner-23.17.0/charts/Makefile000066400000000000000000000007541500171107200177120ustar00rootroot00000000000000_PHONY: install-http install-mtls uninstall log-openvasd TLS_PATH="../rust/examples/tls/self-signed" install-http: helm install --namespace openvasd --create-namespace openvasd openvasd/ --values openvasd/values.yaml --values openvasd/http-root.yaml install-mtls: helm install --namespace openvasd --create-namespace openvasd openvasd/ --values openvasd/values.yaml uninstall: helm uninstall -n openvasd openvasd log-openvasd: kubectl logs -n openvasd deployment/openvasd -c openvasd openvas-scanner-23.17.0/charts/openvasd/000077500000000000000000000000001500171107200200635ustar00rootroot00000000000000openvas-scanner-23.17.0/charts/openvasd/.helmignore000066400000000000000000000005351500171107200222200ustar00rootroot00000000000000# Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ openvas-scanner-23.17.0/charts/openvasd/Chart.yaml000066400000000000000000000021711500171107200220110ustar00rootroot00000000000000apiVersion: v2 name: openvasd description: A Helm chart for Kubernetes # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives # to be deployed. # # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. appVersion: "23.17.0" openvas-scanner-23.17.0/charts/openvasd/README.md000066400000000000000000000063201500171107200213430ustar00rootroot00000000000000# Helm Chart for `openvasd` deployment This helm chart is only providing the scanner API. It does not include any vulnerability management services. 
If you want to utilize the whole stack, please use: https://greenbone.github.io/docs/latest/22.4/container/index.html#docker-compose-file.

## Requirements

This Helm chart is tested with k3s and Traefik. Note that other options may require unsupported changes.

## mTLS (Enabled by Default)

To use mTLS, store the server certificate and key as a secret named 'ingress-certificate', containing key.pem and certs.pem. For example, deploying `openvasd` into the 'openvasd' namespace with a generated certificate:

```bash
cd ../../rust/examples/tls/Self-Signed\ mTLS\ Method
sh server_certificates.sh
kubectl create secret generic ingress-certificate \
  --from-file=key.pem=./server.rsa \
  --from-file=certs.pem=./server.pem \
  --namespace openvasd
```

Additionally, populate client certificates within a 'client-certs' secret:

```bash
cd ../../rust/examples/tls/Self-Signed\ mTLS\ Method
kubectl create secret generic client-certs \
  --from-file=client1.pem=./client.pem \
  --namespace openvasd
```

There can be multiple client certificates. Verify that the secrets are deployed:

```bash
kubectl describe secrets --namespace openvasd
```

## Install

To install the `openvasd` Helm chart from a local path, execute:

```bash
helm install openvasd ./openvasd/ -f openvasd/values.yaml --namespace openvasd --create-namespace
```

You can also override initial values within openvasd/values.yaml by providing an additional -f flag. For example:

```bash
helm install --namespace openvasd --create-namespace openvasd openvasd/ --values openvasd/values.yaml --values openvasd/http-root.yaml
```

This will start `openvasd` with HTTP and with the API key `changeme`.

## Preconfigured deployment scenarios

### mTLS

This is enabled by default. Please read the requirements section.

### HTTP Single Instance

To deploy `openvasd` as an HTTP instance on the root path, execute:

```bash
helm install --namespace openvasd --create-namespace openvasd openvasd/ --values openvasd/values.yaml --values openvasd/http-root.yaml
```

## Accessing the service

When `routing.enabled` is set, you can access `openvasd` directly via either `http://localhost` (if you provide the http-root.yaml values) or via `https://localhost`.

For testing, you can use the following command:

```bash
curl --verbose --insecure --key $CLIENT_KEY --cert $CLIENT_CERT --head https://127.0.0.1/health
```

## Design decisions

### IngressRouteTCP instead of Ingress

To enable passthrough, IngressRouteTCP is used instead of the usual Ingress definition.

### OSPD and Redis via unix socket

OSPD is used in Unix socket mode to prevent users from bypassing `openvasd` and interfering with scans. The Redis instance is shared between OSPD and OpenVAS, started in Unix socket mode to ensure it is not used by another container.

### No scaling

Due to current architectural limitations, replica count and auto-scaling are disabled. OSPD lacks cluster capabilities and a database setup that allows sharing via multiple instances. Each replica would have its own state, requiring vertical scaling via deployment.

openvas-scanner-23.17.0/charts/openvasd/http-root.yaml

routing: enabled: true service: type: ClusterIP port: 80 ingress: enabled: true annotations: kubernetes.io/ingress.class: "traefik" className: "traefik" hosts: # Captures everything of / and delegates it to openvasd. Although this is not feasible # on deployments that contain multiple http services, our current deployment model # treats a sensor as its own machine.
# Currently it is configured with http instead of https in mind. # Do not use it in production environments. - paths: - path: / pathType: ImplementationSpecific backend: service: name: openvasd port: number: 80 openvasd: apikey: changeme tls: certificates: deploy_server: false deploy_client: false

openvas-scanner-23.17.0/charts/openvasd/mtls-wo-ingress.yaml

# these settings will start openvasd without exposing the 443 port routing: enabled: false service: type: ClusterIP port: 443 openvasd: tls: certificates: deploy_server: true deploy_client: true

openvas-scanner-23.17.0/charts/openvasd/templates/
openvas-scanner-23.17.0/charts/openvasd/templates/NOTES.txt

This deployment takes a while. To verify if the rollout is complete, you can use: $ kubectl rollout status --watch --timeout 600s deployment/openvasd -n {{ .Release.Namespace }} After the deployment is finished, it should be available via: {{- if .Values.routing.enabled -}} {{- $apiKey := .Values.openvasd.apikey }} {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} $ curl -vk --head https://localhost/ --key yourclientkey.rsa --cert yourclientkey.pem {{- else }} $ curl -vk --head https://localhost/ -H "X-API-KEY: {{ $apiKey }}" {{- end }} {{- else }} $ curl -vk --head https://localhost/ -H "X-API-KEY: {{ $apiKey }}" {{- end }} {{- else }} export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "openvasd.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT echo "Visit http://127.0.0.1:8080 to use your application" {{- end }}

openvas-scanner-23.17.0/charts/openvasd/templates/_helpers.tpl

{{/* Expand the name of the chart. */}} {{- define "openvasd.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "openvasd.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{- end }} {{/* Create chart name and version as used by the chart label. */}} {{- define "openvasd.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "openvasd.labels" -}} helm.sh/chart: {{ include "openvasd.chart" . }} {{ include "openvasd.selectorLabels" .
}} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} {{- define "openvasd.selectorLabels" -}} app.kubernetes.io/name: {{ include "openvasd.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} {{- define "openvasd.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} {{- default (include "openvasd.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }}

openvas-scanner-23.17.0/charts/openvasd/templates/deployment.yaml

apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "openvasd.fullname" . }} labels: {{- include "openvasd.labels" . | nindent 4 }} spec: replicas: 1 selector: matchLabels: {{- include "openvasd.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "openvasd.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "openvasd.serviceAccountName" . }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} volumes: - name: redis-socket emptyDir: {} - name: nasl-plugins emptyDir: {} - name: notus-data emptyDir: {} - name: openvas-config emptyDir: {} - name: scan-config emptyDir: {} - name: ospd-config emptyDir: {} - name: ospd-socket emptyDir: {} - name: openvas-logs emptyDir: {} {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - name: ingress-certificate secret: secretName: ingress-certificate {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - name: client-certs secret: secretName: client-certs {{ end }} initContainers: - name: nasl image: "{{ .Values.vulnerabilitytests.repository }}:{{ .Values.vulnerabilitytests.tag }}" imagePullPolicy: Always volumeMounts: - name: nasl-plugins mountPath: /mnt/nasl env: - name: MOUNT_PATH value: "/mnt/nasl" - name: normalize-nasl-feed image: "{{ .Values.vulnerabilitytests.repository }}:{{ .Values.vulnerabilitytests.tag }}" imagePullPolicy: Always volumeMounts: - name: nasl-plugins mountPath: /mnt/nasl command: ['/bin/sh', '-c'] # flatten nasl data args: - mv -vf /mnt/nasl/22.04/vt-data/nasl/* /mnt/nasl/; rm -r /mnt/nasl/22.04; rm -r /mnt/nasl/21.04; - name: notus-data image: "{{ .Values.notusdata.repository }}:{{ .Values.notusdata.tag }}" imagePullPolicy: Always volumeMounts: - name: notus-data mountPath: /mnt/notus env: - name: MOUNT_PATH value: "/mnt/notus" {{- if eq .Values.openvasd.scanner_type "ospd" }} - name: ospd-disable-notus-hashsum-verification # since users can mount local volumes which may be altered, we have to disable hashsum verification for notus image: "{{ .Values.ospd.repository }}:{{ .Values.ospd.tag }}" imagePullPolicy: Always volumeMounts: - name: ospd-config mountPath: /mnt/ovc command: ['sh', '-c'] args: ["cp /etc/gvm/ospd-openvas.conf /mnt/ovc/ospd-openvas.conf && printf \"disable_notus_hashsum_verification = True\n\" >> /mnt/ovc/ospd-openvas.conf"] - name: create-dummy-openvas-log image: "{{ .Values.ospd.repository }}:{{ .Values.ospd.tag }}" imagePullPolicy: Always volumeMounts: - name: openvas-logs mountPath: /mnt/ovc command: ['sh', '-c'] args: ["touch /mnt/ovc/openvas.log && chown
ospd-openvas:ospd-openvas /mnt/ovc/openvas.log"] {{ else }} - name: create-dummy-openvas-log image: "{{ .Values.openvas.repository }}:{{ .Values.openvas.tag }}" imagePullPolicy: Always volumeMounts: - name: openvas-logs mountPath: /mnt/ovc command: ['sh', '-c'] args: ["touch /mnt/ovc/openvas.log"] {{ end }} - name: mqtt-broker-openvas-fix image: "{{ .Values.ospd.repository }}:{{ .Values.ospd.tag }}" imagePullPolicy: Always volumeMounts: - name: openvas-config mountPath: /mnt/ovc command: ['sh', '-c'] {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} args: ["sed 's/mqtt_server_uri = .*/openvasd_server = https:\\/\\/localhost:443/' /etc/openvas/openvas.conf > /mnt/ovc/openvas.conf; cp /etc/openvas/openvas_log.conf /mnt/ovc/"] {{ else }} args: ["sed 's/mqtt_server_uri = .*/openvasd_server = http:\\/\\/localhost:80/' /etc/openvas/openvas.conf > /mnt/ovc/openvas.conf; cp /etc/openvas/openvas_log.conf /mnt/ovc/"] {{ end }} containers: - name: redis image: "{{ .Values.redis.repository }}:{{ .Values.redis.tag }}" imagePullPolicy: Always volumeMounts: - name: redis-socket mountPath: /run/redis # although the main purpose is to display openvas logs # we make it as ospd so that there is a container running # to play around {{- if eq .Values.openvasd.scanner_type "ospd"}} - name: openvas image: "{{ .Values.ospd.repository }}:{{ .Values.ospd.tag }}" imagePullPolicy: Always command: [ "tail", "-f", "/var/log/gvm/openvas.log" ] volumeMounts: - name: scan-config mountPath: /usr/local/src/policies - name: redis-socket mountPath: /run/redis - name: nasl-plugins mountPath: /var/lib/openvas/plugins - name: notus-data mountPath: /var/lib/notus - name: openvas-config mountPath: /etc/openvas {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - mountPath: "/etc/openvasd/tls/" name: ingress-certificate readOnly: true {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - mountPath: "/etc/openvasd/clientcerts" name: client-certs readOnly: true {{ end }} - name: openvas-logs mountPath: /var/log/gvm securityContext: capabilities: add: - NET_ADMIN - NET_RAW {{ else }} # openvas log replicate - name: openvas image: "{{ .Values.openvas.repository }}:{{ .Values.openvas.tag }}" imagePullPolicy: Always command: [ "tail", "-f", "/var/log/gvm/openvas.log" ] volumeMounts: - name: redis-socket mountPath: /run/redis - name: nasl-plugins mountPath: /var/lib/openvas/plugins - name: notus-data mountPath: /var/lib/notus - name: openvas-config mountPath: /etc/openvas - name: ospd-socket mountPath: /run/ospd/ {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - mountPath: "/etc/openvasd/tls/" name: ingress-certificate readOnly: true {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - mountPath: "/etc/openvasd/clientcerts" name: client-certs readOnly: true {{ end }} - name: openvas-logs mountPath: /var/log/gvm securityContext: capabilities: add: - NET_ADMIN - NET_RAW env: {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - name: LISTENING value: "0.0.0.0:443" {{ else }} - name: LISTENING value: "0.0.0.0:80" {{ end }} - name: OPENVASD_LOG value: {{ .Values.openvasd.loglevel | default "INFO" }} {{ with .Values.openvasd.apikey }} - name: API_KEY value: {{.}} {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - name: TLS_CERTS value: "/etc/openvasd/tls/certs.pem" - name: TLS_KEY value: "/etc/openvasd/tls/key.pem" {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - name: 
TLS_CLIENT_CERTS value: "/etc/openvasd/clientcerts/" {{ end }} - name: FEED_PATH value: /var/lib/openvas/plugins # - name: FEED_CHECK_INTERVAL # value: 3600 - name: NOTUS_ADVISORIES value: /var/lib/notus/advisories - name: NOTUS_PRODUCTS value: /var/lib/notus/products - name: ENABLE_GET_SCANS value: "true" - name: SCANNER_TYPE value: {{ .Values.openvasd.scanner_type }} - name: STORAGE_TYPE value: "redis" - name: REDIS_URL value: "unix:///run/redis/redis.sock" # - name: MAX_QUEUED_SCANS # value: "TBD" # - name: MAX_RUNNING_SCANS # value: "TBD" # - name: MIN_FREE_MEMORY # value: "TBD" # - name: SCHEDULER_CHECK_INTERVAL # value: "TBD" # - name: READ_TIMEOUT # value: "TBD" # - name: RESULT_CHECK_INTERVAL # value: "TBD" - name: OPENVASD_MODE value: "service" {{ end }} # openvas log replicate end - name: openvasd image: "{{ .Values.openvas.repository }}:{{ .Values.openvas.tag }}" imagePullPolicy: Always volumeMounts: - name: redis-socket mountPath: /run/redis - name: nasl-plugins mountPath: /var/lib/openvas/plugins - name: notus-data mountPath: /var/lib/notus - name: openvas-config mountPath: /etc/openvas - name: ospd-socket mountPath: /run/ospd/ - name: openvas-logs mountPath: /var/log/gvm {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - mountPath: "/etc/openvasd/tls/" name: ingress-certificate readOnly: true {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - mountPath: "/etc/openvasd/clientcerts" name: client-certs readOnly: true {{ end }} securityContext: capabilities: add: - NET_ADMIN - NET_RAW ports: {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - containerPort: 443 protocol: TCP {{ else }} - containerPort: 80 protocol: TCP {{ end }} env: {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - name: LISTENING value: "0.0.0.0:443" {{ else }} - name: LISTENING value: "0.0.0.0:80" {{ end }} - name: OSPD_SOCKET value: /run/ospd/ospd-openvas.sock - name: OPENVASD_LOG value: {{ .Values.openvasd.loglevel | default "INFO" }} {{ with .Values.openvasd.apikey }} - name: API_KEY value: {{.}} {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} - name: TLS_CERTS value: "/etc/openvasd/tls/certs.pem" - name: TLS_KEY value: "/etc/openvasd/tls/key.pem" {{ end }} {{- if eq .Values.openvasd.tls.certificates.deploy_client true }} - name: TLS_CLIENT_CERTS value: "/etc/openvasd/clientcerts/" {{ end }} - name: FEED_PATH value: /var/lib/openvas/plugins # - name: FEED_CHECK_INTERVAL # value: 3600 - name: NOTUS_ADVISORIES value: /var/lib/notus/advisories - name: NOTUS_PRODUCTS value: /var/lib/notus/products - name: ENABLE_GET_SCANS value: "true" - name: SCANNER_TYPE value: {{ .Values.openvasd.scanner_type }} {{- if eq .Values.openvasd.scanner_type "ospd" }} - name: OSPD_SOCKET value: "/run/ospd/" - name: STORAGE_TYPE value: "in_memory" # - name: STORAGE_PATH # value: "TBD" # - name: STORAGE_KEY # value: "TBD" {{ else }} - name: STORAGE_TYPE value: "redis" - name: REDIS_URL value: "unix:///run/redis/redis.sock" {{ end }} # - name: MAX_QUEUED_SCANS # value: "TBD" # - name: MAX_RUNNING_SCANS # value: "TBD" # - name: MIN_FREE_MEMORY # value: "TBD" # - name: SCHEDULER_CHECK_INTERVAL # value: "TBD" # - name: READ_TIMEOUT # value: "TBD" # - name: RESULT_CHECK_INTERVAL # value: "TBD" - name: OPENVASD_MODE value: "service" {{- if eq .Values.openvasd.scanner_type "ospd" }} - name: ospd image: "{{ .Values.ospd.repository }}:{{ .Values.ospd.tag }}" imagePullPolicy: Always volumeMounts: - name: redis-socket mountPath: /run/redis 
- name: nasl-plugins mountPath: /var/lib/openvas/plugins - name: notus-data mountPath: /var/lib/notus - name: openvas-config mountPath: /etc/openvas - name: ospd-config mountPath: /etc/gvm/ - name: ospd-socket mountPath: /run/ospd/ - name: openvas-logs mountPath: /var/log/gvm securityContext: capabilities: add: - NET_ADMIN - NET_RAW {{ end }} # optional scheduling constraints, passed through from values.yaml {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} openvas-scanner-23.17.0/charts/openvasd/templates/routing.yaml000066400000000000000000000013151500171107200244340ustar00rootroot00000000000000{{- if .Values.routing.enabled -}} {{- $svcPort := .Values.service.port -}} apiVersion: traefik.containo.us/v1alpha1 kind: IngressRouteTCP metadata: name: openvasd-route namespace: {{ .Release.Namespace }} annotations: "helm.sh/hook": post-install,post-upgrade "helm.sh/hook-delete-policy": before-hook-creation {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} spec: entryPoints: - websecure routes: - match: HostSNI(`*`) services: - name: openvasd port: {{ $svcPort }} tls: passthrough: true {{ else }} spec: entryPoints: - web routes: - match: HostSNI(`*`) services: - name: openvasd port: {{ $svcPort }} {{ end }} {{- end }} openvas-scanner-23.17.0/charts/openvasd/templates/service.yaml000066400000000000000000000007161500171107200244110ustar00rootroot00000000000000apiVersion: v1 kind: Service metadata: name: openvasd labels: {{- include "openvasd.labels" . | nindent 4 }} spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.port }} {{- if eq .Values.openvasd.tls.certificates.deploy_server true }} targetPort: 443 {{ else }} targetPort: 80 {{ end }} protocol: TCP name: http selector: {{- include "openvasd.selectorLabels" . | nindent 4 }} openvas-scanner-23.17.0/charts/openvasd/templates/serviceaccount.yaml000066400000000000000000000005021500171107200257570ustar00rootroot00000000000000{{- if .Values.serviceAccount.create -}} apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "openvasd.serviceAccountName" . }} labels: {{- include "openvasd.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} {{- end }} openvas-scanner-23.17.0/charts/openvasd/templates/tests/000077500000000000000000000000001500171107200232235ustar00rootroot00000000000000openvas-scanner-23.17.0/charts/openvasd/templates/tests/test-connection.yaml000066400000000000000000000006711500171107200272270ustar00rootroot00000000000000apiVersion: v1 kind: Pod metadata: name: "{{ include "openvasd.fullname" . }}-test-connection" labels: {{- include "openvasd.labels" . | nindent 4 }} annotations: "helm.sh/hook": test spec: containers: - name: wget image: busybox command: ['wget'] args: ['--header', 'x-api-key: {{ .Values.openvasd.apikey }}','{{ include "openvasd.fullname" . }}:{{ .Values.service.port }}/vts'] restartPolicy: Never openvas-scanner-23.17.0/charts/openvasd/values.yaml000066400000000000000000000063661500171107200222550ustar00rootroot00000000000000# Default values for openvasd. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
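#
# As a hedged illustration only (the release name "openvasd" and the chosen
# overrides are examples, not part of this file): any value below can be
# overridden at install time with Helm, e.g.
#
#   helm install openvasd charts/openvasd \
#     --set openvasd.scanner_type=openvas \
#     --set openvasd.loglevel=INFO
#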
# Currently required by openvasd to control openvas ospd: repository: registry.community.greenbone.net/community/ospd-openvas pullPolicy: Always tag: "edge" # Contains openvasd openvas: repository: registry.community.greenbone.net/community/openvas-scanner pullPolicy: Always tag: "edge" # Configuration of openvasd openvasd: # Sets the log level and changes the verbosity of openvasd. # Can be set to TRACE, DEBUG, INFO, WARNING, ERROR # openvasd is provided by the openvas image loglevel: DEBUG # When set, this value is used as the API key. It is not required when deploy_client is true. # apikey: changeme # can be either service (full openvasd capabilities) or service_notus (notus-only mode) mode: service # can be either: # - openvas, use openvas and redis # - ospd, use ospd-openvas scanner_type: openvas # can be either: # - redis, when scanner_type is set to openvas it has to be redis and this field is ignored # - in_memory, uses an in-memory storage # - fs, uses a file system storage storage_type: redis tls: # enables the server-/client-certs deployment via secret # server-key # client-certs # deploying a client certificate without a server certificate will not have any effect. certificates: deploy_server: true deploy_client: true # NASL scripts, also known as the feed vulnerabilitytests: # latest is the most current community feed. repository: registry.community.greenbone.net/community/vulnerability-tests pullPolicy: Always tag: "community" # Notus description json, also known as the feed notusdata: # latest is the most current community feed. repository: registry.community.greenbone.net/community/notus-data pullPolicy: Always tag: "community" # required by openvas and ospd redis: repository: greenbone/redis-server pullPolicy: Always tag: "latest" # When you have access to the enterprise feed, add the credentials for the private repository here. # Additionally, change notusdata and vulnerabilitytests accordingly. imagePullSecrets: [] nameOverride: "" serviceAccount: # Specifies whether a service account should be created create: true # Annotations to add to the service account annotations: {} # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" podAnnotations: {} podSecurityContext: {} # fsGroup: 2000 securityContext: {} # capabilities: # drop: # - ALL # readOnlyRootFilesystem: true # runAsNonRoot: true # runAsUser: 1000 service: type: ClusterIP # type: LoadBalancer port: 443 # enables routing.yaml routing: enabled: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} openvas-scanner-23.17.0/check_license_headers.bash000077500000000000000000000020031500171107200220700ustar00rootroot00000000000000#!/usr/bin/env bash function comment_string () { ext=$1 if [[ $ext == "c" || $ext == "h" || $ext == "rs" ]]; then echo "//" elif [[ $ext == "nasl" || $ext == "cmake" ]]; then echo "#" fi } any_missing_headers=0 exts="c h nasl cmake" # iterate NUL-delimited file names via process substitution (not a pipe), so # that any_missing_headers set inside the loop survives into the final exit for ext in $exts; do while IFS= read -r -d '' f; do header=$(head -n 3 "$f") if ! 
[[ "$header" =~ SPDX ]]; then echo "File does not contain license header: $f" any_missing_headers=1 if [[ "$1" == add_header ]]; then tmpfile=$(mktemp) cp "$f" "$tmpfile" comment=$(comment_string $ext) echo -e "$comment SPDX-FileCopyrightText: 2025 Greenbone AG\n$comment\n$comment SPDX-License-Identifier: GPL-2.0-or-later WITH x11vnc-openssl-exception\n" | cat - $tmpfile > "$f" fi fi done done exit $any_missing_headers openvas-scanner-23.17.0/cmake/000077500000000000000000000000001500171107200160405ustar00rootroot00000000000000openvas-scanner-23.17.0/cmake/GetGit.cmake000066400000000000000000000023501500171107200202250ustar00rootroot00000000000000# SPDX-FileCopyrightText: 2023 Greenbone AG # # SPDX-License-Identifier: GPL-2.0-or-later # This script attempts to determine the Git commit ID and writes or updates # a "gitrevision.h" file if successful. find_package (Git) macro (Git_GET_REVISION dir variable) execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD WORKING_DIRECTORY ${dir} OUTPUT_VARIABLE GIT_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE) execute_process(COMMAND ${GIT_EXECUTABLE} log -1 --format=%h WORKING_DIRECTORY ${dir} OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE) string (REPLACE "/" "_" GIT_BRANCH ${GIT_BRANCH}) set (${variable} "${GIT_COMMIT_HASH}-${GIT_BRANCH}") endmacro (Git_GET_REVISION) if (EXISTS "${SOURCE_DIR}/.git/") if (GIT_FOUND) Git_GET_REVISION (${SOURCE_DIR} GIT_REVISION) endif (GIT_FOUND) endif (EXISTS "${SOURCE_DIR}/.git/") if (GIT_REVISION) file (WRITE gitrevision.h.in "#define OPENVASSD_GIT_REVISION \"${GIT_REVISION}\"\n") execute_process (COMMAND ${CMAKE_COMMAND} -E copy_if_different gitrevision.h.in gitrevision.h) file (REMOVE gitrevision.h.in) endif (GIT_REVISION) openvas-scanner-23.17.0/config/000077500000000000000000000000001500171107200162255ustar00rootroot00000000000000openvas-scanner-23.17.0/config/redis-openvas.conf000066400000000000000000001705711500171107200216660ustar00rootroot00000000000000# Redis configuration file example. # # Note that in order to read the configuration file, Redis must be # started with the file path as first argument: # # ./redis-server /path/to/redis.conf # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Redis servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Notice option "include" won't be rewritten by command "CONFIG REWRITE" # from admin or Redis Sentinel. Since Redis always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. # # If instead you are interested in using includes to override configuration # options, it is better to use include as the last line. # # include /path/to/local.conf # include /path/to/other.conf ################################## MODULES ##################################### # Load modules at startup. If the server is not able to load modules # it will abort. 
It is possible to use multiple loadmodule directives. # # loadmodule /path/to/my_module.so # loadmodule /path/to/other_module.so ################################## NETWORK ##################################### # By default, if no "bind" configuration directive is specified, Redis listens # for connections from all the network interfaces available on the server. # It is possible to listen to just one or multiple selected interfaces using # the "bind" configuration directive, followed by one or more IP addresses. # # Examples: # # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 ::1 # # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the # following bind directive, that will force Redis to listen only into # the IPv4 loopback interface address (this means Redis will be able to # accept connections only from clients running into the same computer it # is running). # # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES # JUST COMMENT THE FOLLOWING LINE. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 # Protected mode is a layer of security protection, in order to avoid that # Redis instances left open on the internet are accessed and exploited. # # When protected mode is on and if: # # 1) The server is not binding explicitly to a set of addresses using the # "bind" directive. # 2) No password is configured. # # The server only accepts connections from clients connecting from the # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain # sockets. # # By default protected mode is enabled. You should disable it only if # you are sure you want clients from other hosts to connect to Redis # even if no authentication is configured, nor a specific set of interfaces # are explicitly listed using the "bind" directive. protected-mode yes # Accept connections on the specified port, default is 6379 (IANA #815344). # If port 0 is specified Redis will not listen on a TCP socket. port 0 # TCP listen() backlog. # # In high requests-per-second environments you need an high backlog in order # to avoid slow clients connections issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog 511 # Unix socket. # # Specify the path for the Unix socket that will be used to listen for # incoming connections. There is no default, so Redis will not listen # on a unix socket when not specified. # unixsocket /run/redis-openvas/redis.sock unixsocketperm 770 # Close the connection after a client is idle for N seconds (0 to disable) timeout 0 # TCP keepalive. # # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence # of communication. This is useful for two reasons: # # 1) Detect dead peers. # 2) Take the connection alive from the point of view of network # equipment in the middle. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. # On other kernels the period depends on the kernel configuration. # # A reasonable value for this option is 300 seconds, which is the new # Redis default starting with Redis 3.2.1. 
tcp-keepalive 300 ################################# GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. daemonize yes # If you run Redis from upstart or systemd, Redis can interact with your # supervision tree. Options: # supervised no - no supervision interaction # supervised upstart - signal upstart by putting Redis into SIGSTOP mode # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET # supervised auto - detect upstart or systemd method based on # UPSTART_JOB or NOTIFY_SOCKET environment variables # Note: these supervision methods only signal "process is ready." # They do not enable continuous liveness pings back to your supervisor. supervised no # If a pid file is specified, Redis writes it where specified at startup # and removes it at exit. # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file # is used even if not specified, defaulting to "/var/run/redis.pid". # # Creating a pid file is best effort: if Redis is not able to create it # nothing bad happens, the server will start and run normally. pidfile /run/redis-openvas/redis-server.pid # Specify the server verbosity level. # This can be one of: # debug (a lot of information, useful for development/testing) # verbose (many rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel notice # Specify the log file name. Also the empty string can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null logfile "" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. syslog-enabled yes # Specify the syslog identity. # syslog-ident redis # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 # Set the number of databases. The default database is DB 0, you can select # a different one on a per-connection basis using SELECT <dbid> where # dbid is a number between 0 and 'databases'-1 databases 1025 # By default Redis shows an ASCII art logo only when started to log to the # standard output and if the standard output is a TTY. Basically this means # that normally a logo is displayed only in interactive sessions. # # However it is possible to force the pre-4.0 behavior and always show a # ASCII art logo in startup logs by setting the following option to yes. always-show-logo yes ################################ SNAPSHOTTING ################################ # # Save the DB on disk: # # save <seconds> <changes> # # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # # In the example below the behaviour will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed # # Note: you can disable saving completely by commenting out all "save" lines. 
# # It is also possible to remove all the previously configured save # points by adding a save directive with a single empty string argument # like in the following example: # save "" # save 900 1 # save 300 10 # save 60 10000 # By default Redis will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting # on disk properly, otherwise chances are that no one will notice and some # disaster will happen. # # If the background saving process will start working again Redis will # automatically allow writes again. # # However if you have setup your proper monitoring of the Redis server # and persistence, you may want to disable this feature so that Redis will # continue to work as usual even if there are problems with disk, # permissions, and so forth. stop-writes-on-bgsave-error yes # Compress string objects using LZF when dumping .rdb databases? # By default that's set to 'yes' as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression yes # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. # This makes the format more resistant to corruption but there is a performance # hit to pay (around 10%) when saving and loading RDB files, so you can disable it # for maximum performance. # # RDB files created with checksum disabled have a checksum of zero that will # tell the loading code to skip the check. rdbchecksum yes # The filename where to dump the DB dbfilename dump.rdb # The working directory. # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. # # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name. dir ./ ################################# REPLICATION ################################# # Master-Replica replication. Use replicaof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. # # +------------------+ +---------------+ # | Master | ---> | Replica | # | (receive writes) | | (exact copy) | # +------------------+ +---------------+ # # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least # a given number of replicas. # 2) Redis replicas are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a # network partition replicas automatically try to reconnect to masters # and resynchronize with them. # # replicaof <masterip> <masterport> # If the master is password protected (using the "requirepass" configuration # directive below) it is possible to tell the replica to authenticate before # starting the replication synchronization process, otherwise the master will # refuse the replica request.
# # masterauth <master-password> # When a replica loses its connection with the master, or when the replication # is still in progress, the replica can act in two different ways: # # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # # 2) if replica-serve-stale-data is set to 'no' the replica will reply with # an error "SYNC with master in progress" to all the kind of commands # but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, # SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, # COMMAND, POST, HOST: and LATENCY. # replica-serve-stale-data yes # You can configure a replica instance to accept writes or not. Writing against # a replica instance may be useful to store some ephemeral data (because data # written on a replica will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # # Since Redis 2.6 by default replicas are read-only. # # Note: read only replicas are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. # Still a read only replica exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve # security of read only replicas using 'rename-command' to shadow all the # administrative / dangerous commands. replica-read-only yes # Replication SYNC strategy: disk or socket. # # ------------------------------------------------------- # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY # ------------------------------------------------------- # # New replicas and reconnecting replicas that are not able to continue the replication # process just receiving differences, need to do what is called a "full # synchronization". An RDB file is transmitted from the master to the replicas. # The transmission can happen in two different ways: # # 1) Disk-backed: The Redis master creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent # process to the replicas incrementally. # 2) Diskless: The Redis master creates a new process that directly writes the # RDB file to replica sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more replicas # can be queued and served with the RDB file as soon as the current child producing # the RDB file finishes its work. With diskless replication instead once # the transfer starts, new replicas arriving will be queued and a new transfer # will start when the current one terminates. # # When diskless replication is used, the master waits a configurable amount of # time (in seconds) before starting the transfer in the hope that multiple replicas # will arrive and the transfer can be parallelized. # # With slow disks and fast (large bandwidth) networks, diskless replication # works better. repl-diskless-sync no # When diskless replication is enabled, it is possible to configure the delay # the server waits in order to spawn the child that transfers the RDB via socket # to the replicas. # # This is important since once the transfer starts, it is not possible to serve # new replicas arriving, that will be queued for the next RDB transfer, so the server # waits a delay in order to let more replicas arrive. 
# # The delay is specified in seconds, and by default is 5 seconds. To disable # it entirely just set it to 0 seconds and the transfer will start ASAP. repl-diskless-sync-delay 5 # Replicas send PINGs to server in a predefined interval. It's possible to change # this interval with the repl_ping_replica_period option. The default value is 10 # seconds. # # repl-ping-replica-period 10 # The following option sets the replication timeout for: # # 1) Bulk transfer I/O during SYNC, from the point of view of replica. # 2) Master timeout from the point of view of replicas (data, pings). # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value # specified for repl-ping-replica-period otherwise a timeout will be detected # every time there is low traffic between the master and the replica. # # repl-timeout 60 # Disable TCP_NODELAY on the replica socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and # less bandwidth to send data to replicas. But this can add a delay for # the data to appear on the replica side, up to 40 milliseconds with # Linux kernels using a default configuration. # # If you select "no" the delay for data to appear on the replica side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions # or when the master and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates # replica data when replicas are disconnected for some time, so that when a replica # wants to reconnect again, often a full resync is not needed, but a partial # resync is enough, just passing the portion of data the replica missed while # disconnected. # # The bigger the replication backlog, the longer the time the replica can be # disconnected and later be able to perform a partial resynchronization. # # The backlog is only allocated once there is at least a replica connected. # # repl-backlog-size 1mb # After a master has no longer connected replicas for some time, the backlog # will be freed. The following option configures the amount of seconds that # need to elapse, starting from the time the last replica disconnected, for # the backlog buffer to be freed. # # Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially # resynchronize" with the replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 # The replica priority is an integer number published by Redis in the INFO output. # It is used by Redis Sentinel in order to select a replica to promote into a # master if the master is no longer working correctly. # # A replica with a low priority number is considered better for promotion, so # for instance if there are three replicas with priority 10, 100, 25 Sentinel will # pick the one with priority 10, that is the lowest. # # However a special priority of 0 marks the replica as not able to perform the # role of master, so a replica with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. replica-priority 100 # It is possible for a master to stop accepting writes if there are less than # N replicas connected, having a lag less or equal than M seconds. 
# # The N replicas need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from # the last ping received from the replica, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but # will limit the window of exposure for lost writes in case not enough replicas # are available, to the specified number of seconds. # # For example to require at least 3 replicas with a lag <= 10 seconds use: # # min-replicas-to-write 3 # min-replicas-max-lag 10 # # Setting one or the other to 0 disables the feature. # # By default min-replicas-to-write is set to 0 (feature disabled) and # min-replicas-max-lag is set to 10. # A Redis master is able to list the address and port of the attached # replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by # Redis Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the # "ROLE" command of a master. # # The listed IP and address normally reported by a replica is obtained # in the following way: # # IP: The address is auto detected by checking the peer address # of the socket used by the replica to connect with the master. # # Port: The port is communicated by the replica during the replication # handshake, and is normally the port that the replica is using to # listen for connections. # # However when port forwarding or Network Address Translation (NAT) is # used, the replica may be actually reachable via different IP and port # pairs. The following two options can be used by a replica in order to # report to its master a specific set of IP and port, so that both INFO # and ROLE will report those values. # # There is no need to use both the options if you need to override just # the port or the IP address. # # replica-announce-ip 5.5.5.5 # replica-announce-port 1234 ################################## SECURITY ################################### # Require clients to issue AUTH before processing any other # commands. This might be useful in environments in which you do not trust # others with access to the host running redis-server. # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). # # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. # # requirepass foobared # Command renaming. # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something # hard to guess so that it will still be available for internal-use tools # but not available for general clients. # # Example: # # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 # # It is also possible to completely kill a command by renaming it into # an empty string: # # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the # AOF file or transmitted to replicas may cause problems. ################################### CLIENTS #################################### # Set the max number of connected clients at the same time. 
By default # this limit is set to 10000 clients, however if the Redis server is not # able to configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit # minus 32 (as Redis reserves a few file descriptors for internal uses). # # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # # maxclients 10000 ############################## MEMORY MANAGEMENT ################################ # Set a memory usage limit to the specified amount of bytes. # When the memory limit is reached Redis will try to remove keys # according to the eviction policy selected (see maxmemory-policy). # # If Redis can't remove keys according to the policy, or if the policy is # set to 'noeviction', Redis will start to reply with errors to commands # that would use more memory, like SET, LPUSH, and so on, and will continue # to reply to read-only commands like GET. # # This option is usually useful when using Redis as an LRU or LFU cache, or to # set a hard memory limit for an instance (using the 'noeviction' policy). # # WARNING: If you have replicas attached to an instance with maxmemory on, # the size of the output buffers needed to feed the replicas are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output # buffer of replicas is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # # In short... if you have replicas attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for replica # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory <bytes> # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: # # volatile-lru -> Evict using approximated LRU among the keys with an expire set. # allkeys-lru -> Evict any key using approximated LRU. # volatile-lfu -> Evict using approximated LFU among the keys with an expire set. # allkeys-lfu -> Evict any key using approximated LFU. # volatile-random -> Remove a random key among the ones with an expire set. # allkeys-random -> Remove a random key, any key. # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) # noeviction -> Don't evict anything, just return an error on write operations. # # LRU means Least Recently Used # LFU means Least Frequently Used # # Both LRU, LFU and volatile-ttl are implemented using approximated # randomized algorithms. # # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # # At the date of writing these commands are: set setnx setex append # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby # getset mset msetnx exec sort # # The default is: # # maxmemory-policy noeviction # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or # accuracy. By default Redis will check five keys and pick the one that was # used less recently, you can change the sample size using the following # configuration directive. # # The default of 5 produces good enough results. 
10 Approximates very closely # true LRU but costs more CPU. 3 is faster but not very accurate. # # maxmemory-samples 5 # Starting from Redis 5, by default a replica will ignore its maxmemory setting # (unless it is promoted to master after a failover or manually). It means # that the eviction of keys will be just handled by the master, sending the # DEL commands to the replica as keys evict in the master side. # # This behavior ensures that masters and replicas stay consistent, and is usually # what you want, however if your replica is writable, or you want the replica to have # a different memory setting, and you are sure all the writes performed to the # replica are idempotent, then you may change this default (but be sure to understand # what you are doing). # # Note that since the replica by default does not evict, it may end using more # memory than the one set via maxmemory (there are certain buffers that may # be larger on the replica, or data structures may sometimes take more memory and so # forth). So make sure you monitor your replicas and make sure they have enough # memory to never hit a real out-of-memory condition before the master hits # the configured maxmemory setting. # # replica-ignore-maxmemory yes ############################# LAZY FREEING #################################### # Redis has two primitives to delete keys. One is called DEL and is a blocking # deletion of the object. It means that the server stops processing new commands # in order to reclaim all the memory associated with an object in a synchronous # way. If the key deleted is associated with a small object, the time needed # in order to execute the DEL command is very small and comparable to most other # O(1) or O(log_N) commands in Redis. However if the key is associated with an # aggregated value containing millions of elements, the server can block for # a long time (even seconds) in order to complete the operation. # # For the above reasons Redis also offers non blocking deletion primitives # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and # FLUSHDB commands, in order to reclaim memory in background. Those commands # are executed in constant time. Another thread will incrementally free the # object in the background as fast as possible. # # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. # It's up to the design of the application to understand when it is a good # idea to use one or the other. However the Redis server sometimes has to # delete keys or flush the whole database as a side effect of other operations. # Specifically Redis deletes objects independently of a user call in the # following scenarios: # # 1) On eviction, because of the maxmemory and maxmemory policy configurations, # in order to make room for new data, without going over the specified # memory limit. # 2) Because of expire: when a key with an associated time to live (see the # EXPIRE command) must be deleted from memory. # 3) Because of a side effect of a command that stores data on a key that may # already exist. For example the RENAME command may delete the old key # content when it is replaced with another one. Similarly SUNIONSTORE # or SORT with STORE option may delete existing keys. The SET command # itself removes any old content of the specified key in order to replace # it with the specified string. 
# 4) During replication, when a replica performs a full resynchronization with # its master, the content of the whole database is removed in order to # load the RDB file just transferred. # # In all the above cases the default is to delete objects in a blocking way, # like if DEL was called. However you can configure each case specifically # in order to instead release memory in a non-blocking way like if UNLINK # was called, using the following configuration directives: lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no replica-lazy-flush no ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. This mode is # good enough in many applications, but an issue with the Redis process or # a power outage may result into a few minutes of writes lost (depending on # the configured save points). # # The Append Only File is an alternative persistence mode that provides # much better durability. For instance using the default data fsync policy # (see later in the config file) Redis can lose just one second of writes in a # dramatic event like a server power outage, or a single write if something # wrong with the Redis process itself happens, but the operating system is # still running correctly. # # AOF and RDB persistence can be enabled at the same time without problems. # If the AOF is enabled on startup Redis will load the AOF, that is the file # with the better durability guarantees. # # Please check http://redis.io/topics/persistence for more information. appendonly no # The name of the append only file (default: "appendonly.aof") appendfilename "appendonly.aof" # The fsync() call tells the Operating System to actually write data on disk # instead of waiting for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # # Redis supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. # always: fsync after every write to the append only log. Slow, Safest. # everysec: fsync only one time every second. Compromise. # # The default is "everysec", as that's usually the right compromise between # speed and data safety. It's up to you to understand if you can relax this to # "no" that will let the operating system flush the output buffer when # it wants, for better performances (but if you can live with the idea of # some data loss consider the default persistence mode that's snapshotting), # or on the contrary, use "always" that's very slow but a bit safer than # everysec. # # More details please check the following article: # http://antirez.com/post/redis-persistence-demystified.html # # If unsure, use "everysec". # appendfsync always appendfsync everysec # appendfsync no # When the AOF fsync policy is set to always or everysec, and a background # saving process (a background save or AOF log background rewriting) is # performing a lot of I/O against the disk, in some Linux configurations # Redis may block too long on the fsync() call. Note that there is no fix for # this currently, as even performing fsync in a different thread will block # our synchronous write(2) call. # # In order to mitigate this problem it's possible to use the following option # that will prevent fsync() from being called in the main process while a # BGSAVE or BGREWRITEAOF is in progress. # # This means that while another child is saving, the durability of Redis is # the same as "appendfsync none". 
In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). # # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. # Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. # # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). # # This base size is compared to the current size. If the current size is # bigger than the specified percentage, the rewrite is triggered. Also # you need to specify a minimal size for the AOF file to be rewritten, this # is useful to avoid rewriting the AOF file even if the percentage increase # is reached but it is still pretty small. # # Specify a percentage of zero in order to disable the automatic AOF # rewrite feature. auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb # An AOF file may be found to be truncated at the end during the Redis # startup process, when the AOF data gets loaded back into memory. # This may happen when the system where Redis is running # crashes, especially when an ext4 filesystem is mounted without the # data=ordered option (however this can't happen when Redis itself # crashes or aborts but the operating system still works correctly). # # Redis can either exit with an error when this happens, or load as much # data as possible (the default now) and start if the AOF file is found # to be truncated at the end. The following option controls this behavior. # # If aof-load-truncated is set to yes, a truncated AOF file is loaded and # the Redis server starts emitting a log to inform the user of the event. # Otherwise if the option is set to no, the server aborts with an error # and refuses to start. When the option is set to no, the user requires # to fix the AOF file using the "redis-check-aof" utility before to restart # the server. # # Note that if the AOF file will be found to be corrupted in the middle # the server will still exit with an error. This option only applies when # Redis will try to read more data from the AOF file but not enough bytes # will be found. aof-load-truncated yes # When rewriting the AOF file, Redis is able to use an RDB preamble in the # AOF file for faster rewrites and recoveries. When this option is turned # on the rewritten AOF file is composed of two different stanzas: # # [RDB file][AOF tail] # # When loading Redis recognizes that the AOF file starts with the "REDIS" # string and loads the prefixed RDB file, and continues loading the AOF # tail. aof-use-rdb-preamble yes ################################ LUA SCRIPTING ############################### # Max execution time of a Lua script in milliseconds. # # If the maximum execution time is reached Redis will log that a script is # still in execution after the maximum allowed time and will start to # reply to queries with an error. # # When a long running script exceeds the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be # used to stop a script that did not yet called write commands. 
The second # is the only way to shut down the server in the case a write command was # already issued by the script but the user doesn't want to wait for the natural # termination of the script. # # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 ################################ REDIS CLUSTER ############################### # Normal Redis instances can't be part of a Redis Cluster; only nodes that are # started as cluster nodes can. In order to start a Redis instance as a # cluster node enable the cluster support uncommenting the following: # # cluster-enabled yes # Every cluster node has a cluster configuration file. This file is not # intended to be edited by hand. It is created and updated by Redis nodes. # Every Redis Cluster node requires a different cluster configuration file. # Make sure that instances running in the same system do not have # overlapping cluster configuration file names. # # cluster-config-file nodes-6379.conf # Cluster node timeout is the amount of milliseconds a node must be unreachable # for it to be considered in failure state. # Most other internal time limits are multiple of the node timeout. # # cluster-node-timeout 15000 # A replica of a failing master will avoid to start a failover if its data # looks too old. # # There is no simple way for a replica to actually have an exact measure of # its "data age", so the following two checks are performed: # # 1) If there are multiple replicas able to failover, they exchange messages # in order to try to give an advantage to the replica with the best # replication offset (more data from the master processed). # Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # # 2) Every single replica computes the time of the last interaction with # its master. This can be the last ping or command received (if the master # is still in the "connected" state), or the time that elapsed since the # disconnection with the master (if the replication link is currently down). # If the last interaction is too old, the replica will not try to failover # at all. # # The point "2" can be tuned by user. Specifically a replica will not perform # the failover if, since the last interaction with the master, the time # elapsed is greater than: # # (node-timeout * replica-validity-factor) + repl-ping-replica-period # # So for example if node-timeout is 30 seconds, and the replica-validity-factor # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the # replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. # # A large replica-validity-factor may allow replicas with too old data to failover # a master, while a too small value may prevent the cluster from being able to # elect a replica at all. # # For maximum availability, it is possible to set the replica-validity-factor # to a value of 0, which means, that replicas will always try to failover the # master regardless of the last time they interacted with the master. # (However they'll always try to apply a delay proportional to their # offset rank). # # Zero is the only value able to guarantee that when all the partitions heal # the cluster will always be able to continue. # # cluster-replica-validity-factor 10 # Cluster replicas are able to migrate to orphaned masters, that are masters # that are left without working replicas. 
This improves the cluster ability # to resist to failures as otherwise an orphaned master can't be failed over # in case of failure if it has no working replicas. # # Replicas migrate to orphaned masters only if there are still at least a # given number of other working replicas for their old master. This number # is the "migration barrier". A migration barrier of 1 means that a replica # will migrate only if there is at least 1 other working replica for its master # and so forth. It usually reflects the number of replicas you want for every # master in your cluster. # # Default is 1 (replicas migrate only if their masters remain with at least # one replica). To disable migration just set it to a very large value. # A value of 0 can be set but is useful only for debugging and dangerous # in production. # # cluster-migration-barrier 1 # By default Redis Cluster nodes stop accepting queries if they detect there # is at least an hash slot uncovered (no available node is serving it). # This way if the cluster is partially down (for example a range of hash slots # are no longer covered) all the cluster becomes, eventually, unavailable. # It automatically returns available as soon as all the slots are covered again. # # However sometimes you want the subset of the cluster which is working, # to continue to accept queries for the part of the key space that is still # covered. In order to do so, just set the cluster-require-full-coverage # option to no. # # cluster-require-full-coverage yes # This option, when set to yes, prevents replicas from trying to failover its # master during master failures. However the master can still perform a # manual failover, if forced to do so. # # This is useful in different scenarios, especially in the case of multiple # data center operations, where we want one side to never be promoted if not # in the case of a total DC failure. # # cluster-replica-no-failover no # In order to setup your cluster make sure to read the documentation # available at http://redis.io web site. ########################## CLUSTER DOCKER/NAT support ######################## # In certain deployments, Redis Cluster nodes address discovery fails, because # addresses are NAT-ted or because ports are forwarded (the typical case is # Docker and other containers). # # In order to make Redis Cluster working in such environments, a static # configuration where each node knows its public address is needed. The # following two options are used for this scope, and are: # # * cluster-announce-ip # * cluster-announce-port # * cluster-announce-bus-port # # Each instruct the node about its address, client port, and cluster message # bus port. The information is then published in the header of the bus packets # so that other nodes will be able to correctly map the address of the node # publishing the information. # # If the above options are not used, the normal Redis Cluster auto-detection # will be used instead. # # Note that when remapped, the bus port may not be at the fixed offset of # clients port + 10000, so you can specify any port and bus-port depending # on how they get remapped. If the bus-port is not set, a fixed offset of # 10000 will be used as usually. # # Example: # # cluster-announce-ip 10.1.1.5 # cluster-announce-port 6379 # cluster-announce-bus-port 6380 ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified # execution time. 
The execution time does not include the I/O operations # like talking with the client, sending the reply and so forth, # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). # # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the # slow log. When a new command is logged the oldest one is removed from the # queue of logged commands. # The following time is expressed in microseconds, so 1000000 is equivalent # to one second. Note that a negative number disables the slow log, while # a value of zero forces the logging of every command. slowlog-log-slower-than 10000 # There is no limit to this length. Just be aware that it will consume memory. # You can reclaim memory used by the slow log with SLOWLOG RESET. slowlog-max-len 128 ################################ LATENCY MONITOR ############################## # The Redis latency monitoring subsystem samples different operations # at runtime in order to collect data related to possible sources of # latency of a Redis instance. # # Via the LATENCY command this information is available to the user that can # print graphs and obtain reports. # # The system only logs operations that were performed in a time equal or # greater than the amount of milliseconds specified via the # latency-monitor-threshold configuration directive. When its value is set # to zero, the latency monitor is turned off. # # By default latency monitoring is disabled since it is mostly not needed # if you don't have latency issues, and collecting data has a performance # impact, that while very small, can be measured under big load. Latency # monitoring can easily be enabled at runtime using the command # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. latency-monitor-threshold 0 ############################# EVENT NOTIFICATION ############################## # Redis can notify Pub/Sub clients about events happening in the key space. # This feature is documented at http://redis.io/topics/notifications # # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two # messages will be published via Pub/Sub: # # PUBLISH __keyspace@0__:foo del # PUBLISH __keyevent@0__:del foo # # It is possible to select the events that Redis will notify among a set # of classes. Every class is identified by a single character: # # K Keyspace events, published with __keyspace@<db>__ prefix. # E Keyevent events, published with __keyevent@<db>__ prefix. # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... # $ String commands # l List commands # s Set commands # h Hash commands # z Sorted set commands # x Expired events (events generated every time a key expires) # e Evicted events (events generated when a key is evicted for maxmemory) # A Alias for g$lshzxe, so that the "AKE" string means all the events. # # The "notify-keyspace-events" takes as argument a string that is composed # of zero or multiple characters. The empty string means that notifications # are disabled. 
# # Example: to enable list and generic events, from the point of view of the # event name, use: # # notify-keyspace-events Elg # # Example 2: to get the stream of the expired keys subscribing to channel # name __keyevent@0__:expired use: # # notify-keyspace-events Ex # # By default all notifications are disabled because most users don't need # this feature and the feature has some overhead. Note that if you don't # specify at least one of K or E, no events will be delivered. notify-keyspace-events "" ############################### ADVANCED CONFIG ############################### # Hashes are encoded using a memory efficient data structure when they have a # small number of entries, and the biggest entry does not exceed a given # threshold. These thresholds can be configured using the following directives. hash-max-ziplist-entries 512 hash-max-ziplist-value 64 # Lists are also encoded in a special way to save a lot of space. # The number of entries allowed per internal list node can be specified # as a fixed maximum size or a maximum number of elements. # For a fixed maximum size, use -5 through -1, meaning: # -5: max size: 64 Kb <-- not recommended for normal workloads # -4: max size: 32 Kb <-- not recommended # -3: max size: 16 Kb <-- probably not recommended # -2: max size: 8 Kb <-- good # -1: max size: 4 Kb <-- good # Positive numbers mean store up to _exactly_ that number of elements # per list node. # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), # but if your use case is unique, adjust the settings as necessary. list-max-ziplist-size -2 # Lists may also be compressed. # Compress depth is the number of quicklist ziplist nodes from *each* side of # the list to *exclude* from compression. The head and tail of the list # are always uncompressed for fast push/pop operations. Settings are: # 0: disable all list compression # 1: depth 1 means "don't start compressing until after 1 node into the list, # going from either the head or tail" # So: [head]->node->node->...->node->[tail] # [head], [tail] will always be uncompressed; inner nodes will compress. # 2: [head]->[next]->node->node->...->node->[prev]->[tail] # 2 here means: don't compress head or head->next or tail->prev or tail, # but compress all nodes between them. # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] # etc. list-compress-depth 0 # Sets have a special encoding in just one case: when a set is composed # of just strings that happen to be integers in radix 10 in the range # of 64 bit signed integers. # The following configuration setting sets the limit in the size of the # set in order to use this special memory saving encoding. set-max-intset-entries 512 # Similarly to hashes and lists, sorted sets are also specially encoded in # order to save a lot of space. This encoding is only used when the length and # elements of a sorted set are below the following limits: zset-max-ziplist-entries 128 zset-max-ziplist-value 64 # HyperLogLog sparse representation bytes limit. The limit includes the # 16 bytes header. When an HyperLogLog using the sparse representation crosses # this limit, it is converted into the dense representation. # # A value greater than 16000 is totally useless, since at that point the # dense representation is more memory efficient. # # The suggested value is ~ 3000 in order to have the benefits of # the space efficient encoding without slowing down too much PFADD, # which is O(N) with the sparse encoding. 
The value can be raised to # ~ 10000 when CPU is not a concern, but space is, and the data set is # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. hll-sparse-max-bytes 3000 # Streams macro node max size / items. The stream data structure is a radix # tree of big nodes that encode multiple items inside. Using this configuration # it is possible to configure how big a single node can be in bytes, and the # maximum number of items it may contain before switching to a new node when # appending new stream entries. If any of the following settings are set to # zero, the limit is ignored, so for instance it is possible to set just a # max entries limit by setting max-bytes to 0 and max-entries to the desired # value. stream-node-max-bytes 4096 stream-node-max-entries 100 # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in # order to help rehashing the main Redis hash table (the one mapping top-level # keys to values). The hash table implementation Redis uses (see dict.c) # performs a lazy rehashing: the more operation you run into a hash table # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. # # The default is to use this millisecond 10 times every second in order to # actively rehash the main dictionaries, freeing memory when possible. # # If unsure: # use "activerehashing no" if you have hard latency requirements and it is # not a good thing in your environment that Redis can reply from time to time # to queries with 2 milliseconds delay. # # use "activerehashing yes" if you don't have such hard requirements but # want to free memory asap when possible. activerehashing yes # The client output buffer limits can be used to force disconnection of clients # that are not reading data from the server fast enough for some reason (a # common reason is that a Pub/Sub client can't consume messages as fast as the # publisher can produce them). # # The limit can be set differently for the three different classes of clients: # # normal -> normal clients including MONITOR clients # replica -> replica clients # pubsub -> clients subscribed to at least one pubsub channel or pattern # # The syntax of every client-output-buffer-limit directive is the following: # # client-output-buffer-limit # # A client is immediately disconnected once the hard limit is reached, or if # the soft limit is reached and remains reached for the specified number of # seconds (continuously). # So for instance if the hard limit is 32 megabytes and the soft limit is # 16 megabytes / 10 seconds, the client will get disconnected immediately # if the size of the output buffers reach 32 megabytes, but will also get # disconnected if the client reaches 16 megabytes and continuously overcomes # the limit for 10 seconds. # # By default normal clients are not limited because they don't receive data # without asking (in a push way), but just after a request, so only # asynchronous clients may create a scenario where data is requested faster # than it can read. # # Instead there is a default limit for pubsub and replica clients, since # subscribers and replicas receive data in a push fashion. # # Both the hard or the soft limit can be disabled by setting them to zero. client-output-buffer-limit normal 0 0 0 client-output-buffer-limit replica 256mb 64mb 60 client-output-buffer-limit pubsub 32mb 8mb 60 # Client query buffers accumulate new commands. 
They are limited to a fixed # amount by default in order to avoid that a protocol desynchronization (for # instance due to a bug in the client) will lead to unbound memory usage in # the query buffer. However you can configure it here if you have very special # needs, such us huge multi/exec requests or alike. # # client-query-buffer-limit 1gb # In the Redis protocol, bulk requests, that are, elements representing single # strings, are normally limited ot 512 mb. However you can change this limit # here. # # proto-max-bulk-len 512mb # Redis calls an internal function to perform many background tasks, like # closing connections of clients in timeout, purging expired keys that are # never requested, and so forth. # # Not all tasks are performed with the same frequency, but Redis checks for # tasks to perform according to the specified "hz" value. # # By default "hz" is set to 10. Raising the value will use more CPU when # Redis is idle, but at the same time will make Redis more responsive when # there are many keys expiring at the same time, and timeouts may be # handled with more precision. # # The range is between 1 and 500, however a value over 100 is usually not # a good idea. Most users should use the default of 10 and raise this up to # 100 only in environments where very low latency is required. hz 10 # Normally it is useful to have an HZ value which is proportional to the # number of clients connected. This is useful in order, for instance, to # avoid too many clients are processed for each background task invocation # in order to avoid latency spikes. # # Since the default HZ value by default is conservatively set to 10, Redis # offers, and enables by default, the ability to use an adaptive HZ value # which will temporary raise when there are many connected clients. # # When dynamic HZ is enabled, the actual configured HZ will be used as # as a baseline, but multiples of the configured HZ value will be actually # used as needed once more clients are connected. In this way an idle # instance will use very little CPU time while a busy instance will be # more responsive. dynamic-hz yes # When a child rewrites the AOF file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes. aof-rewrite-incremental-fsync yes # When redis saves RDB file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes. rdb-save-incremental-fsync yes # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good # idea to start with the default settings and only change them after investigating # how to improve the performances and how the keys LFU change over time, which # is possible to inspect via the OBJECT FREQ command. # # There are two tunable parameters in the Redis LFU implementation: the # counter logarithm factor and the counter decay time. It is important to # understand what the two parameters mean before changing them. # # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis # uses a probabilistic increment with logarithmic behavior. Given the value # of the old counter, when a key is accessed, the counter is incremented in # this way: # # 1. A random number R between 0 and 1 is extracted. # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). # 3. 
The counter is incremented only if R < P. # # The default lfu-log-factor is 10. This is a table of how the frequency # counter changes with a different number of accesses with different # logarithmic factors: # # +--------+------------+------------+------------+------------+------------+ # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | # +--------+------------+------------+------------+------------+------------+ # | 0 | 104 | 255 | 255 | 255 | 255 | # +--------+------------+------------+------------+------------+------------+ # | 1 | 18 | 49 | 255 | 255 | 255 | # +--------+------------+------------+------------+------------+------------+ # | 10 | 10 | 18 | 142 | 255 | 255 | # +--------+------------+------------+------------+------------+------------+ # | 100 | 8 | 11 | 49 | 143 | 255 | # +--------+------------+------------+------------+------------+------------+ # # NOTE: The above table was obtained by running the following commands: # # redis-benchmark -n 1000000 incr foo # redis-cli object freq foo # # NOTE 2: The counter initial value is 5 in order to give new objects a chance # to accumulate hits. # # The counter decay time is the time, in minutes, that must elapse in order # for the key counter to be divided by two (or decremented if it has a value # less <= 10). # # The default value for the lfu-decay-time is 1. A Special value of 0 means to # decay the counter every time it happens to be scanned. # # lfu-log-factor 10 # lfu-decay-time 1 ########################### ACTIVE DEFRAGMENTATION ####################### # # WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested # even in production and manually tested by multiple engineers for some # time. # # What is active defragmentation? # ------------------------------- # # Active (online) defragmentation allows a Redis server to compact the # spaces left between small allocations and deallocations of data in memory, # thus allowing to reclaim back memory. # # Fragmentation is a natural process that happens with every allocator (but # less so with Jemalloc, fortunately) and certain workloads. Normally a server # restart is needed in order to lower the fragmentation, or at least to flush # away all the data and create it again. However thanks to this feature # implemented by Oran Agra for Redis 4.0 this process can happen at runtime # in an "hot" way, while the server is running. # # Basically when the fragmentation is over a certain level (see the # configuration options below) Redis will start to create new copies of the # values in contiguous memory regions by exploiting certain specific Jemalloc # features (in order to understand if an allocation is causing fragmentation # and to allocate it in a better place), and at the same time, will release the # old copies of the data. This process, repeated incrementally for all the keys # will cause the fragmentation to drop back to normal values. # # Important things to understand: # # 1. This feature is disabled by default, and only works if you compiled Redis # to use the copy of Jemalloc we ship with the source code of Redis. # This is the default with Linux builds. # # 2. You never need to enable this feature if you don't have fragmentation # issues. # # 3. Once you experience fragmentation, you can enable this feature when # needed with the command "CONFIG SET activedefrag yes". # # The configuration parameters are able to fine tune the behavior of the # defragmentation process. 
If you are not sure about what they mean it is # a good idea to leave the defaults untouched. # Enabled active defragmentation # activedefrag yes # Minimum amount of fragmentation waste to start active defrag # active-defrag-ignore-bytes 100mb # Minimum percentage of fragmentation to start active defrag # active-defrag-threshold-lower 10 # Maximum percentage of fragmentation at which we use maximum effort # active-defrag-threshold-upper 100 # Minimal effort for defrag in CPU percentage # active-defrag-cycle-min 5 # Maximal effort for defrag in CPU percentage # active-defrag-cycle-max 75 # Maximum number of set/hash/zset/list fields that will be processed from # the main dictionary scan # active-defrag-max-scan-fields 1000 openvas-scanner-23.17.0/doc/000077500000000000000000000000001500171107200155255ustar00rootroot00000000000000openvas-scanner-23.17.0/doc/.gitignore000066400000000000000000000000061500171107200175110ustar00rootroot00000000000000html/ openvas-scanner-23.17.0/doc/CMakeLists.txt000066400000000000000000000041121500171107200202630ustar00rootroot00000000000000# SPDX-FileCopyrightText: 2023 Greenbone AG # # SPDX-License-Identifier: GPL-2.0-or-later ## build include (FindDoxygen) if (NOT DOXYGEN_EXECUTABLE) message (STATUS "WARNING: Doxygen is required to build the HTML docs.") else (NOT DOXYGEN_EXECUTABLE) configure_file (Doxyfile.in Doxyfile @ONLY) configure_file (Doxyfile_full.in Doxyfile_full @ONLY) configure_file (Doxyfile_xml.in Doxyfile_xml @ONLY) add_custom_target (doxygen COMMENT "Building documentation..." DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile ${CMAKE_CURRENT_BINARY_DIR}/.built-html) add_custom_command (OUTPUT .built-html COMMAND sh ARGS -c \"${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile && touch ${CMAKE_CURRENT_BINARY_DIR}/.built-html\;\" DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) add_custom_target (doxygen-full COMMENT "Building documentation..." DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_full ${CMAKE_CURRENT_BINARY_DIR}/.built-html_full) add_custom_command (OUTPUT .built-html_full COMMAND sh ARGS -c \"${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_full && touch ${CMAKE_CURRENT_BINARY_DIR}/.built-html_full\;\" DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_full) add_custom_target (doxygen-xml COMMENT "Building documentation (XML)..." DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_xml ${CMAKE_CURRENT_BINARY_DIR}/.built-xml) add_custom_command (OUTPUT .built-xml COMMAND sh ARGS -c \"${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_xml && touch ${CMAKE_CURRENT_BINARY_DIR}/.built-xml\;\" DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile_xml) endif (NOT DOXYGEN_EXECUTABLE) find_program(PANDOC pandoc) if (NOT PANDOC) message(STATUS "WARNING: pandoc is required to build the HTML user manual.") else (NOT PANDOC) configure_file(man.sh man.sh @ONLY) configure_file(html.sh html.sh @ONLY) file(COPY manual DESTINATION .) file(COPY templates DESTINATION .) add_custom_target(manual COMMAND ${CMAKE_CURRENT_BINARY_DIR}/html.sh) add_custom_target(nasl-man COMMAND ${CMAKE_CURRENT_BINARY_DIR}/man.sh) endif (NOT PANDOC) openvas-scanner-23.17.0/doc/Doxyfile.in000066400000000000000000003115451500171107200176510ustar00rootroot00000000000000# Doxyfile 1.8.8 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. 
# # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = "OpenVAS Scanner" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is included in # the documentation. The maximum height of the logo should not exceed 55 pixels # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = @CMAKE_BINARY_DIR@/doc/generated # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. 
Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a # new page for each member. If set to NO, the documentation of a member will be # part of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 2 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. ALIASES = "TODO=\todo" \ "naslfn{1}=\par NASL Function: \b \1\n" \ "nasluparam=\par NASL Unnamed Parameters:\n" \ "naslnparam=\par NASL Named Parameters:\n" \ "naslret=\par NASL Returns:\n" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. 
OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: # Fortran. In the later case the parser tries to guess whether the code is fixed # or free formatted code, this is the default for Fortran type files), VHDL. For # instance to make doxygen treat .inc files as Fortran files (default is PHP), # and .f files as C (default is Fortran), use: inc=Fortran f=C. # # Note For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by by putting a % sign in front of the word # or globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. 
Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. 
At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = YES # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined # locally in source files will be included in the documentation. If set to NO # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO these classes will be included in the various overviews. This option has # no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. 
HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
# Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the # todo list. This list is created by putting \todo commands in the # documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the # test list. This list is created by putting \test commands in the # documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES the list # will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. 
For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO doxygen will only warn about wrong or incomplete parameter # documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = YES # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). 
WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. # Note: If this tag is empty the current directory is searched. INPUT = @CMAKE_SOURCE_DIR@/src \ @CMAKE_SOURCE_DIR@/misc \ @CMAKE_SOURCE_DIR@/nasl # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = ISO-8859-1 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the # following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, # *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, # *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). 
EXAMPLE_PATH = @CMAKE_SOURCE_DIR@ \ @CMAKE_SOURCE_DIR@/doc # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # # # where is the value of the INPUT_FILTER tag, and is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER ) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. 
# The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES, then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the # clang parser (see: http://clang.llvm.org/) for more accurate parsing at the # cost of reduced performance. This can be particularly helpful with template # rich C++ code for which doxygen's built-in parser lacks the necessary type # information. # Note: The availability of this option depends on whether or not doxygen was # compiled with the --with-libclang option. # The default value is: NO. CLANG_ASSISTED_PARSING = NO # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. Note that # the include paths will already be set by doxygen for the files and directories # specified with INPUT and INCLUDE_PATH. 
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.

USE_HTAGS              = NO

# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.

VERBATIM_HEADERS       = YES

# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
# cost of reduced performance. This can be particularly helpful with template
# rich C++ code for which doxygen's built-in parser lacks the necessary type
# information.
# Note: The availability of this option depends on whether or not doxygen was
# compiled with the --with-libclang option.
# The default value is: NO.

CLANG_ASSISTED_PARSING = NO

# If clang assisted parsing is enabled you can provide the compiler with
# command line options that you would normally use when invoking the compiler.
# Note that the include paths will already be set by doxygen for the files and
# directories specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.

CLANG_OPTIONS          =

#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------

# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.

ALPHABETICAL_INDEX     = YES

# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
# Minimum value: 1, maximum value: 20, default value: 5.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.

COLS_IN_ALPHA_INDEX    = 5

# In case all classes in a project start with a common prefix, all classes
# will be put under the same header in the alphabetical index. The
# IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes)
# that should be ignored while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.

IGNORE_PREFIX          =

#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------

# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output.
# The default value is: YES.

GENERATE_HTML          = YES

# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front
# of it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_OUTPUT            = html

# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
# each generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_FILE_EXTENSION    = .html

# The HTML_HEADER tag can be used to specify a user-defined HTML header file
# for each generated HTML page. If the tag is left blank doxygen will generate
# a standard header.
#
# For the HTML to be valid, the header file must include any scripts and style
# sheets that doxygen needs, which depend on the configuration options used
# (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start with
# a default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate
# the default header when upgrading to a newer version of doxygen. For a
# description of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_HEADER            =

# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard footer. See HTML_HEADER for more information on how to generate a
# default footer and what special commands can be used inside the footer. See
# also section "Doxygen usage" for information on how to generate the default
# footer that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_FOOTER            =
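# A minimal sketch of how custom header and footer files could be wired in
# (the file names are placeholders, not files in this repository): generate
# the defaults once with
#
#   doxygen -w html new_header.html new_footer.html new_stylesheet.css
#
# edit the generated copies, and then point the tags above at them:
#
#   HTML_HEADER = new_header.html
#   HTML_FOOTER = new_footer.html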
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to fine-tune the
# look of the HTML output. If left blank doxygen will generate a default style
# sheet.
# See also section "Doxygen usage" for information on how to generate the
# style sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_STYLESHEET        =

# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style
# aspects. This is preferred over using HTML_STYLESHEET since it does not
# replace the standard style sheet and is therefore more robust against future
# updates. Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra stylesheet files is of importance (e.g. the
# last stylesheet in the list overrules the setting of the previous ones in
# the list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_EXTRA_STYLESHEET  =

# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that
# the files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_EXTRA_FILES       =

# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the stylesheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the
# value 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is
# blue, 300 is purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_COLORSTYLE_HUE    = 220

# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the
# colors in the HTML output. For a value of 0 the output will use grayscales
# only. A value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_COLORSTYLE_SAT    = 100

# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80
# represents a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100
# does not change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_COLORSTYLE_GAMMA  = 80
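# As a quick, purely illustrative example of the three color tags above, a
# green-tinted output (hue 120 per the scale described above) with slightly
# stronger saturation would be:
#
#   HTML_COLORSTYLE_HUE   = 120
#   HTML_COLORSTYLE_SAT   = 150
#   HTML_COLORSTYLE_GAMMA = 80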
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated
# HTML page will contain the date and time when the page was generated.
# Setting this to NO can help when comparing the output of multiple runs.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_TIMESTAMP         = YES

# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_DYNAMIC_SECTIONS  = NO

# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible
# (unless a fully collapsed tree already exceeds this amount). So setting the
# number of entries 1 will produce a full collapsed tree by default. 0 is a
# special value representing an infinite number of entries and will result in
# a full expanded tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.

HTML_INDEX_NUM_ENTRIES = 100

# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated
# development environment (see: http://developer.apple.com/tools/xcode/),
# introduced with OSX 10.5 (Leopard). To create a documentation set, doxygen
# will generate a Makefile in the HTML output directory. Running make will
# produce the docset in that directory and running make install will install
# the docset in ~/Library/Developer/Shared/Documentation/DocSets so that Xcode
# will find it at startup. See
# http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more
# information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

GENERATE_DOCSET        = NO

# This tag determines the name of the docset feed. A documentation feed
# provides an umbrella under which multiple documentation sets from a single
# provider (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.

DOCSET_FEEDNAME        = "Doxygen generated docs"

# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.

DOCSET_BUNDLE_ID       = org.doxygen.Project

# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.

DOCSET_PUBLISHER_ID    = org.doxygen.Publisher

# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.

DOCSET_PUBLISHER_NAME  = Publisher
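# If docset output were wanted, a plausible identifier scheme for this project
# could look as follows (the reverse-domain names are illustrative
# assumptions, not shipped values):
#
#   GENERATE_DOCSET       = YES
#   DOCSET_BUNDLE_ID      = org.greenbone.openvas-scanner
#   DOCSET_PUBLISHER_ID   = org.greenbone
#   DOCSET_PUBLISHER_NAME = Greenbone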
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help
# Workshop (see:
# http://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future.
# Compressed HTML files also contain an index, a table of contents, and you
# can search for words in the documentation. The HTML workshop also contains a
# viewer for compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

GENERATE_HTMLHELP      = NO

# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

CHM_FILE               =

# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

HHC_LOCATION           =

# The GENERATE_CHI flag controls whether a separate .chi index file is
# generated (YES) or whether it should be included in the master .chm file
# (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

GENERATE_CHI           = NO

# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

CHM_INDEX_ENCODING     =

# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

BINARY_TOC             = NO

# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the table of contents of the HTML help documentation and to the tree
# view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.

TOC_EXPAND             = NO

# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed
# Help (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

GENERATE_QHP           = NO

# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to
# specify the file name of the resulting .qch file. The path specified is
# relative to the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.

QCH_FILE               =

# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.

QHP_NAMESPACE          = org.doxygen.Project
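# A hedged sketch of a Qt help build using the tags above (the file name,
# namespace and generator path are assumptions for illustration;
# QHP_VIRTUAL_FOLDER and QHG_LOCATION are described below):
#
#   GENERATE_QHP       = YES
#   QCH_FILE           = openvas-scanner.qch
#   QHP_NAMESPACE      = org.greenbone.openvas-scanner
#   QHP_VIRTUAL_FOLDER = doc
#   QHG_LOCATION       = /usr/bin/qhelpgenerator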
# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when
# generating Qt Help Project output. For more information please see Qt Help
# Project / Virtual Folders (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.

QHP_VIRTUAL_FOLDER     = doc

# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.

QHP_CUST_FILTER_NAME   =

# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project /
# Custom Filters (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.

QHP_CUST_FILTER_ATTRS  =

# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.

QHP_SECT_FILTER_ATTRS  =

# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.

QHG_LOCATION           =

# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will
# be generated, together with the HTML files, they form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as the
# ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before the
# help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

GENERATE_ECLIPSEHELP   = NO

# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.

ECLIPSE_DOC_ID         = org.doxygen.Project
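# Purely illustrative: an Eclipse help plugin for this project could use a
# reverse-domain id (an assumption, not a shipped value); per the description
# above, the directory holding the HTML and XML output inside Eclipse's
# plugins directory would then also be named after it:
#
#   GENERATE_ECLIPSEHELP = YES
#   ECLIPSE_DOC_ID       = org.greenbone.openvas-scanner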
# If you want full control over the layout of the generated HTML pages it
# might be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at
# top of each HTML page. A value of NO enables the index and the value YES
# disables it. Since the tabs in the index contain the same information as the
# navigation tree, you can set this option to YES if you also set
# GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

DISABLE_INDEX          = NO

# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the
# tag value is set to YES, a side panel will be generated containing a
# tree-like index structure (just like the one that is generated for HTML
# Help). For this to work a browser that supports JavaScript, DHTML, CSS and
# frames is required (i.e. any modern browser). Windows users are probably
# better off using the HTML help feature. Via custom stylesheets (see
# HTML_EXTRA_STYLESHEET) one can further fine-tune the look of the index. As
# an example, the default style sheet generated by doxygen has an example that
# shows how to put an image at the root of the tree instead of the
# PROJECT_NAME. Since the tree basically has the same information as the tab
# index, you could consider setting DISABLE_INDEX to YES when enabling this
# option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

GENERATE_TREEVIEW      = NO

# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
# that doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from
# appearing in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.

ENUM_VALUES_PER_LINE   = 4

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is
# shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.

TREEVIEW_WIDTH         = 250
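# If the tree view were preferred over the tab index, the two tags would
# typically be flipped together, e.g. (illustrative, not the setting used
# here):
#
#   DISABLE_INDEX     = YES
#   GENERATE_TREEVIEW = YES
#   TREEVIEW_WIDTH    = 250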
# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

EXT_LINKS_IN_WINDOW    = NO

# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.

FORMULA_FONTSIZE       = 10

# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.

FORMULA_TRANSPARENT    = YES

# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using prerendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output.
# When enabled you may also need to install MathJax separately and configure
# the path to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.

USE_MATHJAX            = NO

# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.

MATHJAX_FORMAT         = HTML-CSS

# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the mathjax
# directory is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the
# MathJax Content Delivery Network so you can quickly see the result without
# installing MathJax. However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.

MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest

# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.

MATHJAX_EXTENSIONS     =

# The MATHJAX_CODEFILE tag can be used to specify a file with javascript
# pieces of code that will be used on startup of the MathJax code. See the
# MathJax site (see: http://docs.mathjax.org/en/latest/output.html) for more
# details. For an example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.

MATHJAX_CODEFILE       =

# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible
# to search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/